| column | dtype | stats |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 3 to 616 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | list | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | min 2015-08-06 10:31:46, max 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | min 1970-01-01 02:38:32, max 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | min 1970-01-01 02:38:32, max 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | min 4.92k, max 681M |
| star_events_count | int64 | min 0, max 209k |
| fork_events_count | int64 | min 0, max 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | min 2012-06-04 01:52:49, max 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | min 2008-05-22 07:58:19, max 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | min 3, max 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 to 10.2M |
| authors | list | lengths 1 to 1 |
| author_id | string | lengths 1 to 132 |
blob_id: 01c36473b6a401d07cf95dce5655f46b3112326f | directory_id: af5a8f872cd8d689c3646376ce80dc69a0bce7b4 | content_id: 73defb37af61ae8cdc8e2f50acd8a07cee5fb352
path: /Chapter03/04-flaskdemo.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: PacktPublishing/NGINX-Cookbook | snapshot_id: de66c1544d8baac5a8794d7f6a2fe30f6e3a7a45 | revision_id: 2cd497d6899388e3bd0721d4e64be428acc7d168 | branch_name: refs/heads/master
visit_date: 2023-02-06T06:29:57.001116 | revision_date: 2023-01-30T08:30:16 | committer_date: 2023-01-30T08:30:16
github_id: 101,969,140 | star_events_count: 34 | fork_events_count: 28 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 218 | extension: py
content:
from flask import Flask
application = Flask(__name__)
@application.route("/")
def hello():
return "<h1>Demo via Nginx with uWSGI!</h1>"
if __name__ == "__main__":
application.run(host='127.0.0.1', port=9001)
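A quick smoke test of the route above, as an editorial sketch using Flask's built-in test client (the module name flaskdemo is hypothetical; import from wherever the file is saved):

# Hypothetical check, assuming the file above is importable as flaskdemo.py
from flaskdemo import application

with application.test_client() as client:
    response = client.get("/")
    assert response.status_code == 200
    assert b"Demo via Nginx with uWSGI!" in response.data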
authors: ["[email protected]"]
blob_id: dcfec3f4c8158b231799bb0aa946b45078fcb324 | directory_id: 66b1748a1238eda820345f914f60da434c668cf0 | content_id: a65721c58ea26d7ab8ca00af120f32d77df7c657
path: /스파르타/week_2/09_factorial.py
detected_licenses: [] | license_type: no_license
repo_name: kwangminini/Algorhitm | snapshot_id: 5d3140021584239e30468d3dcb353b119b935e76 | revision_id: 4d9a3b9284c90d141c1a73e14329152455373c53 | branch_name: refs/heads/master
visit_date: 2023-09-03T07:33:51.228150 | revision_date: 2023-08-28T13:39:52 | committer_date: 2023-08-28T13:39:52
github_id: 225,879,016 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 137 | extension: py
content:
def factorial(n):
    # Fill in this part!
    if n <= 1:  # base case; also stops the recursion for n == 0
        return 1
return n * factorial(n-1)
print(factorial(5))
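For reference, an iterative equivalent (an editorial sketch, not part of the exercise file) avoids CPython's default recursion limit for large n:

def factorial_iter(n):
    # multiply 2..n; returns 1 for n <= 1, matching the recursive base case
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result

print(factorial_iter(5))  # 120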
authors: ["[email protected]"]
blob_id: 4081b31cf2708adf515b0cd0051c0277648bb564 | directory_id: 61699615fab0e91c7dd72d5eff6cd7326e83703c | content_id: 53c0db168c6c79b4b7656a9be74ffc164644cb3d
path: /python/zheng_ze.py
detected_licenses: [] | license_type: no_license
repo_name: ftZHOU/Pronostics-sportifs | snapshot_id: b85cae26068c9bc5f9a95c821b907382d2e64386 | revision_id: a9384f0ba8e41a4fb0ec049c37c97f30aec45e49 | branch_name: refs/heads/master
visit_date: 2021-08-07T14:44:16.267738 | revision_date: 2017-11-08T10:42:51 | committer_date: 2017-11-08T10:42:51
github_id: 109,962,567 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 850 | extension: py
content:
import urllib2
import re
request = urllib2.Request("http://www.soccerstats.com/latest.asp?league=france")
try:
response = urllib2.urlopen(request)
content = response.read()
#print content
table3 = "<td height='22'>.*?<a href='team\.asp\?league.*?op'>(.*?)</a>.*?<font color='green'>(.*?)</font>.*?<td align='center'>(.*?)</TD>.*?<td align='center'>(.*?)</TD>.*?<td align='center'>(.*?)</TD>.*?<font color='blue'>(.*?)</font>.*?<font color='red'>(.*?)</font>.*?<td align='center'>(.*?)</TD>.*?b>(.*?)</b>"
pattern = re.compile(table3,re.S)
items = re.findall(pattern,content)
#print items
for item in items:
print "team name:"+item[0]+"\n","GP:"+item[1],"W:"+item[2],"D:"+item[3],"L:"+item[4],"G:"+item[5],"F:"+item[6]
except urllib2.HTTPError,e:
print e.code
except urllib2.URLError, e:
print e.reason
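The file above is Python 2 (urllib2, print statements, comma-style except clauses). A rough Python 3 sketch of the same fetch-and-match flow, with a simplified stand-in for the long table3 pattern, might look like this:

import re
import urllib.error
import urllib.request

url = "http://www.soccerstats.com/latest.asp?league=france"
# simplified stand-in; substitute the full table3 pattern above for real rows
pattern = re.compile(r"<font color='green'>(.*?)</font>", re.S)
try:
    with urllib.request.urlopen(url) as response:
        content = response.read().decode("utf-8", errors="replace")
    for match in pattern.findall(content):
        print(match)
except urllib.error.HTTPError as e:
    print(e.code)
except urllib.error.URLError as e:
    print(e.reason)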
authors: ["[email protected]"]
blob_id: 139ac0068a4db76318adde21dbdcaaf837d4d4e5 | directory_id: 2da72c9f9bbb0b5db33710cddbdee28503e5a606 | content_id: 3de6f54a8632bb7795501af3baee00dc13da130d
path: /udacity/artificialIntelligenceForRobots/search3 2.py
detected_licenses: [] | license_type: no_license
repo_name: gddickinson/python_code | snapshot_id: 2e71fb22b929cb26c2a1456b11dc515af048c441 | revision_id: dbb20e171fb556e122350fb40e12cc76adbb9a66 | branch_name: refs/heads/master
visit_date: 2022-10-26T15:20:40.709820 | revision_date: 2022-10-11T16:06:27 | committer_date: 2022-10-11T16:06:27
github_id: 44,060,963 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,732 | extension: py
content:
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 13 07:19:53 2016
@author: George
"""
# -----------
# User Instructions:
#
# Modify the search function so that it becomes
# an A* search algorithm as defined in the previous
# lectures.
#
# Your function should return the expanded grid
# which shows, for each element, the count when
# it was expanded or -1 if the element was never expanded.
#
# If there is no path from init to goal,
# the function should return the string 'fail'
# ----------
grid = [[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0]]
heuristic = [[9, 8, 7, 6, 5, 4],
[8, 7, 6, 5, 4, 3],
[7, 6, 5, 4, 3, 2],
[6, 5, 4, 3, 2, 1],
[5, 4, 3, 2, 1, 0]]
init = [0, 0]
goal = [len(grid)-1, len(grid[0])-1]
cost = 1
delta = [[-1, 0 ], # go up
[ 0, -1], # go left
[ 1, 0 ], # go down
[ 0, 1 ]] # go right
delta_name = ['^', '<', 'v', '>']
def search(grid,init,goal,cost,heuristic):
# ----------------------------------------
# modify the code below
# ----------------------------------------
closed = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]
closed[init[0]][init[1]] = 1
expand = [[-1 for col in range(len(grid[0]))] for row in range(len(grid))]
action = [[-1 for col in range(len(grid[0]))] for row in range(len(grid))]
x = init[0]
y = init[1]
g = 0
f = 0
open = [[f, g, x, y]]
found = False # flag that is set when search is complete
resign = False # flag set if we can't find expand
count = 0
while not found and not resign:
        if len(open) == 0:
            resign = True
            return 'fail'  # per the instructions above: no path from init to goal
else:
open.sort()
open.reverse()
next = open.pop()
x = next[2]
y = next[3]
g = next[1]
f = next[0]
expand[x][y] = count
count += 1
if x == goal[0] and y == goal[1]:
found = True
else:
for i in range(len(delta)):
x2 = x + delta[i][0]
y2 = y + delta[i][1]
if x2 >= 0 and x2 < len(grid) and y2 >=0 and y2 < len(grid[0]):
if closed[x2][y2] == 0 and grid[x2][y2] == 0:
g2 = g + cost
f = g2 + heuristic[x2][y2]
open.append([f,g2, x2, y2])
closed[x2][y2] = 1
return expand
test = search(grid, init, goal, cost, heuristic)
for i in range(len(test)):
print(test[i])
authors: ["[email protected]"]
blob_id: 71980b6881124f741b4171304de8cf80c36a24c4 | directory_id: ca8f2b28353e449c10cf520ee1d7d946163c211e | content_id: 1fc7139d3b904220028cf69091fef1a34921e07f
path: /grammar.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: zwytop/nlp-2017 | snapshot_id: e42452d53279fbe4d1af279b3ad5b165a01b1ccb | revision_id: ecdefc6dc179ef73c981b793673056804f37db51 | branch_name: refs/heads/master
visit_date: 2021-01-23T05:18:33.126936 | revision_date: 2017-02-09T10:57:21 | committer_date: 2017-02-09T10:57:21
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 472 | extension: py
content:
import nltk
from nltk.draw.tree import TreeView
groucho_grammar = nltk.CFG.fromstring("""
S -> NP VP
PP -> P NP
NP -> Det Nom
Nom -> N | Adj Nom
VP -> V NP | VP PP
Det -> 'the'
Adj -> 'little' | 'fine' | 'fat'
N -> 'brook' | 'trout' | 'bear'
V -> 'saw'
P -> 'in'
""")
sent = 'the little bear saw the fine fat trout in the brook'.split(' ')
parser = nltk.ChartParser(groucho_grammar)
for tree in parser.parse(sent):
TreeView(tree)._cframe.print_to_file('grammar.ps')
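Note that TreeView(...)._cframe reaches into a private attribute and needs a Tk display to write the PostScript file. On a headless machine, a hedged alternative is to render the parses as text (pretty_print is available on Tree objects in recent NLTK releases):

for tree in parser.parse(sent):
    print(tree)          # bracketed structure
    tree.pretty_print()  # ASCII rendering, no display server required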
authors: ["[email protected]"]
blob_id: 2fbbba64e4146809c447436783d5bf7bf23032fb | directory_id: 4d83e8ec852194a811cb18cfb3f4b13bd298216e | content_id: 36b556740118a3faa7047edeb89ad4a445848c96
path: /egs/word/run_trf_neural_sa.py
detected_licenses: [] | license_type: no_license
repo_name: peternara/TRF-NN-Tensorflow | snapshot_id: 9961dd41195476dd143d45607c8558cae558337e | revision_id: 0b235bebdbfe285873c3bef8e62fe475e13ea70a | branch_name: refs/heads/master
visit_date: 2021-08-31T16:31:46.097523 | revision_date: 2017-12-22T03:01:20 | committer_date: 2017-12-22T03:01:20
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,639 | extension: py
content:
import tensorflow as tf
import os
import sys
import numpy as np
from base import *
from lm import *
from trf.sa import *
import task
class Opt(trf.DefaultOps):
def __init__(self, trf_model):
super().__init__(trf_model, *task.get_nbest())
self.per_epoch = 0.1
self.next_epoch = 0
self.out_logz = os.path.join(trf_model.logdir, 'logz.dbg')
def run(self, step, epoch):
super().run(step, epoch)
if epoch > self.next_epoch:
self.next_epoch += self.per_epoch
with self.m.time_recoder.recode('true_logz'):
true_logz = self.m.true_logz(5)
nce_logz = self.m.norm_const.get_logz()
with open(self.out_logz, 'at') as f:
f.write('step={} epoch={:.2f}'.format(step, epoch) + '\n')
f.write('nce= ' + ' '.join(['{:.2f}'.format(i) for i in nce_logz]) + '\n')
f.write('true= ' + ' '.join(['{:.2f}'.format(i) for i in true_logz]) + '\n')
def create_config(data):
config = trf.Config(data)
config.chain_num = 100
config.multiple_trial = 10
config.sample_batch_size = 100
# config.auxiliary_model = 'lstm'
config.auxiliary_config.embedding_size = 32
config.auxiliary_config.hidden_size = 32
config.auxiliary_config.hidden_layers = 1
config.auxiliary_config.batch_size = 100
config.auxiliary_config.step_size = 10
config.auxiliary_config.learning_rate = 1.0
config.lr_feat = lr.LearningRateEpochDelay(1e-3)
config.lr_net = lr.LearningRateEpochDelay(1e-3)
config.lr_logz = lr.LearningRateEpochDelay(0.1)
config.opt_feat_method = 'adam'
config.opt_net_method = 'adam'
config.opt_logz_method = 'sgd'
# feat config
# config.feat_config.feat_type_file = '../../tfcode/feat/g4.fs'
config.feat_config = None
# neural config
config.net_config.update(task.get_config_rnn(config.vocab_size))
config.net_config.cnn_skip_connection = False
return config
def create_name(config):
return str(config)
def main(_):
data = reader.Data().load_raw_data(reader.word_raw_dir(),
add_beg_token='</s>', add_end_token='</s>',
add_unknwon_token=None,
max_length=None)
# create config
config = create_config(data)
# config.net_config.only_train_weight = True
# create log dir
logdir = 'trf_sa/' + create_name(config)
# prepare the log dir
wb.prepare_log_dir(logdir, 'trf.log')
config.print()
data.write_vocab(logdir + '/vocab.txt')
data.write_data(data.datas[1], logdir + '/valid.id')
data.write_data(data.datas[2], logdir + '/test.id')
m = trf.TRF(config, data, logdir=logdir, device='/gpu:0')
nce_pretrain_model_path = 'trf_nce/trf_nce10_e16_cnn_(1to5)x16_(3x16)x3_relu_noise2gram/trf.mod'
sv = tf.train.Supervisor(logdir=os.path.join(logdir, 'logs'),
global_step=m.global_step)
sv.summary_writer.add_graph(tf.get_default_graph()) # write the graph to logs
session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
session_config.gpu_options.allow_growth = True
with sv.managed_session(config=session_config) as session:
with session.as_default():
# m.restore_nce_model(nce_pretrain_model_path)
# m.save()
m.train(operation=Opt(m))
if __name__ == '__main__':
tf.app.run(main=main)
authors: ["[email protected]"]
blob_id: f7858f96013a26b0503c5d0ffa20fc0b9b82059b | directory_id: e07732e5f37e4ea947692cd5aef54370e6706c1b | content_id: 83fe7f26404542ea68bb7e37047302c42bbd2923
path: /third_party/skia/infra/bots/recipes/ct_skps.py
detected_licenses: ["BSD-3-Clause", "MIT"] | license_type: permissive
repo_name: davehorner/Prelude | snapshot_id: 5270a7da950fd0cb9d681b2debc4143321160c49 | revision_id: bd005acdcffcb54564597246a6ee613b21fa592f | branch_name: refs/heads/master
visit_date: 2022-01-06T20:42:51.030436 | revision_date: 2019-03-10T22:57:37 | committer_date: 2019-03-10T22:57:37
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 16,137 | extension: py
content:
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
DEPS = [
'checkout',
'ct',
'recipe_engine/context',
'recipe_engine/file',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/step',
'recipe_engine/time',
'run',
'skia_swarming',
'vars',
]
SKPS_VERSION_FILE = 'skps_version'
CT_SKPS_ISOLATE = 'ct_skps.isolate'
# Do not batch archive more slaves than this value. This is used to prevent
# no output timeouts in the 'isolate tests' step.
MAX_SLAVES_TO_BATCHARCHIVE = 100
TOOL_TO_DEFAULT_SKPS_PER_SLAVE = {
'dm': 10000,
'nanobench': 1000,
'get_images_from_skps': 10000,
}
# The SKP repository to use.
DEFAULT_SKPS_CHROMIUM_BUILD = '2b7e85eb251dc7-a3cf3659ed2c08'
def make_path(api, *path):
"""Return a Path object for the given path."""
key = 'custom_%s' % '_'.join(path)
api.path.c.base_paths[key] = tuple(path)
return api.path[key]
def RunSteps(api):
# Figure out which repository to use.
buildername = api.properties['buildername']
if '1k' in buildername:
ct_page_type = 'All'
num_pages = 1000
elif '10k' in buildername:
ct_page_type = 'All'
num_pages = 10000
elif '100k' in buildername:
ct_page_type = 'All'
num_pages = 100000
elif '1m' in buildername:
ct_page_type = 'All'
num_pages = 1000000
else:
raise Exception('Do not recognise the buildername %s.' % buildername)
# Figure out which tool to use.
if 'DM' in buildername:
skia_tool = 'dm'
elif 'BENCH' in buildername:
skia_tool = 'nanobench'
elif 'IMG_DECODE' in buildername:
skia_tool = 'get_images_from_skps'
else:
raise Exception('Do not recognise the buildername %s.' % buildername)
api.vars.setup()
checkout_root = make_path(api, '/', 'b', 'work')
gclient_cache = make_path(api, '/', 'b', 'cache')
api.checkout.bot_update(checkout_root=checkout_root,
gclient_cache=gclient_cache)
api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir)
# Required paths.
infrabots_dir = checkout_root.join('skia', 'infra', 'bots')
isolate_dir = infrabots_dir.join('ct')
isolate_path = isolate_dir.join(CT_SKPS_ISOLATE)
# Copy the required binary to the isolate dir.
src = api.vars.build_dir.join(skia_tool)
api.file.copy('copy %s' % skia_tool, src, isolate_dir.join(skia_tool))
api.skia_swarming.setup(
infrabots_dir.join('tools', 'luci-go'),
swarming_rev='')
skps_chromium_build = api.properties.get(
'skps_chromium_build', DEFAULT_SKPS_CHROMIUM_BUILD)
# Set build properties to make finding SKPs convenient.
webpage_rankings_link = (
'https://storage.cloud.google.com/%s/csv/top-1m.csv'
% api.ct.CT_GS_BUCKET)
api.step.active_result.presentation.properties['Webpage rankings'] = (
webpage_rankings_link)
download_skps_link = (
'https://pantheon.corp.google.com/storage/browser/%s/swarming/skps/%s/%s/'
% (api.ct.CT_GS_BUCKET, ct_page_type, skps_chromium_build))
api.step.active_result.presentation.properties['Download SKPs by rank'] = (
download_skps_link)
# Delete swarming_temp_dir to ensure it starts from a clean slate.
api.run.rmtree(api.skia_swarming.swarming_temp_dir)
num_per_slave = api.properties.get(
'num_per_slave',
min(TOOL_TO_DEFAULT_SKPS_PER_SLAVE[skia_tool], num_pages))
ct_num_slaves = api.properties.get(
'ct_num_slaves',
int(math.ceil(float(num_pages) / num_per_slave)))
# Try to figure out if the SKPs we are going to isolate already exist
# locally by reading the SKPS_VERSION_FILE.
download_skps = True
expected_version_contents = {
"chromium_build": skps_chromium_build,
"page_type": ct_page_type,
"num_slaves": ct_num_slaves,
}
  # Note: If this directory is changed, the corresponding places it is
  # referenced also need to change. As of 8/8/17 the other places are:
# * infra/bots/ct/ct_skps.isolate
# * infra/bots/ct/run_ct_skps.py
skps_dir = checkout_root.join(
'skps', skps_chromium_build, ct_page_type, str(ct_num_slaves))
version_file = skps_dir.join(SKPS_VERSION_FILE)
if api.path.exists(version_file): # pragma: nocover
version_file_contents = api.file.read_text(
"Read %s" % version_file,
version_file,
test_data=expected_version_contents)
actual_version_contents = api.json.loads(version_file_contents)
differences = (set(expected_version_contents.items()) ^
set(actual_version_contents.items()))
download_skps = len(differences) != 0
if download_skps:
# Delete and recreate the skps dir.
api.run.rmtree(skps_dir)
api.file.ensure_directory(
'makedirs %s' % api.path.basename(skps_dir), skps_dir)
# If a blacklist file exists then specify SKPs to be blacklisted.
blacklists_dir = infrabots_dir.join('ct', 'blacklists')
blacklist_file = blacklists_dir.join(
'%s_%s_%s.json' % (skia_tool, ct_page_type, skps_chromium_build))
blacklist_skps = []
if api.path.exists(blacklist_file): # pragma: nocover
blacklist_file_contents = api.file.read_text(
"Read %s" % blacklist_file,
blacklist_file)
blacklist_skps = api.json.loads(blacklist_file_contents)['blacklisted_skps']
for slave_num in range(1, ct_num_slaves + 1):
if download_skps:
# Download SKPs.
api.ct.download_swarming_skps(
ct_page_type, slave_num, skps_chromium_build,
skps_dir,
start_range=((slave_num-1)*num_per_slave) + 1,
num_skps=num_per_slave)
# Create this slave's isolated.gen.json file to use for batcharchiving.
extra_variables = {
'SLAVE_NUM': str(slave_num),
'TOOL_NAME': skia_tool,
'GIT_HASH': api.properties['revision'],
'CONFIGURATION': api.vars.configuration,
'BUILDER': buildername,
'CHROMIUM_BUILD': skps_chromium_build,
'PAGE_TYPE': ct_page_type,
'NUM_SLAVES': str(ct_num_slaves),
}
api.skia_swarming.create_isolated_gen_json(
isolate_path, isolate_dir, 'linux', 'ct-%s-%s' % (skia_tool, slave_num),
extra_variables, blacklist=blacklist_skps)
if download_skps:
# Since we had to download SKPs create an updated version file.
api.file.write_text("Create %s" % version_file,
version_file,
api.json.dumps(expected_version_contents))
# Batcharchive everything on the isolate server for efficiency.
max_slaves_to_batcharchive = MAX_SLAVES_TO_BATCHARCHIVE
if '1m' in buildername:
# Break up the "isolate tests" step into batches with <100k files due to
# https://github.com/luci/luci-go/issues/9
max_slaves_to_batcharchive = 5
tasks_to_swarm_hashes = []
for slave_start_num in xrange(1, ct_num_slaves+1, max_slaves_to_batcharchive):
    # Clamp the final batch so target numbers never exceed ct_num_slaves.
    m = min(max_slaves_to_batcharchive, ct_num_slaves - slave_start_num + 1)
batcharchive_output = api.skia_swarming.batcharchive(
targets=['ct-' + skia_tool + '-%s' % num for num in range(
slave_start_num, slave_start_num + m)])
tasks_to_swarm_hashes.extend(batcharchive_output)
# Sort the list to go through tasks in order.
tasks_to_swarm_hashes.sort()
# Trigger all swarming tasks.
  dimensions = {'os': 'Ubuntu-14.04'}
if 'GPU' in buildername:
dimensions['cpu'] = 'x86-64-E3-1230_v5'
dimensions['gpu'] = '10de:1cb3-384.90'
# See crbug.com/700053
dimensions['pool'] = 'Chrome-GPU'
else:
dimensions['cpu'] = 'x86-64-Broadwell_GCE'
dimensions['pool'] = 'Chrome'
tasks = api.skia_swarming.trigger_swarming_tasks(
tasks_to_swarm_hashes, dimensions=dimensions, io_timeout=40*60)
# Now collect all tasks.
env = {'AWS_CREDENTIAL_FILE': None, 'BOTO_CONFIG': None}
failed_tasks = []
for task in tasks:
try:
api.skia_swarming.collect_swarming_task(task)
if skia_tool == 'nanobench':
output_dir = api.skia_swarming.tasks_output_dir.join(
task.title).join('0')
utc = api.time.utcnow()
gs_dest_dir = 'gs://skia-perf/ct/%s/%d/%02d/%02d/%02d/' % (
ct_page_type, utc.year, utc.month, utc.day, utc.hour)
for json_output in api.file.listdir(
'listdir output dir', output_dir, test_data=['file 1', 'file 2']):
with api.context(env=env):
cmd = ['gsutil', 'cp', '-R', json_output, gs_dest_dir]
api.step('upload json output', cmd=cmd, infra_step=True)
except api.step.StepFailure as e:
# Add SKP links for convenience.
api.step.active_result.presentation.links['Webpage rankings'] = (
webpage_rankings_link)
api.step.active_result.presentation.links['Download SKPs by rank'] = (
download_skps_link)
failed_tasks.append(e)
if failed_tasks:
raise api.step.StepFailure(
'Failed steps: %s' % ', '.join([f.name for f in failed_tasks]))
def GenTests(api):
ct_num_slaves = 5
num_per_slave = 10
skia_revision = 'abc123'
path_config = 'kitchen'
yield(
api.test('CT_DM_10k_SKPs') +
api.properties(
buildername=('Test-Ubuntu14-Clang-GCE-CPU-AVX2-x86_64-Debug-All-' +
'CT_DM_10k_SKPs'),
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
)
)
yield(
api.test('CT_IMG_DECODE_10k_SKPs') +
api.properties(
buildername='Test-Ubuntu14-Clang-GCE-CPU-AVX2-x86_64-Debug-All-'
'CT_IMG_DECODE_10k_SKPs',
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
)
)
yield(
api.test('CT_DM_100k_SKPs') +
api.properties(
buildername=('Test-Ubuntu14-Clang-GCE-CPU-AVX2-x86_64-Debug-All-' +
'CT_DM_100k_SKPs'),
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
)
)
yield(
api.test('CT_IMG_DECODE_100k_SKPs') +
api.properties(
buildername='Test-Ubuntu14-Clang-GCE-CPU-AVX2-x86_64-Debug-All-'
'CT_IMG_DECODE_100k_SKPs',
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
)
)
yield(
api.test('CT_GPU_BENCH_1k_SKPs') +
api.properties(
buildername=(
'Perf-Ubuntu14-Clang-Golo-GPU-QuadroP400-x86_64-Release-All-'
'CT_BENCH_1k_SKPs'),
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
) +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('src')
)
)
yield(
api.test('CT_CPU_BENCH_10k_SKPs') +
api.properties(
buildername=('Perf-Ubuntu14-Clang-GCE-CPU-AVX2-x86_64-Release-All-'
'CT_BENCH_10k_SKPs'),
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
) +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('src')
)
)
yield(
api.test('CT_GPU_BENCH_10k_SKPs') +
api.properties(
buildername=(
'Perf-Ubuntu14-Clang-Golo-GPU-QuadroP400-x86_64-Release-All-'
'CT_BENCH_10k_SKPs'),
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
) +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('src')
)
)
yield(
api.test('CT_DM_1m_SKPs') +
api.properties(
buildername=('Test-Ubuntu14-Clang-GCE-CPU-AVX2-x86_64-Debug-All-'
'CT_DM_1m_SKPs'),
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
)
)
yield (
api.test('CT_DM_SKPs_UnknownBuilder') +
api.properties(
buildername=('Test-Ubuntu14-Clang-GCE-CPU-AVX2-x86_64-Debug-All-' +
'CT_DM_UnknownRepo_SKPs'),
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
) +
api.expect_exception('Exception')
)
yield (
api.test('CT_10k_SKPs_UnknownBuilder') +
api.properties(
buildername=('Test-Ubuntu14-Clang-GCE-CPU-AVX2-x86_64-Debug-All-' +
'CT_UnknownTool_10k_SKPs'),
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
) +
api.expect_exception('Exception')
)
yield(
api.test('CT_DM_1m_SKPs_slave3_failure') +
api.step_data('ct-dm-3', retcode=1) +
api.properties(
buildername=('Test-Ubuntu14-Clang-GCE-CPU-AVX2-x86_64-Debug-All-' +
'CT_DM_1m_SKPs'),
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
)
)
yield(
api.test('CT_DM_1m_SKPs_2slaves_failure') +
api.step_data('ct-dm-1', retcode=1) +
api.step_data('ct-dm-3', retcode=1) +
api.properties(
buildername=('Test-Ubuntu14-Clang-GCE-CPU-AVX2-x86_64-Debug-All'+
'-CT_DM_1m_SKPs'),
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
)
)
builder = 'Test-Ubuntu14-Clang-GCE-CPU-AVX2-x86_64-Debug-All-CT_DM_10k_SKPs'
yield(
api.test('CT_DM_10k_SKPs_Trybot') +
api.properties(
buildername=builder,
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
patch_ref='refs/changes/89/456789/12',
patch_repo='https://skia.googlesource.com/skia.git',
patch_storage='gerrit')
)
builder = ('Test-Ubuntu14-Clang-GCE-CPU-AVX2-x86_64-Debug-All-'
'CT_IMG_DECODE_10k_SKPs')
yield(
api.test('CT_IMG_DECODE_10k_SKPs_Trybot') +
api.properties(
buildername=builder,
path_config=path_config,
swarm_out_dir='[SWARM_OUT_DIR]',
ct_num_slaves=ct_num_slaves,
num_per_slave=num_per_slave,
repository='https://skia.googlesource.com/skia.git',
revision=skia_revision,
patch_ref='refs/changes/89/456789/12',
patch_repo='https://skia.googlesource.com/skia.git',
patch_storage='gerrit')
)
authors: ["[email protected]"]
blob_id: 43ed3032a9d6d8aa4d915e1207566ac1933f96b9 | directory_id: d93fe0484fc3b32c8fd9b33cc66cfd636a148ec4 | content_id: ebbfe5197c9987bb04c7865a228eb39eb2b6054b
path: /Codeforces/ECR67/probD.py
detected_licenses: [] | license_type: no_license
repo_name: wattaihei/ProgrammingContest | snapshot_id: 0d34f42f60fa6693e04c933c978527ffaddceda7 | revision_id: c26de8d42790651aaee56df0956e0b206d1cceb4 | branch_name: refs/heads/master
visit_date: 2023-04-22T19:43:43.394907 | revision_date: 2021-05-02T13:05:21 | committer_date: 2021-05-02T13:05:21
github_id: 264,400,706 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,609 | extension: py
content:
# instead of AVLTree
class BITbisect():
def __init__(self, max):
self.max = max
self.data = [0]*(self.max+1)
    # interval sum over positions 1..i
    # process the set bits from the bottom up
def query_sum(self, i):
s = 0
while i > 0:
s += self.data[i]
i -= i & -i
return s
    # add x to the i-th element
    # add it to every interval that covers i
def add(self, i, x):
while i <= self.max:
self.data[i] += x
i += i & -i
def insert(self, x):
self.add(x, 1)
def delete(self, x):
self.add(x, -1)
def count(self, x):
return self.query_sum(x) - self.query_sum(x-1)
def length(self):
return self.query_sum(self.max)
    # the c-th number from the bottom (0-indexed)
# O(log(N))
def search(self, c):
c += 1
s = 0
ind = 0
l = self.max.bit_length()
for i in reversed(range(l)):
if ind + (1<<i) <= self.max:
if s + self.data[ind+(1<<i)] < c:
s += self.data[ind+(1<<i)]
ind += (1<<i)
if ind == self.max:
return False
return ind + 1
def bisect_right(self, x):
return self.query_sum(x)
def bisect_left(self, x):
if x == 1:
return 0
return self.query_sum(x-1)
    # display the contents like a list
def display(self):
print('inside BIT:', end=' ')
for x in range(1, self.max+1):
if self.count(x):
c = self.count(x)
for _ in range(c):
print(x, end=' ')
print()
import sys
input = sys.stdin.readline
Q = int(input())
Query = []
for _ in range(Q):
N = int(input())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
Query.append((N, A, B))
for N, A, B in Query:
bitA = BITbisect(N)
bitB = BITbisect(N)
ans = 'YES'
for i in range(N):
a, b = A[i], B[i]
if bitA.length() == 0 and bitB.length() == 0:
if a == b: continue
if bitB.count(b) > 0:
bitB.delete(b)
else:
bitA.insert(b)
if bitA.count(a) > 0:
bitA.delete(a)
else:
bitB.insert(a)
#print(i)
#bitA.display()
#bitB.display()
if bitA.length() != 0 or bitB.length() != 0:
if i == N-1:
ans = 'NO'
elif B[i] > B[i+1]:
ans = 'NO'
break
print(ans)
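A minimal usage sketch of BITbisect as an order-statistics multiset over the values 1..max (an editorial example, not part of the submission):

bit = BITbisect(10)
for v in (3, 5, 5, 9):
    bit.insert(v)
print(bit.length())        # 4 stored values
print(bit.count(5))        # 2 copies of 5
print(bit.search(0))       # 3, the smallest stored value
print(bit.bisect_left(5))  # 1 stored value is < 5
bit.delete(5)
print(bit.count(5))        # 1 copy left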
authors: ["[email protected]"]
blob_id: 75f8d91434a9301e13f3e2bbbdec53876240c65a | directory_id: 5626d2c289e6cc3752f43a0f98c45dd914b9acc8 | content_id: 2b02deb282fc698b96617f58fc10e74d1dbb74f6
path: /shaura/testing.py
detected_licenses: [] | license_type: no_license
repo_name: datakurre/shaura | snapshot_id: b9e100b99d19789f69900dbb192bc6a57a7bbd43 | revision_id: b26aef07c880134e780f4e5fbd851c37414273b2 | branch_name: refs/heads/master
visit_date: 2020-04-11T03:33:53.301674 | revision_date: 2011-09-14T07:35:32 | committer_date: 2011-09-14T07:35:32
github_id: 2,376,328 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,260 | extension: py
content:
# -*- coding: utf-8 -*-
"""zope.testrunner layers"""
from pyramid import testing
class PyramidLayer(object):
@classmethod
def setUp(cls):
cls.config = testing.setUp()
import pyramid_zcml
cls.config.include(pyramid_zcml)
cls.config.load_zcml("shaura:configure.zcml")
@classmethod
def tearDown(cls):
testing.tearDown()
@classmethod
def testSetUp(cls):
pass
@classmethod
def testTearDown(cls):
pass
from shaura import testing_volatile
class VolatileLayer(PyramidLayer):
@classmethod
def setUp(cls):
cls.config.load_zcml("shaura:testing_volatile.zcml")
@classmethod
def tearDown(cls):
pass
@classmethod
def testSetUp(cls):
testing_volatile.DATASTORE.clear()
@classmethod
def testTearDown(cls):
pass
from shaura import testing_app
class VolatileAppLayer(VolatileLayer):
@classmethod
def setUp(cls):
cls.config.load_zcml("shaura:testing_app.zcml")
cls.config._set_root_factory(testing_app.Application)
@classmethod
def tearDown(cls):
pass
@classmethod
def testSetUp(cls):
pass
@classmethod
def testTearDown(cls):
pass
authors: ["[email protected]"]
blob_id: f3b7d8ddea426fbb7199f1006c6b2961567bed88 | directory_id: 294767ff9d1190726a82931e0ac16db83eebc3f6 | content_id: b2191e296f5f3b8125bd53237b24f9c44b8b0ebf
path: /chaospy/quad/interface.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: FKShi/chaospy | snapshot_id: 30ee2d4eac07f1ff713480aba6304726bcacae7d | revision_id: 25ecfa7bf5608dc10c0b31d142ded0e3755f5d74 | branch_name: refs/heads/master
visit_date: 2020-05-22T00:52:20.353874 | revision_date: 2019-04-21T11:24:54 | committer_date: 2019-04-21T11:24:54
github_id: 186,181,921 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2019-05-11T21:14:04 | gha_created_at: 2019-05-11T21:14:04 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,787 | extension: py
content:
"""Frontend for the generation of quadrature rules."""
import inspect
import numpy as np
from scipy.misc import comb
from . import collection, sparse_grid
def generate_quadrature(
order, domain, accuracy=100, sparse=False, rule="C",
composite=1, growth=None, part=None, normalize=False, **kws
):
"""
Numerical quadrature node and weight generator.
Args:
order (int):
The order of the quadrature.
domain (numpy.ndarray, Dist):
If array is provided domain is the lower and upper bounds (lo,up).
Invalid if gaussian is set. If Dist is provided, bounds and nodes
are adapted to the distribution. This includes weighting the nodes
in Clenshaw-Curtis quadrature.
        accuracy (int):
            If gaussian is set, but the Dist provided in domain does not
            provide an analytical TTR, accuracy sets the approximation order
            for the discretized Stieltjes' method.
sparse (bool):
If True used Smolyak's sparse grid instead of normal tensor product
grid.
rule (str):
Rule for generating abscissas and weights. Either done with
quadrature rules, or with random samples with constant weights.
composite (int):
If provided, composite quadrature will be used. Value determines
the number of domains along an axis. Ignored in the case
gaussian=True.
normalize (bool):
In the case of distributions, the abscissas and weights are not
tailored to a distribution beyond matching the bounds. If True, the
samples are normalized multiplying the weights with the density of
the distribution evaluated at the abscissas and normalized
afterwards to sum to one.
growth (bool):
If True sets the growth rule for the composite quadrature rule to
exponential for Clenshaw-Curtis quadrature.
"""
from ..distributions.baseclass import Dist
isdist = isinstance(domain, Dist)
if isdist:
dim = len(domain)
else:
dim = np.array(domain[0]).size
rule = rule.lower()
if len(rule) == 1:
rule = collection.QUAD_SHORT_NAMES[rule]
quad_function = collection.get_function(
rule,
domain,
normalize,
growth=growth,
composite=composite,
accuracy=accuracy,
)
if sparse:
order = np.ones(len(domain), dtype=int)*order
abscissas, weights = sparse_grid.sparse_grid(quad_function, order, dim)
else:
abscissas, weights = quad_function(order)
assert len(weights) == abscissas.shape[1]
assert len(abscissas.shape) == 2
return abscissas, weights
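A hedged usage sketch of this frontend (argument order follows the signature above; the Normal distribution class is assumed from the same-era chaospy package):

import chaospy
distribution = chaospy.Normal(0, 1)
# fifth-order Clenshaw-Curtis rule ("C" is this version's default), adapted to the distribution
abscissas, weights = chaospy.generate_quadrature(5, distribution, rule="C")
print(abscissas.shape, len(weights))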
authors: ["[email protected]"]
blob_id: e47fcc515260d699c5c1d37e0c345d4381a585b3 | directory_id: 89a90707983bdd1ae253f7c59cd4b7543c9eda7e | content_id: b9b26c99523a936203b19bd4f1f7b7feff3a071e
path: /python_cookbook/11/simple_authentication_of_clients/server.py
detected_licenses: [] | license_type: no_license
repo_name: timothyshull/python_reference_code | snapshot_id: 692a7c29608cadfd46a6cc409a000023e95b9458 | revision_id: f3e2205dd070fd3210316f5f470d371950945028 | branch_name: refs/heads/master
visit_date: 2021-01-22T20:44:07.018811 | revision_date: 2017-03-17T19:17:22 | committer_date: 2017-03-17T19:17:22
github_id: 85,346,735 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 607 | extension: py
content:
from socket import socket, AF_INET, SOCK_STREAM
from auth import server_authenticate
secret_key = b'peekaboo'
def echo_handler(client_sock):
if not server_authenticate(client_sock, secret_key):
client_sock.close()
return
while True:
msg = client_sock.recv(8192)
if not msg:
break
client_sock.sendall(msg)
def echo_server(address):
s = socket(AF_INET, SOCK_STREAM)
s.bind(address)
s.listen(5)
while True:
c, a = s.accept()
echo_handler(c)
print('Echo server running on port 18000')
echo_server(('', 18000))
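A matching client sketch; the cookbook's auth module is assumed to expose client_authenticate(sock, key) as the counterpart of the server_authenticate used above:

from socket import socket, AF_INET, SOCK_STREAM
from auth import client_authenticate  # assumed counterpart helper

s = socket(AF_INET, SOCK_STREAM)
s.connect(('localhost', 18000))
client_authenticate(s, b'peekaboo')  # same shared secret as the server
s.sendall(b'Hello World')
print(s.recv(8192))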
authors: ["[email protected]"]
blob_id: 4f620f7dd7a21de6a7e18e9191a7dfa62aa79cf0 | directory_id: dc905bec7c109d82f26bdeca5cd1d503ecfa77c6 | content_id: 2d439cb5b44c6377e3dfc9306ae2e2e73331b160
path: /utils/getSize.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: chjz1024/USTC-CS-Courses-Resource | snapshot_id: d739c7b7b07dbc0b15d456b952dd3572df872cde | revision_id: 605d0e704102328aa447a9365446cae45f382d14 | branch_name: refs/heads/master
visit_date: 2023-03-17T03:28:31.293403 | revision_date: 2019-01-09T03:14:30 | committer_date: 2019-01-09T03:14:30
github_id: 163,499,702 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: MIT | gha_event_created_at: 2018-12-29T09:53:27 | gha_created_at: 2018-12-29T09:53:27 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 826 | extension: py
content:
# coding: utf-8
import os
import sys
def formatSize(size):
    s = 'BKMGTP'  # unit letters for successive powers of 1024
    ct = 0
    # climb in steps of 2**10 until 1<<ct first exceeds size
    while size >= (1 << ct):
        ct += 10
    if ct >= 10: ct -= 10  # step back to the largest unit not exceeding size
    return '{sz:.2f}{a}'.format(sz=size/(1 << ct), a=s[ct//10])
def getSize(path='.'):
if os.path.isdir(path):
gen = os.walk(path)
li = []
for root, dirs, files in gen:
for f in files:
sz = os.path.getsize(os.path.join(root ,f))
li.append(sz)
#li.insert(('.',sum(i[1] for i in li)),0)
#size = [f'{i[0]}: {formatSize(i[1])}' for i in li]
return formatSize(sum(li))
else:
return formatSize(os.path.getsize(path))
if __name__ == "__main__":
items = sys.argv[1:]
for i in items:
print('{i}: {sz}'.format(i=i,sz =getSize(i)))
authors: ["[email protected]"]
blob_id: f8ff84248a0fd4517e75554fa1ca11928afacd36 | directory_id: 71cc62fe3fec8441794a725b7ce3037dc2723107 | content_id: 8ed24bb18b20fe8d253a62fe106b8a5cb16962dd
path: /ifreewallpapers/apps/profile/templatetags/avatars.py
detected_licenses: [] | license_type: no_license
repo_name: tooxie/django-ifreewallpapers | snapshot_id: bda676dc5a6c45329ad6763862fe696b3e0c354b | revision_id: 75d8f41a4c6aec5c1091203823c824c4223674a6 | branch_name: refs/heads/master
visit_date: 2020-05-21T12:50:36.907948 | revision_date: 2011-01-19T04:28:33 | committer_date: 2011-01-19T04:28:33
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,993 | extension: py
content:
# coding=UTF-8
from profile import settings as _settings
from profile.models import Profile, Avatar
# import Image
# from PythonMagick import Image
from utils.TuxieMagick import Image
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.template.defaultfilters import slugify
from django.template import Library, Node, TemplateSyntaxError, Variable
from os import path, makedirs
import time
register = Library()
class ResizedThumbnailNode(Node):
def __init__(self, width, height, user, default):
try:
self.width = int(width)
except:
self.width = Variable(width)
try:
self.height = int(height)
except:
self.height = Variable(height)
if not user:
self.username = 'user'
else:
self.username = user
self.default = default
def get_user(self, context):
return Variable(self.username).resolve(context)
def sizes_ok(self, with_original=False):
if with_original:
orig_width = self.orig_width
orig_height = self.orig_height
else:
orig_width, orig_height = _settings.DEFAULT_AVATAR_SIZE
orig_height = _settings.DEFAULT_AVATAR_SIZE[1]
return self.width >= orig_width and self.height >= orig_height
def both_sides_equals(self, fname):
return self.orig_width == self.orig_height
def resize(self, orig='', dest=''):
if not path.exists(orig):
# print orig, 'does not exists'
return None
if path.exists(dest):
# print dest, 'already exists'
return True
if not dest:
dest = orig
self.orig.scale(self.width, self.height)
if self.orig.write(dest):
# print 'resizing done, returning...'
return self.as_url(dest)
else:
print ' *** ERROR *** '
return None # damn! Close but no cigar...
def get_file(self, profile=None):
default = False
file_name = None
# username = slugify(profile.user.username)
file_root = _settings.AVATARS_DIR
        # The difference between self.default and default: the former says we
        # must return the default avatar, while the latter tracks whether we
        # are actually returning the default avatar or not.
if self.default:
default = True
else:
if profile is not None:
                # This try is in case profile.avatar holds a relation to an
                # avatar that does not exist in the avatars table.
try:
if profile.avatar:
file_name = profile.avatar.name
except:
profile.avatar = None
profile.save()
default = True
if not file_name or not path.exists(path.join(file_root, file_name)):
file_name = _settings.DEFAULT_AVATAR
default = True
avatar_file = path.join(file_root, file_name)
self.orig = Image(avatar_file)
self.orig_width = self.orig.size().width()
self.orig_height = self.orig.size().height()
if not self.sizes_ok(with_original=True):
if default:
file_name = file_name[file_name.rfind('/')+1:]
file_name = '%(width)i-%(name)s' % \
{'width': self.width, 'name': file_name}
new_avatar = path.join(file_root, file_name)
else:
new_avatar = '' # Hack alert!
self.resize(avatar_file, new_avatar)
return (file_name, default)
def as_url(self, path):
from profile.avatars import path_to_url
return path_to_url(path)
def render(self, context):
try:
# If size is not an int, then it's a Variable, so try to resolve it.
if not isinstance(self.width, int):
self.width = int(self.width.resolve(context))
self.user = self.get_user(context)
except Exception, e:
print e
return '' # just die...
profile = self.user.get_profile()
if not profile:
return ''
file_root = _settings.AVATARS_DIR
file_name, defaulting = self.get_file(profile)
file_path = path.join(file_root, file_name)
return self.as_url(path.join(file_root, file_name))
@register.tag('avatar')
def Thumbnail(parser, token):
bits = token.contents.split()
username, default = None, False
width, height = _settings.DEFAULT_AVATAR_SIZE
if len(bits) == 2:
if bits[1] == 'default':
default = True
else:
username = bits[1]
elif len(bits) == 3:
username = bits[1]
default = bits[2]
return ResizedThumbnailNode(width, height, username, default)
authors: ["[email protected]"]
blob_id: 1c517421f77ee2869587d51a140c71515ee4dfe5 | directory_id: 8bbfb5b937772066ea965058eb29e9f6362847c2 | content_id: 3fd22082bdf504d3080509d6f6bc37d4da7852f7
path: /infobase/tags/Q6_6_0Beta4/build.py
detected_licenses: [] | license_type: no_license
repo_name: QuakeEngines/QuArK_quake_editor-clone | snapshot_id: e1aeeb38e7ec8287835d643c3a0bfe5612f0b7f3 | revision_id: 412bf28a14d4e369479bf38408bd93e6a2612f87 | branch_name: refs/heads/master
visit_date: 2021-02-15T16:11:17.332239 | revision_date: 2020-03-04T14:28:50 | committer_date: 2020-03-04T14:28:50
github_id: 244,911,440 | star_events_count: 2 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 28,928 | extension: py
content:
#! /usr/bin/env python
#
# $Header$
#
import string, htmlentitydefs, time, os, sys
EXTENSION = ".txt"
OutputPath = "output"
#
# Text-to-HTML character conversion
#
TEXT_TO_HTML = { }
for c in range(256):
TEXT_TO_HTML[chr(c)] = chr(c)
for entity, character in htmlentitydefs.entitydefs.items():
TEXT_TO_HTML[character] = "&" + entity + ";"
TEXT_TO_HTML_NBSP = TEXT_TO_HTML.copy()
TEXT_TO_HTML_NBSP[" "] = " "
#
# ------------------------------------------------------------
#
#today = time.strftime("%d %b %Y", time.localtime(time.time()))
def text2html(text):
newtext = string.join(map(TEXT_TO_HTML.get, text), "")
    # Fix a problem with "<" ">" becoming "&lt;" "&gt;"
newtext = string.replace(newtext, "&lt;", "<")
newtext = string.replace(newtext, "&gt;", ">")
# Hmmm? Lets fix " " too
newtext = string.replace(newtext, "&nbsp;", " ")
return newtext
def text2html_nbsp(text, maxlen=999):
if (len(text) > maxlen):
text = text[:maxlen] + "..."
return string.join(map(TEXT_TO_HTML_NBSP.get, text), "")
def path2html(path):
return string.join(filter(None, string.split(path, "/"))+["html"], ".")
def climbpath(curpath, relpath):
if relpath[:3] == "../" :
return climbpath(curpath[:-1], relpath[3:])
else:
if verboseMode:
print 'CURPATH ' + `curpath`
if curpath != []:
newpath = string.join(curpath, '/') + '/' + relpath
else:
newpath = relpath
if verboseMode:
print 'NEWPATH ' + `newpath`
return newpath
def relpath(curpath, relpath):
if relpath[:2] == './':
return curpath + relpath[2:]
elif relpath[:3] == '../':
track = string.split(curpath, '/')
return climbpath(track[:-1], relpath)
return relpath
def findref(root, path, name, fkw, extraargs):
if verboseMode:
print 'FKW: ' + `fkw["path"]`
# def ref(refnormal, refwithname, kw, name=name, extraargs):
def ref(refnormal, refwithname, kw, name=name):
if name == "":
return refnormal % kw
else:
kw['refname'] = name
return refwithname % kw
path = relpath(fkw["path"], path)
if verboseMode:
print 'PATH: ' + `path`
print 'name: ' + `name`
path0 = path
path = string.split(path, "/")
path1 = ""
while path:
path1 = path1 + path[0] + "/"
for folder in root.folders:
if folder.path == path1:
root = folder
del path[0]
break
else:
if len(path) == 1:
for subfiles in root.files:
if subfiles.kw["hrefaname"] == path[0]:
return ref(REFFILE, REFFILE_NAME, subfiles.kw)
raise "Reference not found to " + path0 + " in " + fkw["htmlfile"]
return ref(REFDIR, REFDIR_NAME, root.kw)
def proc_g(kw, words):
# '<g>...</g>' for Glossary-link
# Ugly hack! This needs proper fixing, and not this semi-hardcoded bullshit.
if words[:1] == '.':
namelink = "fileext"
elif words[:1] >= '0' and words[:1] <= '9':
namelink = "numbers"
else:
namelink = string.lower(words[:1])
return "<a href=\"glossary.html#%s\">%s</a>" % (namelink, words)
def proclink(kw, targetname, extraargs): #DanielPharos
# I know <link> exists in HTML, but we're not using it here, and it just seemed the best name of this!
from links import linksdict
if linksdict.has_key(extraargs):
link = linksdict[extraargs]
else:
raise "unknown link: "+extraargs
return "<a target=\"_blank\" href=\"%s\">%s</a>" % (link, targetname)
def procpic(kw, path, extraargs): #tiglari
if (string.find(path, "/") > -1) or (string.find(path, "\\") > -1) or (path[:1] == "."):
raise "Illegal picture filename: [%s]" % path
picrl = string.join(filter(None, string.split(kw["path"], "/"))+[path], ".")
if extraargs == '':
img = '<img src="%s">' % (picrl)
else:
img = '<img %s src="%s">' % (extraargs, picrl)
try:
data = open(kw["path"]+path, "rb").read()
except:
raise "open-error for file \"%s\"" % (kw["path"]+path)
f = open(OutputPath+"/"+picrl, "wb")
f.write(data)
f.close()
# self.forgotten.remove(path)
return img
def procrsc(kw, path): #tiglari
rscrl = string.join(filter(None, string.split(kw["path"], "/"))+[path], ".")
data = open(kw["path"]+path, "rb").read()
f = open(OutputPath+"/"+rscrl, "wb")
f.write(data)
f.close()
# self.forgotten.remove(path)
return '"%s"' % rscrl
def proczip(kw, path): #tiglari
# self.forgotten.remove(path)
if localMode:
data = open("zips/"+path, "rb").read()
if not os.path.exists(OutputPath+"/zips"):
os.mkdir(OutputPath+"/zips")
f = open(OutputPath+"/zips/"+path, "wb")
f.write(data)
f.close()
return '<a href="%s">%s</a>' % (path, path)
else:
return '<a href="%s%s">%s</a>' % (ZIPLOC, path, path)
def procact(kw, actionstring):
# An 'action' is usually composed of a series of menu-actions the user
# has to drill into. An example: "<act> RMB | Curves|Arch </act>"
actionstring = string.replace(actionstring, " | ", " -> ")
actionstring = string.replace(actionstring, "|", " -> ")
return ACT_HTML % actionstring
def processtext(root, self, data):
def perform_tag_action(tag, line, flags, root, kw):
def perform_ref_action(extraargs, datastring, root, kw):
datastring = string.strip(datastring)
try:
# figure out, if there is a alternative text for the link-reference
idx = string.index(datastring, '\\')
pathname = string.strip(datastring[:idx])
refname = string.strip(datastring[idx+1:])
except (ValueError):
pathname = datastring
refname = "";
return findref(root, pathname, refname, kw, string.strip(extraargs))
def perform_link_action(extraargs, datastring, root, kw):
return proclink(kw, string.strip(datastring), string.strip(extraargs))
def perform_pic_action(extraargs, datastring, root, kw):
return procpic(kw, string.strip(datastring), string.strip(extraargs))
def perform_zip_action(datastring, root, kw):
return proczip(kw, string.strip(datastring))
def perform_rsc_action(datastring, root, kw):
return procrsc(kw, string.strip(datastring))
def perform_act_action(datastring, root, kw):
return procact(kw, string.strip(datastring))
def perform_g_action(datastring, root, kw):
return proc_g(kw, string.strip(datastring))
if (tag[:5] == "<code"):
replacewith = "<div class=\"doccode\"><pre>"
flags["preformatmode"] = flags["preformatmode"] + 1
elif (tag[:6] == "</code"):
replacewith = "</pre></div>"
if (flags["preformatmode"] > 0):
flags["preformatmode"] = flags["preformatmode"] - 1
elif (tag[:4] == "<tt>"):
replacewith = " <tt>"
elif (tag[:5] == "</tt>"):
replacewith = "</tt> "
elif (tag[:4] == "<ref"):
end_tag = string.find(line, "</ref>")
if end_tag == -1:
# A <ref>-tag must have a </ref>-tag on the same line, else this code won't work.
raise "<ref>-tag without any </ref>-tag on same line! <File>.TXT title: \"%s\"" % kw["title"]
replacewith = perform_ref_action(tag[4:-1], line[:end_tag], root, kw)
line = line[end_tag+len("</ref>"):]
elif (tag[:5] == "<link"):
end_tag = string.find(line, "</link>")
if end_tag == -1:
# A <link>-tag must have a </link>-tag on the same line, else this code won't work.
raise "<link>-tag without any </link>-tag on same line! <File>.TXT title: \"%s\"" % kw["title"]
replacewith = perform_link_action(tag[5:-1], line[:end_tag], root, kw)
line = line[end_tag+len("</link>"):]
elif (tag[:4] == "<img"):
end_tag = string.find(line, "</img>")
if end_tag == -1:
# A <img>-tag must have a </img>-tag on the same line, else this code won't work.
raise "<img>-tag without any </img>-tag on same line! <File>.TXT title: \"%s\"" % kw["title"]
replacewith = perform_pic_action(tag[4:-1], line[:end_tag], root, kw)
line = line[end_tag+len("</img>"):]
elif (tag[:4] == "<pic"):
end_tag = string.find(line, "</pic>")
if end_tag == -1:
# A <pic>-tag must have a </pic>-tag on the same line, else this code won't work.
raise "<pic>-tag without any </pic>-tag on same line! <File>.TXT title: \"%s\"" % kw["title"]
replacewith = perform_pic_action(tag[4:-1], line[:end_tag], root, kw)
line = line[end_tag+len("</pic>"):]
elif (tag[:4] == "<zip"):
end_tag = string.find(line, "</zip>")
if end_tag == -1:
# A <zip>-tag must have a </zip>-tag on the same line, else this code won't work.
raise "<zip>-tag without any </zip>-tag on same line! <File>.TXT title: \"%s\"" % kw["title"]
replacewith = perform_zip_action(line[:end_tag], root, kw)
line = line[end_tag+len("</zip>"):]
elif (tag[:4] == "<rsc"):
end_tag = string.find(line, "</rsc>")
if end_tag == -1:
# A <rsc>-tag must have a </rsc>-tag on the same line, else this code won't work.
raise "<rsc>-tag without any </rsc>-tag on same line! <File>.TXT title: \"%s\"" % kw["title"]
replacewith = perform_rsc_action(line[:end_tag], root, kw)
line = line[end_tag+len("</rsc>"):]
elif (tag[:4] == "<act"):
end_tag = string.find(line, "</act>")
if end_tag == -1:
# A <act>-tag must have a </act>-tag on the same line, else this code won't work.
raise "<act>-tag without any </act>-tag on same line! <File>.TXT title: \"%s\"" % kw["title"]
replacewith = perform_act_action(line[:end_tag], root, kw)
line = line[end_tag+len("</act>"):]
elif (tag[:2] == "<g"):
end_tag = string.find(line, "</g>")
if end_tag == -1:
# A <g>-tag must have a </g>-tag on the same line, else this code won't work.
raise "<g>-tag without any </g>-tag on same line! <File>.TXT title: \"%s\"" % kw["title"]
replacewith = perform_g_action(line[:end_tag], root, kw)
line = line[end_tag+len("</g>"):]
elif (tag[:4] == "</i>"):
replacewith = tag
if (line[:6] <> " "):
# Force in a non-breakable-space after end-of-italic.
replacewith = replacewith + " "
elif (tag[:2] == "< "):
raise "Illegal use of '<'-char. Use '<' if a single '<' is needed! <File>.TXT title: \"%s\"" % kw["title"]
else:
replacewith = tag
if (tag[:4] == "<pre"):
flags["preformatmode"] = flags["preformatmode"] + 1
elif (tag[:5] == "</pre"):
if (flags["preformatmode"] > 0):
flags["preformatmode"] = flags["preformatmode"] - 1
return replacewith, line, flags
paragraf_tags_added = 0
flags = { }
flags["prevlineempty"] = 1
flags["preformatmode"] = 0
flags["inhtmlcomment"] = 0
for line in self.text:
correctedline = ""
trimmedline = string.strip(line)
if not trimmedline:
correctedline = "\n"
flags["prevlineempty"] = 1
if (paragraf_tags_added > 0) and (flags["preformatmode"] == 0) and (flags["inhtmlcomment"] == 0):
correctedline = "</p>"
paragraf_tags_added = paragraf_tags_added - 1
else:
# Scan through the 'line' in search for "<tag's" to replace/perform actions on
while len(line) > 0:
if (flags["inhtmlcomment"] == 1):
endofcomment_found = string.find(line, "-->")
if endofcomment_found == -1:
# We're still in HTML-comment
correctedline = correctedline + line
line = ""
else:
# Exiting HTML-comment mode
correctedline = correctedline + line[:endofcomment_found+len("-->")]
line = line[endofcomment_found+len("-->"):]
flags["inhtmlcomment"] = 0
else:
startchar_tag_found = string.find(line, "<")
if startchar_tag_found == -1:
# No "<tag" were found, so just copy the entire line
correctedline = correctedline + text2html(line)
line = ""
else:
# Found a "<tag". Take anything before that, and append to 'correctedline'
correctedline = correctedline + text2html(line[:startchar_tag_found])
line = line[startchar_tag_found:]
if (line[:4] == "<!--"):
flags["inhtmlcomment"] = 1
correctedappend = line[:len("<!--")]
line = line[len("<!--"):]
else:
endchar_tag_found = string.find(line, ">")
if endchar_tag_found == -1:
# there must exist an endchar_tag on the same line!
raise "'%s' without ending '>' problem! <File>.TXT title: \"%s\"" % (line[:5], self.kw["title"])
else:
tag = (line[:endchar_tag_found+1])
if (tag == "<p>") or (tag == "</p>") or (tag[:5] == "<html") or (tag[:6] == "</html"):
# do not allow these tags!
raise "The %s tag is not allowed! <File>.TXT title: \"%s\"" % (tag, self.kw["title"])
correctedappend, line, line_flags = perform_tag_action(tag, line[endchar_tag_found+1:], flags, root, self.kw)
correctedline = correctedline + correctedappend
if flags["prevlineempty"] == 1:
if (flags["preformatmode"] == 0) and (flags["inhtmlcomment"] == 0):
# prepend with paragraf-tag
correctedline = "<p>" + correctedline
paragraf_tags_added = paragraf_tags_added + 1
flags["prevlineempty"] = 0
data.append(correctedline)
for ptags in range(paragraf_tags_added):
data.append("</p>")
def parse(file):
try:
f = open(file, "r")
except:
raise "File missing: %s" % file
try:
kw = { }
# Read the beginning non-empty lines, which should contain "key: value"'s
while 1:
line = string.strip(f.readline())
if not line: # empty line found, stop reading for "key: value"'s
break
keysplit = string.find(line, ":")
if keysplit == -1: # not a valid keypair; we're probably done
break
key = string.strip(line[:keysplit])
value = string.strip(line[keysplit+1:])
try:
data = kw[key]
except (KeyError):
kw[key] = value
else:
kw[key] = data+"\n"+value
restdata = f.readlines()
finally:
f.close()
try:
# Doesn't work in versions lower than Python 2.2
return kw, restdata, os.stat(file).st_mtime
except:
return kw, restdata, os.stat(file)[8] # Decker - changed from [9] to [8] to get the right file-modification-date on Win2K
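# Illustrative sketch (editorial, not from the repo): parse() expects files
# that start with "key: value" header lines, then a blank line, then body
# text. For example:
#
#   title: Introduction
#   subdir: setup mapping
#   desc: overview faq
#
#   Body text, which processtext() above turns into HTML, expanding tags
#   such as <ref>...</ref>, <pic>...</pic> and <zip>...</zip> via
#   perform_tag_action().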
class File:
def __init__(self, filename):
self.filename = filename
self.kw, self.text, self.lastmodifydate = parse(filename)
class Folder:
def __init__(self, path, classif, parents, prev=None):
self.prev = prev
self.parents = parents
self.path = path
if verboseMode:
print 'Path: '+self.path
self.classif = classif
if classif: # Decker
shortname = string.join(map(lambda s: s+".", classif), "") + " "
else: # Decker
shortname = "" # Decker - Make the 'index.html' title _not_ prefixed with a single space
if verboseMode:
print shortname,
self.kw, self.text, lastmodifydate = parse(self.path + "index" + EXTENSION)
s = self.kw["title"]
if verboseMode:
print s
self.kw["htmltitle"] = text2html_nbsp(s)
self.kw["htmltitleshort"] = text2html_nbsp(s, 25) # Decker - Try to prevent text-wrapping, so make it max 25 characters long
self.kw["classif"] = shortname
self.kw["path"] = path
if not classif:
shortname = "index.html"
else:
shortname = path2html(path)
self.kw["htmlfile"] = shortname
self.kw["navprev"] = NAVNOPREV
self.kw["navup"] = NAVNOUP
self.kw["navnext"] = NAVNONEXT
if parents:
self.kw["parenthtmlfile"] = parents[-1].kw["htmlfile"]
self.kw["navup"] = NAVUP % parents[-1].kw
        # Recurse into sub-folders
self.folders = []
self.forgotten = map(string.lower, os.listdir("./" + self.path))
self.forgotten.remove("index" + EXTENSION)
self.kw["next"] = ""
self.kw["nextfooter"] = ""
htmlpath = path2html(path)
previous = None
for foldername in string.split(self.kw.get("subdir", "")):
folder = Folder(path + foldername + "/", classif + (str(len(self.folders) + 1),), parents + (self,), previous)
if folder.lastmodifydate > lastmodifydate:
lastmodifydate = folder.lastmodifydate
self.folders.append(folder)
self.forgotten.remove(foldername)
previous = folder
self.files = []
for filename in string.split(self.kw.get("desc", "")):
file = File(self.path + filename + EXTENSION)
if file.lastmodifydate > lastmodifydate:
lastmodifydate = file.lastmodifydate
file.kw["htmlfile"] = shortname
file.kw["hrefaname"] = filename
file.kw["updateday"] = time.strftime("%d %b %Y", time.localtime(file.lastmodifydate))
file.kw["path"] = path # tiglari @: Gotta go away!
self.files.append(file) #@(kw, text)
self.forgotten.remove(filename + EXTENSION)
self.lastmodifydate = lastmodifydate
self.kw["updateday"] = time.strftime("%d %b %Y", time.localtime(lastmodifydate))
# Setup backwards navigation links
if not parents:
lvl = MAINHEADERLVL
else:
lvl = SUBHEADERLVL
for folder in parents:
lvl = lvl + HEADERLVL % folder.kw
self.kw["headerlvl"] = lvl
def navigation(self):
# Setup navigation links (Prev-Up-Next) # Decker
try:
prev = self.parents[-1]
i = len(prev.folders) - 1
while (i >= 0 and prev.folders[i] != self):
i = i - 1
if (i > 0):
prev = prev.folders[i - 1]
while (len(prev.folders) > 0):
prev = prev.folders[-1]
prev.kw["navnext"] = NAVNEXT % self.kw
self.kw["navprev"] = NAVPREV % prev.kw
except:
pass
for folder in self.folders:
folder.navigation()
def writefiles(self, root, filewriter):
if verboseMode:
print 'writing file: ' + self.kw["htmlfile"], " [%s]" % self.kw["title"]
filewriter(self.kw["htmlfile"], self.makefile(root))
for folder in self.folders:
folder.writefiles(root, filewriter)
def makefile(self, root):
data = [ HEADER_BEGIN % self.kw ]
processtext(root, self, data)
data.append(HEADER_END % { })
if self.folders:
data.append(SUBDIR_BEGIN % self.kw)
for folder in self.folders:
data.append(SUBDIR_ITEM % folder.kw)
if folder.folders:
data.append(SUBSUBDIR_BEGIN % folder.kw)
for subfolder in folder.folders:
data.append(SUBSUBDIR_ITEM % subfolder.kw)
data.append(SUBSUBDIR_END % folder.kw)
if folder.files:
if len(folder.files) < 11:
data.append(SUBFILES_BEGIN % folder.kw)
for subfiles in folder.files:
data.append(SUBFILES_ITEM % subfiles.kw)
data.append(SUBFILES_END % folder.kw)
else:
# If more than 10 files, put into two columns
data.append(SUBFILES_TABLEBEGIN % { });
data.append(SUBFILES_BEGIN % folder.kw)
cnt = 0
for subfiles in folder.files:
if cnt == ((len(folder.files)+1) / 2):
data.append(SUBFILES_END % folder.kw)
data.append(SUBFILES_TABLEMIDDLE % { });
data.append(SUBFILES_BEGIN % folder.kw)
data.append(SUBFILES_ITEM % subfiles.kw)
cnt = cnt + 1
data.append(SUBFILES_END % folder.kw)
data.append(SUBFILES_TABLEEND % { });
data.append(SUBDIR_END % self.kw)
if self.files:
data.append(FILES_BEGIN % self.kw)
if len(self.files) < 11:
data.append(FILES_ITEMBEGIN % self.kw)
for subfiles in self.files:
data.append(FILES_ITEM % subfiles.kw)
data.append(FILES_ITEMEND % self.kw)
else:
# If more than 10 files, put into two columns
data.append(SUBFILES_TABLEBEGIN % { });
data.append(FILES_ITEMBEGIN % self.kw)
cnt = 0
for subfiles in self.files:
if cnt == ((len(self.files)+1) / 2):
data.append(FILES_ITEMEND % self.kw)
data.append(SUBFILES_TABLEMIDDLE % { });
data.append(FILES_ITEMBEGIN % self.kw)
data.append(FILES_ITEM % subfiles.kw)
cnt = cnt + 1
data.append(FILES_ITEMEND % self.kw)
data.append(SUBFILES_TABLEEND % { });
data.append(FILES_MIDDLE % self.kw)
for subfiles in self.files:
data.append(FILE_BEGIN % subfiles.kw)
processtext(root, subfiles, data)
data.append(FILE_END % subfiles.kw)
data.append(FILES_END % self.kw)
data.append(FOOTER % self.kw)
return data
def viewforgotten(self):
for s in self.forgotten:
if s[-1:]!="~" and s!="cvs" and string.find(s,'.png')==-1 and string.find(s,'.jpg')==-1 and string.find(s,'.gif')==-1:
print "*** NOTE: file '%s' not found in index" % (self.path+s)
for folder in self.folders:
folder.viewforgotten()
def defaultwriter(filename, data, writemode="w"):
# write the target file
f = open(OutputPath+"/"+filename, writemode)
f.writelines(data)
f.close()
def run(filewriter):
def printline(text):
if len(text)>77-3-1:
print text
else:
print "---" + text + "-"*(80-len(text)-3-1)
# load format file
execfile("format.py", globals(), globals())
# recursively load everything in memory
printline("FINDING ALL FILES")
root = Folder("", (), ())
# recursively set navigation links
printline("SETTING UP NAVIGATION")
root.navigation() # Decker
# recursively write everything to disk
printline("WRITING FILES TO DISK")
root.writefiles(root, filewriter)
for filename in string.split(root.kw.get("extrafiles_text", "")):
filewriter(filename, [open(filename, "r").read()])
for filename in string.split(root.kw.get("extrafiles_binary", "")):
filewriter(filename, [open(filename, "rb").read()], "wb")
printline("PRINTING FORGOTTEN FILES")
root.forgotten = []
root.viewforgotten()
localMode=0
verboseMode=0
for flag in sys.argv:
if flag=='-local':
localMode=1
if flag=='-verbose':
verboseMode=1
if not os.path.exists(OutputPath):
os.mkdir(OutputPath)
run(defaultwriter)
#
# $Log$
# Revision 1.29 2008/09/20 17:19:28 danielpharos
# Fix climbing path all the way back to main path not working.
#
# Revision 1.28 2008/08/09 19:50:08 danielpharos
# Fixed a double space appearing in img-tags
#
# Revision 1.27 2008/08/09 18:53:19 danielpharos
# Fix inconsistent handling of percent-signs (fixes double percent-signs in output).
#
# Revision 1.26 2008/07/21 19:40:00 danielpharos
# Re-upload new build files: fixed incompatibilities with older Python versions.
#
# Revision 1.25 2008/07/15 18:41:25 cdunde
# To roll back changing of format.txt to format.py and all changes to build.py since May 17, 2008 that broke building of the InfoBase.
#
# Revision 1.24 2008/05/18 15:15:32 danielpharos
# Added another forbidden tag
#
# Revision 1.23 2008/05/18 12:44:59 danielpharos
# Made a class out of files to make it all more readable
#
# Revision 1.22 2008/05/18 12:17:33 danielpharos
# Nicely close file handle after parsing the file + possibly faster keyword-parsing
#
# Revision 1.21 2008/05/17 22:22:21 danielpharos
# Small internal changes.
#
# Revision 1.20 2003/07/09 21:47:45 cdunde
# To correct case setting of web page links.
#
# Revision 1.19 2003/01/02 06:36:32 rowdy
# do not warn about pictures which are not in the index
#
# Revision 1.18 2002/05/03 17:37:58 decker_dk
# Added two seperator lines, to indicate what step have been executed.
#
# Revision 1.17 2001/07/25 19:17:02 decker_dk
# Added exception-handling when opening files thats missing.
#
# Revision 1.16 2001/02/28 19:54:10 tiglari
# removed extraarg from ref in findref
#
# Revision 1.15 2001/02/28 19:12:25 decker_dk
# Added <g>...</g> Glossary-links. Though not the best method.
#
# Revision 1.14 2001/02/25 16:38:22 decker_dk
# Added <act> </act> functionality
#
# Revision 1.13 2001/02/20 19:33:14 decker_dk
# Changed to .PNG image-format, and a comment in BUILD.PY
#
# Revision 1.12 2001/02/15 19:43:16 decker_dk
# Recoded the BUILD.PY to support somewhat basic-HTML.
#
# Revision 1.11 2000/11/12 06:31:50 tiglari
# <REF> file \ name
# <ZIP> file.zip
#
# Revision 1.10 2000/11/02 06:36:24 tiglari
# support for explicit names in REF's
#
# Revision 1.9 2000/11/01 21:15:23 decker_dk
# Misc. updates.
#
# Revision 1.8 2000/10/29 03:04:04 tiglari
# added <rsc> (resource) tag to get a resource renamed & shifted into the output
# in the same style as <pic>, but only the quoted new name is returned into
# the doc, so that the thing can be part of a normal <img > etc. tag. eg:
# ...<img src=
# <rsc>coolpic.jpg
# width=200 height=100>...
#
# Revision 1.7 2000/10/24 19:43:13 decker_dk
# Prev/Up/Next navigation, new CSS and misc. changes.
#
# Revision 1.6 2000/10/19 20:06:39 tiglari
# relative paths (./,../) for <pic> and <ref>
# cross-links to next added to output
#
# Revision 1.5 2000/10/18 16:39:34 tiglari
# added image-handling facility, preliminary
#
#
|
[
"nobody@5419a3ea-6bc3-475c-b79b-167d7c3fbd5f"
] |
nobody@5419a3ea-6bc3-475c-b79b-167d7c3fbd5f
|
1d03f8ab19d1108f3100cb736df9f97204ba0258
|
a90e5b2f4cf3a3919bd082296834c9f0efa99b71
|
/code/python/invertTree.py
|
dd798d65bced2df2badc5643ff9ec2179c1500cd
|
[] |
no_license
|
yjshiki/leetcode
|
c6330e53fa3db463909787ca882a702e4952a2a1
|
628e732c15afe1da7f3aa690bbbc27866fcb1188
|
refs/heads/master
| 2020-06-05T00:38:54.136840 | 2020-05-14T04:29:25 | 2020-05-14T04:29:25 | 192,253,725 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 484 |
py
|
class Solution(object):
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
        res = root
        stack = []
        # Iterative traversal: walk down the current left spine, pushing each
        # node; on pop, swap the node's children, then continue into the old
        # right subtree through the new .left pointer. Every node is visited
        # and swapped exactly once.
        while root or stack:
            if root:
                stack.append(root)
                root = root.left
            else:
                root = stack.pop()
                root.left, root.right = root.right, root.left
                root = root.left
        return res
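

# --- Illustrative local harness (an addition, not part of the LeetCode
# submission). LeetCode injects its own TreeNode class; this minimal
# stand-in exists only so the solution can be exercised locally.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


if __name__ == "__main__":
    # Build a small tree and invert it:   4             4
    #                                    / \     ->    / \
    #                                   2   7         7   2
    root = TreeNode(4, TreeNode(2), TreeNode(7))
    Solution().invertTree(root)
    assert root.left.val == 7 and root.right.val == 2
    print("inverted OK")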
|
[
"[email protected]"
] | |
ccb373ceffa902e5f1e9bd4adf964c88e6abf2b0
|
db5f520bf54122c11640a964bb50a47a6aeef8d6
|
/readthedocs/projects/search_indexes.py
|
8ec2d7a8b109aa106b5e28111a3eaf86fc2645aa
|
[
"MIT"
] |
permissive
|
jasongrlicky/readthedocs.org
|
4f0f74e2ffc3647f68349aa68dbac5b80633c742
|
538e9312527c085e665c101d66d37ba44b64e88e
|
refs/heads/master
| 2020-12-25T10:08:35.805404 | 2011-06-24T18:52:10 | 2011-06-24T18:52:10 | 1,416,718 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,767 |
py
|
# -*- coding: utf-8 -*-
import os
import codecs
import BeautifulSoup
from django.utils.html import strip_tags
from haystack.indexes import *
from haystack import site
from projects.models import File, ImportedFile, Project
class ProjectIndex(SearchIndex):
text = CharField(document=True, use_template=True)
author = CharField(model_attr='user')
title = CharField(model_attr='name')
description = CharField(model_attr='description')
repo_type = CharField(model_attr='repo_type')
class FileIndex(SearchIndex):
text = CharField(document=True, use_template=True)
author = CharField(model_attr='project__user', faceted=True)
project = CharField(model_attr='project__name', faceted=True)
title = CharField(model_attr='heading')
# Should probably make a common subclass for this and FileIndex.
class ImportedFileIndex(SearchIndex):
text = CharField(document=True)
author = CharField(model_attr='project__user', faceted=True)
project = CharField(model_attr='project__name', faceted=True)
title = CharField(model_attr='name')
def prepare_text(self, obj):
try:
full_path = obj.project.rtd_build_path()
to_read = os.path.join(full_path, obj.path.lstrip('/'))
content = codecs.open(to_read, encoding="utf-8", mode='r').read()
bs = BeautifulSoup.BeautifulSoup(content)
soup = bs.find("div", {"class": "document"})
return strip_tags(soup).replace(u'¶', '')
except (AttributeError, IOError) as e:
if 'full_path' in locals():
print "%s not found: %s " % (full_path, e)
#obj.delete()
site.register(File, FileIndex)
site.register(ImportedFile, ImportedFileIndex)
site.register(Project, ProjectIndex)
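
# Note (illustrative; an assumption based on Haystack conventions, not taken
# from this repository): with use_template=True, the document text is rendered
# from a template such as search/indexes/projects/project_text.txt, e.g.:
#
#   {{ object.name }}
#   {{ object.description }}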
|
[
"[email protected]"
] | |
c6bed9b93e57f2c5b784b15df3dd9cb422697a7a
|
ba7640cffff3085f045d69f37735de0f759e66c3
|
/__init__.py
|
487deef5265afc2b0d3dd4e984777a3440475805
|
[
"Apache-2.0"
] |
permissive
|
luoqingfu/reudom
|
f5e88292a7e8cdbb372340795bc5ec5c85a26931
|
3c52ff4aa2cd772260bbf3575f2844d76bc2f16a
|
refs/heads/master
| 2020-12-07T13:23:29.972584 | 2019-12-24T14:57:05 | 2019-12-24T14:57:05 | 232,730,930 | 1 | 0 |
Apache-2.0
| 2020-01-09T05:38:10 | 2020-01-09T05:38:09 | null |
UTF-8
|
Python
| false | false | 1,333 |
py
|
#!/usr/bin/python
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# unittest.TestCase is imported first so that the framework's own TestCase
# (from .case, below) wins the name when `import reudom` is used.
from unittest import TestCase
from .running.test_runner import main
from .case import TestCase
from .testdata import ddt, ddt_class
from .skip import skip
from requests import request
from requests import *
from Crypto.Cipher import AES
from Crypto import *
from Crypto import Cipher, Hash, Protocol, PublicKey, Random, SelfTest, Signature, Util
from .CryptoAES.aesEncrypt import aesCrypt
__author__ = "Barry"
__version__ = "1.1.6"
__description__ = "Automated testing framework based on requests and unittest interface."
|
[
"[email protected]"
] | |
c93f3061445440c61fac3d56c93db2d8ca6ae38f
|
171a504d38951da46ac8b6f594477f6798f18d00
|
/applications/StructuralMechanicsApplication/python_scripts/trilinos_structural_mechanics_implicit_dynamic_solver.py
|
f173ddd9c60ca0d19eb7cdd8975b30abd6b870c0
|
[] |
no_license
|
adrigzr/Kratos
|
e3d385c10e6a9661f95dfbf998dca3844b7d14c1
|
9a281b74acb00f5590e0fec1bd3caa34255e5d9b
|
refs/heads/master
| 2021-07-24T01:18:50.128534 | 2017-10-20T08:29:02 | 2017-10-20T08:29:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,063 |
py
|
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
#import kratos core and applications
import KratosMultiphysics
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
import KratosMultiphysics.mpi as mpi
import KratosMultiphysics.TrilinosApplication as TrilinosApplication
import KratosMultiphysics.MetisApplication as MetisApplication
import trilinos_structural_mechanics_solver
# Check that KratosMultiphysics was imported in the main script
KratosMultiphysics.CheckForPreviousImport()
def CreateSolver(main_model_part, custom_settings):
return TrilinosImplicitMechanicalSolver(main_model_part, custom_settings)
class TrilinosImplicitMechanicalSolver(trilinos_structural_mechanics_solver.TrilinosMechanicalSolver):
"""The trilinos structural mechanics implicit dynamic solver.
Public member variables:
dynamic_settings -- settings for the implicit dynamic solvers.
For more information see:
structural_mechanics_solver.py
trilinos_structural_mechanics_solver.py
"""
def __init__(self, main_model_part, custom_settings):
# Set defaults and validate custom settings.
self.dynamic_settings = KratosMultiphysics.Parameters("""
{
"damp_factor_m" :-0.3
}
""")
self.validate_and_transfer_matching_settings(custom_settings, self.dynamic_settings)
# Validate the remaining settings in the base class.
if not custom_settings.Has("scheme_type"): # Override defaults in the base class.
custom_settings.AddEmptyValue("scheme_type")
custom_settings["scheme_type"].SetString("Newmark")
# Construct the base solver.
super(TrilinosImplicitMechanicalSolver, self).__init__(main_model_part, custom_settings)
def AddVariables(self):
super(TrilinosImplicitMechanicalSolver, self).AddVariables()
# Add dynamic variables.
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.ACCELERATION)
if self.settings["rotation_dofs"].GetBool():
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.ANGULAR_VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.ANGULAR_ACCELERATION)
print("::[TrilinosImplicitMechanicalSolver]:: Variables ADDED")
#### Private functions ####
def _create_solution_scheme(self):
scheme_type = self.settings["scheme_type"].GetString()
if (scheme_type == "Newmark"):
damp_factor_m = 0.0
elif (scheme_type == "Bossak"):
damp_factor_m = self.dynamic_settings["damp_factor_m"].GetDouble()
else:
raise Exception("Unsupported scheme_type: " + scheme_type)
mechanical_scheme = TrilinosApplication.TrilinosResidualBasedBossakDisplacementScheme(damp_factor_m)
return mechanical_scheme
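
    # Illustrative configuration sketch (an assumption-based example, not part
    # of the original module): selecting the Bossak scheme with a custom alpha
    # would look roughly like
    #
    #   custom_settings = KratosMultiphysics.Parameters("""
    #   {
    #       "scheme_type"   : "Bossak",
    #       "damp_factor_m" : -0.3
    #   }
    #   """)
    #
    # "scheme_type" is consumed by _create_solution_scheme() above and
    # "damp_factor_m" by self.dynamic_settings in __init__.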
|
[
"[email protected]"
] | |
888d90fbd5a780402d5392e40b30f69a2708ef1a
|
396841279a035033487b6c9fd5db6fc699b288af
|
/backend/chat/models.py
|
07c413103e65923af2fa18a0731c2326de9ffa76
|
[] |
no_license
|
Shamsulhaq/realtime-chat
|
b9d9b753252f70e6d682a6f86630474408bebb40
|
5462bcb1f42787d3c8e4a62037c7ef401bcce077
|
refs/heads/master
| 2023-07-06T22:48:14.355157 | 2021-08-05T11:37:34 | 2021-08-05T11:37:34 | 393,011,449 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,252 |
py
|
# at chatapp/backend/chat/models.py
from django.contrib.auth import get_user_model
from django.db import models
from django.urls import reverse
User = get_user_model()
class Conversation(models.Model):
user_one = models.ForeignKey(
User,
related_name='participent',
on_delete=models.CASCADE
)
user_two = models.ForeignKey(
User,
related_name='participent_two',
on_delete=models.CASCADE
)
timestamp = models.DateTimeField(
auto_now_add=True
)
class Meta:
unique_together = ['user_one', 'user_two']
def __str__(self):
return str(self.id)
def last_message(self):
return self.messages.all().last()
def conversation_url(self):
return reverse("chats:room", kwargs={"room_name": self.pk})
class Message(models.Model):
conversation = models.ForeignKey(
Conversation,
related_name='messages',
on_delete=models.CASCADE
)
author = models.ForeignKey(
User,
related_name='sender',
on_delete=models.CASCADE
)
content = models.TextField()
timestamp = models.DateTimeField(
auto_now_add=True
)
def __str__(self):
return self.content
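

# Illustrative usage sketch (hypothetical users and data; an addition, not
# part of the original module):
#
#   alice, bob = User.objects.get(pk=1), User.objects.get(pk=2)
#   convo = Conversation.objects.create(user_one=alice, user_two=bob)
#   Message.objects.create(conversation=convo, author=alice, content="hi")
#   convo.last_message().content   # -> "hi"
#   convo.conversation_url()       # -> URL of the room, via reverse("chats:room", ...)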
|
[
"[email protected]"
] | |
b4730082313d847d8b38fedb02dcf83f354fb541
|
da8471ad2f90a3efa31acb0c986020357cdb5e4c
|
/confidant/scripts/archive.py
|
1f93f9e044726de0bdc2c6fdcfd05a0b21cbfc89
|
[
"Apache-2.0"
] |
permissive
|
lyft/confidant
|
af18cc7085303ee5bab873c78567e14ae48630ab
|
8033824e0b3c156ee5588e5b31f8dff8e421a01e
|
refs/heads/master
| 2023-09-01T20:46:07.051295 | 2023-08-21T17:01:49 | 2023-08-21T17:01:49 | 42,324,225 | 1,918 | 146 |
Apache-2.0
| 2023-09-06T21:20:59 | 2015-09-11T18:02:12 |
Python
|
UTF-8
|
Python
| false | false | 2,789 |
py
|
import sys
import logging
from datetime import datetime
from flask_script import Command, Option
from confidant import settings
from confidant.models.credential import Credential
from confidant.services import credentialmanager
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
class ArchiveCredentials(Command):
"""
Command to permanently archive credentials to an archive dynamodb table.
"""
option_list = [
Option(
'--days',
dest='days',
type=int,
            help=('Permanently archive disabled credentials last modified'
                  ' more than this many days ago (mutually exclusive with'
                  ' --ids)'),
),
Option(
'--force',
action='store_true',
dest='force',
default=False,
            help=('By default, this script runs in dry-run mode; this option'
' forces the run and makes the changes indicated by the'
' dry run'),
),
Option(
'--ids',
dest='ids',
help=('Archive a comma separated list of credential IDs. (mutually'
' exclusive with --days)'),
),
]
def run(self, days, force, ids):
if not settings.DYNAMODB_TABLE_ARCHIVE:
logger.error('DYNAMODB_TABLE_ARCHIVE is not configured, exiting.')
return 1
if days and ids:
logger.error('--days and --ids options are mutually exclusive')
return 1
if not days and not ids:
logger.error('Either --days or --ids options are required')
return 1
credentials = []
if ids:
            # filter(None, ...) drops empty strings left by stray commas
_ids = [_id.strip() for _id in list(filter(None, ids.split(',')))]
if not _ids:
logger.error('Passed in --ids argument is empty')
return 1
for credential in Credential.batch_get(_ids):
if credential.enabled:
logger.warning(
'Skipping enabled credential {}'.format(credential.id)
)
continue
credentials.append(credential)
else:
for credential in Credential.data_type_date_index.query(
'credential'
):
tz = credential.modified_date.tzinfo
now = datetime.now(tz)
delta = now - credential.modified_date
if not credential.enabled and delta.days > days:
credentials.append(credential)
credentialmanager.archive_credentials(credentials, force=force)
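

# Illustrative invocations (assumes the command is registered with a
# flask_script Manager; the "manage.py" entry point and command name are
# hypothetical):
#
#   python manage.py archive_credentials --days 30           # dry run
#   python manage.py archive_credentials --days 30 --force   # apply
#   python manage.py archive_credentials --ids id1,id2 --force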
|
[
"[email protected]"
] | |
2b60ca9956c9c1ab4e069b128087a7712afe9fa7
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2652486_0/Python/Nooodles/C.py
|
d3ac03b56445bbc8e423e2fc1572aa92e94b5711
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,844 |
py
|
def Solve( P ):
Is3 = False; Is5 = False; K = len(P);
New = []; ANS = [];
for e in P:
New.append(e);
if e%3 == 0:
Is3 = True;
if e%5 == 0:
Is5 = True;
print New;
New.sort();
while New[0] == 1:
New.pop(0);
K -= 1
print New;
if Is3:
ANS.append(3);
for i in range(K):
if New[i]%3 == 0:
New[i] /= 3 ;
if Is5:
ANS.append(5);
for i in range(K):
if New[i]%5 == 0:
New[i] /= 5;
print Is3, Is5, New;
return
def Brute1(a,b,c):
VAL = [1, a, b, c, a*b, a*c, b*c, a*b*c];
VAL.sort();
for i in range(6,-1,-1):
if VAL[i] == VAL[i+1]:
VAL.pop(i);
return VAL;
T = int(raw_input());
#R = 100; N = 3; M = 5; K = 7;
Data = [];
for q in range(T):
    print 'Case #%d:' % (q + 1);
[R, N, M, K] = map(int, raw_input().split());
PAIR = []; PROD = [];
for a in range(2,6):
for b in range(a,6):
for c in range(b,6):
PAIR += [str(a)+str(b)+str(c)];
PROD.append( Brute1(a,b,c) );
# print PAIR[-1], PROD[-1]
# for a in range(20):
# print a, PROD[a];
# print
for case in range(R):
Inp = map(int, raw_input().split())
# print Inp
Hit = [];
for i in range(20):
Good = True;
for e in Inp:
if e not in PROD[i]:
# print 'badness', i, e, PAIR[i], PROD[i]
Good = False;
if Good:
Hit.append(i);
print PAIR[Hit[0]]
# Match( map(int, raw_input().split()) );
|
[
"[email protected]"
] | |
9c56448ce8d78dda08d50c5e06464a14cce689bd
|
22ebcc842dbc933bfa8fdad89b8b8ef48ecc91c7
|
/plots/spread/plot_spread_factors2.py
|
5d429b0c892e8313c0fa1fbb03e2abb8a10e8d4b
|
[] |
no_license
|
klikooo/thesis-src
|
192651c18f243c59cfa588e7052dc1a96ab0a146
|
64f2ee824afdc2d3fd0f98c6d9fcfda597b9ad9f
|
refs/heads/master
| 2020-04-16T18:16:20.638147 | 2019-08-20T14:59:52 | 2019-08-20T14:59:52 | 161,623,404 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,132 |
py
|
from decimal import Decimal
import copy
import plots.spread.plot as plot
import matplotlib.pyplot as plt
import util
setting = {"experiment": '3',
"data_set": util.DataSet.ASCAD,
"subkey_index": 2,
"unmask": True,
"desync": 0,
"use_hw": True,
"spread_factor": 6,
"epochs": 80,
"batch_size": 100,
"lr": '%.2E' % Decimal(0.0001),
"l2_penalty": 0,
"train_size": 1000,
"kernel_sizes": [0],
"num_layers": [0],
"channel_sizes": [0],
"network_name": "SpreadNet",
"runs": range(5),
"init_weights": "",
"title": "",
"plot_colors": ["acqua", "black", "brown", "darkblue", "darkgreen", "fuchsia",
"goldenrod", "green", "grey", "indigo", "lavender"],
"ge_x": [],
"ge_y": [],
"ta": [],
"va": [],
"tl": [],
"vl": [],
"line_title": [],
"line_title2": "$Spread_{PH}$",
"plot_markers": [" ", "*", "+"]
}
def plot_factors(spread_factors, save_name, x_lim, y_lim, show=False, train_size=1000):
setting_spread = copy.deepcopy(setting)
setting_spread.update({"network_name": "SpreadNet",
"line_title2": "$Spread_{PH}$",
"plot_colors": ["r", "g", "b"],
"plot_marker": [" "],
"train_size": train_size,
})
setting_dense_spread = copy.deepcopy(setting)
setting_dense_spread.update({"network_name": "DenseSpreadNet",
"line_title2": "$MLP_{RT}$",
"plot_colors": ["r", "g", "b"],
"plot_marker": ["-"],
"train_size": train_size,
})
settings_spread = []
settings_dense_spread = []
colors = ["r", "g", "b", "y", "g", "b"]
for spread_factor, color in zip(spread_factors, colors):
print(spread_factor)
s_spread = copy.deepcopy(setting_spread)
s_dense_spread = copy.deepcopy(setting_dense_spread)
s_spread.update({
"data_set": util.DataSet.ASCAD,
"spread_factor": spread_factor,
"plot_colors": [color],
"plot_markers": [" "],
"line_title2": s_spread['line_title2'] + " sf " + str(spread_factor)
})
s_dense_spread.update({
"spread_factor": spread_factor,
"plot_colors": [color],
"plot_markers": ["h"],
"line_title2": s_dense_spread['line_title2'] + " sf " + str(spread_factor)
})
settings_spread.append(s_spread)
dpa_spread = copy.deepcopy(s_spread)
dpa_spread.update({"data_set": util.DataSet.DPA_V4,
"plot_colors": [color],
"plot_markers": ["h"],
"line_title2": dpa_spread['line_title2'] + " DPA sf " + str(spread_factor)})
settings_dense_spread.append(s_dense_spread)
settings_spread.append(dpa_spread)
network_settings = {
"SpreadNet": settings_spread,
# "DenseSpreadNet": settings_dense_spread
}
plot.create_plot(network_settings, save_name, x_lim, y_lim)
if show:
plt.show()
#########
# ASCAD #
#########
data_set = util.DataSet.ASCAD
setting.update({"data_set": util.DataSet.ASCAD})
###############
# TEST FOR HW #
###############
# Set the global setting to HW
setting.update({"use_hw": True})
# Test for HW with different training sizes
path = "/media/rico/Data/TU/thesis/report/img/spread/factors"
hw_save_name = f"{path}/{data_set}_hw_" + "{}.png"
plot_factors([3, 6, 9, 12], hw_save_name.format(1000), [-1, 100], [0, 101], show=False)
plot_factors([3, 6, 9, 12], hw_save_name.format(5000), [-1, 25], [0, 70], train_size=5000)
plot_factors([3, 6, 9, 12], hw_save_name.format(20000), [-1, 25], [0, 70], train_size=20000)
plot_factors([3, 6, 9, 12], hw_save_name.format(40000), [-1, 25], [0, 70], train_size=40000)
###############
# TEST FOR ID #
###############
# Set the global setting to ID
setting.update({"use_hw": False})
# Test for ID with different training sizes
id_save_name = f"{path}/{data_set}_id_" + "{}.png"
plot_factors([3, 6, 9, 12], id_save_name.format(1000), [-100, 3500], [0, 140], show=False)
plot_factors([3, 6, 9, 12], id_save_name.format(5000), [-1, 25], [0, 70], train_size=5000)
plot_factors([3, 6, 9, 12], id_save_name.format(20000), [-1, 10], [0, 30], train_size=20000)
plot_factors([3, 6, 9, 12], id_save_name.format(40000), [-1, 10], [0, 20], train_size=40000)
#########
# DPAv4 #
#########
# Set the data set
data_set = util.DataSet.DPA_V4
setting.update({"data_set": util.DataSet.ASCAD})
###############
# TEST FOR HW #
###############
# Set the global setting to HW
setting.update({"use_hw": True})
# Test for HW with different training sizes
path = "/media/rico/Data/TU/thesis/report/img/spread/factors"
hw_save_name = f"{path}/{data_set}_hw_" + "{}.png"
plot_factors([3, 6, 9, 12], hw_save_name.format(1000), [-1, 75], [0, 105], show=False)
plot_factors([3, 6, 9, 12], hw_save_name.format(5000), [-1, 25], [0, 70], train_size=5000)
plot_factors([3, 6, 9, 12], hw_save_name.format(20000), [-1, 25], [0, 70], train_size=20000)
plot_factors([3, 6, 9, 12], hw_save_name.format(40000), [-1, 25], [0, 70], train_size=40000)
###############
# TEST FOR ID #
###############
# Set the global setting to ID
setting.update({"use_hw": False})
# Test for ID with different training sizes
id_save_name = f"{path}/{data_set}_id_" + "{}.png"
plot_factors([3, 6, 9, 12], id_save_name.format(1000), [-100, 3500], [0, 140], show=False)
plot_factors([3, 6, 9, 12], id_save_name.format(5000), [-1, 25], [0, 100], train_size=5000)
plot_factors([3, 6, 9, 12], id_save_name.format(20000), [-1, 10], [0, 30], train_size=20000)
plot_factors([3, 6, 9, 12], id_save_name.format(40000), [-1, 10], [0, 20], train_size=40000)
|
[
"[email protected]"
] | |
2835fc3c17d97c08452787df63f76db069a5df95
|
e8a87fa14006f1479161293a898e2f73eefc91f7
|
/Week4/Tarefa 02/Exercicio_01_number_primo.py
|
459db3bf35bb6448bddfe6e7a3737684cd7193bd
|
[
"MIT"
] |
permissive
|
WesGtoX/Intro-Computer-Science-with-Python-Part01
|
7880cc4483662104ecaa3c199826cb129ae00dca
|
50561e4c104ced2f5e468e382f45e4ca5cb2279e
|
refs/heads/master
| 2020-03-21T06:29:10.584323 | 2018-06-21T21:28:43 | 2018-06-21T21:28:43 | 138,223,598 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 263 |
py
|
n = int(input("Enter an integer: "))
primo = True
divisor = 2
while divisor < n and primo:
    if n % divisor == 0:
        primo = False
    divisor += 1
if primo and n != 1:
    print("prime")
else:
    print("not prime")
|
[
"[email protected]"
] | |
494992c771c97a259199b4e2c2478e269a3d2032
|
fb671897604a99a4c85e912717dd27d9b93a3543
|
/src/engine/sounds.py
|
bc5e18335fbbfdd58245daae02d53e2e2782c9c1
|
[] |
no_license
|
davidpendergast/grovetender
|
650215063ef57a49e2a185ce695f463032736ee9
|
384f546af7f76d3826545875c252edaefdd632e3
|
refs/heads/master
| 2022-07-11T04:26:02.878880 | 2022-06-26T05:51:10 | 2022-06-26T05:51:10 | 256,640,586 | 4 | 1 | null | 2022-06-26T05:51:11 | 2020-04-18T00:48:26 |
Python
|
UTF-8
|
Python
| false | false | 1,782 |
py
|
import pygame
from src.utils.util import Utils
import traceback
_MASTER_VOLUME = 1.0
_LOADED_EFFECTS = {} # effect_id -> Effect object
_RECENTLY_PLAYED = {} # effect_id -> ticks since last play
RECENCY_LIMIT = 4 # if an effect was already played X ticks ago, don't play it again
def set_volume(volume):
global _MASTER_VOLUME
_MASTER_VOLUME = Utils.bound(volume, 0.0, 1.0)
def update():
to_remove = []
for effect in _RECENTLY_PLAYED:
if _RECENTLY_PLAYED[effect] >= RECENCY_LIMIT:
to_remove.append(effect)
else:
_RECENTLY_PLAYED[effect] = _RECENTLY_PLAYED[effect] + 1
for effect in to_remove:
del _RECENTLY_PLAYED[effect]
def play_sound(sound):
"""
:param sound: either an effect_path, or a tuple (effect_path, volume)
"""
if sound is None:
return
if isinstance(sound, tuple):
effect_path = sound[0]
volume = sound[1]
else:
effect_path = sound
volume = 1.0
if _MASTER_VOLUME == 0 or volume <= 0 or effect_path is None:
return
if effect_path in _RECENTLY_PLAYED:
return
if effect_path in _LOADED_EFFECTS:
effect = _LOADED_EFFECTS[effect_path]
effect.set_volume(_MASTER_VOLUME * volume)
else:
try:
effect = pygame.mixer.Sound(effect_path)
effect.set_volume(_MASTER_VOLUME * volume)
except Exception:
print("ERROR: failed to load sound effect {}".format(effect_path))
traceback.print_exc()
effect = None
_LOADED_EFFECTS[effect_path] = effect
if effect is not None:
_RECENTLY_PLAYED[effect_path] = 0
# print("INFO: playing sound effect: {}".format(effect_path))
effect.play()
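

# Illustrative usage (hypothetical asset paths; an addition to the module):
#
#   pygame.mixer.init()                      # must run before any playback
#   play_sound("assets/sfx/click.ogg")       # plays at full volume
#   play_sound(("assets/sfx/hit.ogg", 0.4))  # (path, volume) tuple form
#   update()  # call once per game tick so the RECENCY_LIMIT throttle decays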
|
[
"[email protected]"
] | |
6e8a22680c7a5fb533a518c9e01bd3dbc4e797b5
|
6e7f45c7a7f5b7a2333c6bfe8a4ae8723537dc62
|
/pronostico/settings.py
|
8c476df723d65a4a066e52776e642edcee201abd
|
[] |
no_license
|
devnandito/pronosticos
|
8bcacfd7cf3032fb5b831c331ef0719f0c050205
|
0796562243dc51aa2e5b1e8e51d10497d15aa8e3
|
refs/heads/master
| 2021-05-28T04:49:53.532946 | 2014-08-24T14:36:19 | 2014-08-24T14:36:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,693 |
py
|
# Django settings for pronostico project.
import os
RUTA_PROYECTO = os.path.dirname(os.path.realpath(__file__))
DEFAULT_CHARSET = 'utf-8'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'pronostico', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'postgres',
'PASSWORD': '1234',
'HOST': '192.168.192.152', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '5432', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'es-mx'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
# STATIC_URL is a URL prefix, not a filesystem path; note that
# os.path.join(RUTA_PROYECTO, '/static/') would return just '/static/'
# anyway, because an absolute second component discards the first.
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(RUTA_PROYECTO,'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'd6)7j+b+w-id+kpvk+1t%!$q0$r(rbqi+!y9h5m-e^)wu#1a0x'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'pronostico.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'pronostico.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(RUTA_PROYECTO,'plantillas'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'principal',
'gunicorn',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
[
"[email protected]"
] | |
870e815caf5014255375f8956850095165e7b89b
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/distributed_passes/test_build_cinn_pass_resnet.py
|
6f608a5299670435d9be93a605837060354b0858
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 |
Apache-2.0
| 2023-09-14T19:20:51 | 2016-08-15T06:59:08 |
C++
|
UTF-8
|
Python
| false | false | 1,384 |
py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from dist_pass_test_base import DistPassTestBase
from model_zoo import resnet_model
import paddle
from paddle.distributed.passes import PassManager, new_pass
class TestBuildCINNPass(DistPassTestBase):
def init(self):
self.atol = 0.5
self.rtol = 0.0
def apply_passes(self, main_prog, startup_prog):
pass_manager = PassManager(
[
new_pass("build_cinn"),
new_pass("fuse_elewise_add_act"),
]
)
pass_manager.apply([main_prog], [startup_prog])
print(pass_manager.names)
def test_bs_32(self):
if paddle.is_compiled_with_cinn():
self.check_main(resnet_model, batch_size=32)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
dd5091a2cf2790d210743f7a8548f6daf53c1721
|
d0cb58e1658d4b5b88bdc07e497dc8092707ae02
|
/2021/02february/05dialect.py
|
df530b033aff277696ce9660a95e906229059d3a
|
[] |
no_license
|
June-fu/python365
|
27f9b753d38ade549d59aa8f2d8bda0fb8b1e20c
|
242033a4b644a7566fbfa4dba9b60f60aa31fe91
|
refs/heads/master
| 2021-07-02T21:42:28.454091 | 2021-05-04T15:08:44 | 2021-05-04T15:08:44 | 233,629,713 | 0 | 0 | null | 2020-01-13T15:52:58 | 2020-01-13T15:36:53 | null |
UTF-8
|
Python
| false | false | 868 |
py
|
#!/usr/bin/python
'''
# @ Author: june-fu
# @ Create Time: 2021-03-13 22:45:08
# @ Modified by: june-fu
# @ Modified time: 2021-03-13 22:45:10
# @ Description:
'''
import pandas as pd
from io import StringIO
data = 'label1,label2,label3\nindex1,"a,c,e\nindex2,b,d,f'
print(data)
# By default, read_csv uses the Excel dialect and treats the double quote as the quote character
import csv
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = pd.read_csv(StringIO(data), dialect=dia)
print(df)
# all of the dialect options can be specified separately by keyword arguments
data2 = 'a,b,c~1,2,3~4,5,6'
print(pd.read_csv(StringIO(data2), lineterminator='~'))
# Another common dialect option is skipinitialspace, to skip any whitespace after a delimiter.
data3 = 'a, b, c\n1, 3, 2\n4, 5, 6'
print(data3)
print(pd.read_csv(StringIO(data3), skipinitialspace=True))
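
# Further sketch (an addition; relies on read_csv accepting csv-module
# quoting constants directly): the QUOTE_NONE behaviour above can also be
# requested without building a dialect object.
print(pd.read_csv(StringIO(data), quoting=csv.QUOTE_NONE))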
|
[
"[email protected]"
] | |
d1cb7036a1cf941780faab1c0b64128cb8f1ec78
|
1070490055b5c981d936038959731134b01ce272
|
/apps/users/migrations/0005_auto_20170327_1444.py
|
ee50300c072d68d14bdc022c9266044d9a86412f
|
[] |
no_license
|
ljingen/MxOnline
|
401d5be37e11cb866dc8eb78acc9b6de053c5708
|
1b471dd6b4968f79dd6866bb5e3e6413b760c8a1
|
refs/heads/master
| 2021-10-11T08:57:05.304124 | 2018-02-11T06:59:32 | 2018-02-11T06:59:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 554 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-27 14:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_banner_emailverifyrecord'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='gender',
field=models.CharField(choices=[('MAN', '\u7537'), ('FEMALE', '\u5973')], default='MAN', max_length=6, verbose_name='\u6027\u522b'),
),
]
|
[
"[email protected]"
] | |
eeaf814b80f20507a25c829f946883439e8b6ed9
|
51b4eee3ee70878650cacb24104f4546ae65dd9f
|
/tests/modules/plugins/another.py
|
2ff84dacf3da5307d00c3690a99e4cd4563a175f
|
[] |
no_license
|
gabrielferreira/coveragepy
|
4d81e283bddd2a05c62e7ebbac09ea15d7e22daa
|
790fbd51a31d86be6219247c786feee49b10a565
|
refs/heads/master
| 2020-05-29T09:33:11.516105 | 2015-06-22T11:15:10 | 2015-06-22T11:15:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 116 |
py
|
"""A plugin for tests to reference."""
from coverage import CoveragePlugin
class Plugin(CoveragePlugin):
pass
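

# Note (illustrative; not part of this minimal test fixture): a real
# coverage.py plugin module would also expose the module-level entry point,
# roughly:
#
#   def coverage_init(reg, options):
#       reg.add_file_tracer(Plugin())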
|
[
"[email protected]"
] | |
8039502bd646c312cc145afd795a2888c0b371f6
|
ccbfc7818c0b75929a1dfae41dc061d5e0b78519
|
/aliyun-openapi-python-sdk-master/aliyun-python-sdk-baas/aliyunsdkbaas/request/v20180731/DescribeOrdererLogsRequest.py
|
be856c0d614b151f838c0ed18c1a50a5aca82921
|
[
"Apache-2.0"
] |
permissive
|
P79N6A/dysms_python
|
44b634ffb2856b81d5f79f65889bfd5232a9b546
|
f44877b35817e103eed469a637813efffa1be3e4
|
refs/heads/master
| 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,463 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeOrdererLogsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Baas', '2018-07-31', 'DescribeOrdererLogs')
def get_Lines(self):
return self.get_query_params().get('Lines')
def set_Lines(self,Lines):
self.add_query_param('Lines',Lines)
def get_ConsortiumId(self):
return self.get_query_params().get('ConsortiumId')
def set_ConsortiumId(self,ConsortiumId):
self.add_query_param('ConsortiumId',ConsortiumId)
def get_OrdererName(self):
return self.get_query_params().get('OrdererName')
def set_OrdererName(self,OrdererName):
self.add_query_param('OrdererName',OrdererName)
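

# Illustrative usage sketch (credentials and region are placeholders; assumes
# the standard aliyunsdkcore client API):
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
#   request = DescribeOrdererLogsRequest()
#   request.set_ConsortiumId("<consortium-id>")
#   request.set_OrdererName("<orderer-name>")
#   request.set_Lines(100)
#   response = client.do_action_with_exception(request)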
|
[
"[email protected]"
] | |
a212b30d912750ff9fce5a42e4e78bf0e89cea39
|
dc18846eb4e956e3067b187c17eec203ad02b732
|
/Web Scrapping/html/lucky.py
|
5fbfee4cc9ed6bc9221a0ad5ef2a614573aef13e
|
[] |
no_license
|
rafal-mizera/automate_the_boring_stuff
|
53e46e2f5668b3f64f67297c997f2a44695d765b
|
966e9d04546f33f7fcd12c569e19c4d736a4eb44
|
refs/heads/master
| 2023-07-17T08:41:03.235120 | 2021-09-01T16:28:57 | 2021-09-01T16:28:57 | 402,127,057 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 601 |
py
|
#! python3
# lucky.py - Opens several Google search results.
import requests
import webbrowser
import bs4
import sys
print('Googling...') # display text while downloading the google page
# searched = sys.argv[1:]
searched = "polska"
res = requests.get(f"http://www.google.com/search?q={searched}")
# res.raise_for_status()
soup = bs4.BeautifulSoup(res.text,features="html.parser")
links_to_open = soup.select('div#main > div > div > div > a')
print(links_to_open)
numOpen = min(5,len(links_to_open))
for i in range(numOpen):
webbrowser.open("google.com" + links_to_open[i].get("href"))
|
[
"[email protected]"
] | |
a3f5e078854a80495877e17b416a7359c6c94591
|
d305e9667f18127e4a1d4d65e5370cf60df30102
|
/tests/ut/python/dataset/test_callbacks.py
|
4ccbe659ee3020e9e667a00ce96b9115ea4a12ef
|
[
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
imyzx2017/mindspore_pcl
|
d8e5bd1f80458538d07ef0a8fc447b552bd87420
|
f548c9dae106879d1a83377dd06b10d96427fd2d
|
refs/heads/master
| 2023-01-13T22:28:42.064535 | 2020-11-18T11:15:41 | 2020-11-18T11:15:41 | 313,906,414 | 6 | 1 |
Apache-2.0
| 2020-11-18T11:25:08 | 2020-11-18T10:57:26 | null |
UTF-8
|
Python
| false | false | 16,116 |
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from builtins import range, super
import time
import pytest
from mindspore import context
from mindspore import log as logger
from mindspore.dataset.callback import DSCallback, WaitedDSCallback
from mindspore.train import Model
from mindspore.train.callback import Callback
import mindspore.dataset as ds
import mindspore.nn as nn
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class BaseCallback(DSCallback):
def __init__(self, step_size=1, events=None, cb_id=0):
super().__init__(step_size)
self.events = events
self.cb_id = cb_id
def append(self, event_name, ds_run_context):
event = [event_name, ds_run_context.cur_epoch_num,
ds_run_context.cur_step_num_in_epoch, ds_run_context.cur_step_num]
event = '_'.join([str(e) for e in event])
index = -1
for i, e in enumerate(self.events):
if e[0] == event:
index = i
break
if index != -1:
self.events[index][1].append(self.cb_id)
else:
self.events.append((event, [self.cb_id]))
class Begin(BaseCallback):
def ds_begin(self, ds_run_context):
self.append("begin", ds_run_context)
class EpochBegin(BaseCallback):
def ds_epoch_begin(self, ds_run_context):
self.append("epoch_begin", ds_run_context)
class EpochEnd(BaseCallback):
def ds_epoch_end(self, ds_run_context):
self.append("epoch_end", ds_run_context)
class StepBegin(BaseCallback):
def ds_step_begin(self, ds_run_context):
self.append("step_begin", ds_run_context)
class StepEnd(BaseCallback):
def ds_step_end(self, ds_run_context):
self.append("step_end", ds_run_context)
class MyDSCallback(Begin, EpochBegin, EpochEnd, StepBegin, StepEnd):
pass
def generate_expected(epoch_num, step_num, step_size=1, map_num=1, repeat=1):
events = []
cb_id = list(range(map_num))
def append(name, e, s):
event = [name, e + 1, s + 1, e * step_num * repeat + s + 1]
event = '_'.join([str(ev) for ev in event])
events.append((event, cb_id))
events.append(("begin_0_0_0", cb_id))
for e in range(epoch_num):
append("epoch_begin", e, -1)
for s in range(step_num * repeat):
if s % step_size == 0:
append("step_begin", e, s)
append("step_end", e, s)
append("epoch_end", e, step_num * repeat - 1)
return events
def build_test_case_1cb(epochs, steps, step_size=1, repeat=1):
events = []
arr = list(range(1, steps + 1))
data = ds.NumpySlicesDataset(arr, shuffle=False)
my_cb = MyDSCallback(step_size=step_size, events=events)
data = data.map(operations=(lambda x: x), callbacks=my_cb)
if repeat != 1:
if repeat % 2 == 0 and repeat != 2:
data = data.repeat(2)
data = data.map(operations=(lambda x: x))
data = data.repeat(repeat // 2)
else:
data = data.repeat(repeat)
itr = data.create_tuple_iterator(num_epochs=epochs)
for _ in range(epochs):
for _ in itr:
pass
expected_events = generate_expected(epochs, steps, step_size, 1, repeat)
assert expected_events == events
def build_test_case_2cbs(epochs, steps):
events1 = []
events2 = []
my_cb1 = MyDSCallback(events=events1)
my_cb2 = MyDSCallback(events=events2)
arr = list(range(1, steps + 1))
data = ds.NumpySlicesDataset(arr, shuffle=False)
data = data.map(operations=(lambda x: x), callbacks=[my_cb1, my_cb2])
itr = data.create_tuple_iterator(num_epochs=epochs)
for _ in range(epochs):
for _ in itr:
pass
expected_events = generate_expected(epochs, steps)
assert expected_events == events1
assert expected_events == events2
def build_test_case_2maps(epochs, steps):
events = []
my_cb1 = MyDSCallback(events=events, cb_id=0)
my_cb2 = MyDSCallback(events=events, cb_id=1)
arr = list(range(1, steps + 1))
data = ds.NumpySlicesDataset(arr, shuffle=False)
data = data.map(operations=(lambda x: x), callbacks=my_cb1)
data = data.map(operations=(lambda x: x), callbacks=my_cb2)
itr = data.create_tuple_iterator(num_epochs=epochs)
for _ in range(epochs):
for _ in itr:
pass
expected_events = generate_expected(epochs, steps, map_num=2)
assert expected_events[1:] == events[1:]
for event in events:
assert len(event) == 2
event, cb_ids = event
if event != "begin_0_0_0":
assert cb_ids[0] == 0
assert cb_ids[1] == 1
def test_callbacks_all_methods():
logger.info("test_callbacks_all_methods")
build_test_case_1cb(1, 1)
build_test_case_1cb(1, 2)
build_test_case_1cb(1, 3)
build_test_case_1cb(1, 4)
build_test_case_1cb(2, 1)
build_test_case_1cb(2, 2)
build_test_case_1cb(2, 3)
build_test_case_1cb(2, 4)
build_test_case_1cb(3, 1)
build_test_case_1cb(3, 2)
build_test_case_1cb(3, 3)
build_test_case_1cb(3, 4)
def test_callbacks_var_step_size():
logger.info("test_callbacks_var_step_size")
build_test_case_1cb(1, 2, 2)
build_test_case_1cb(1, 3, 2)
build_test_case_1cb(1, 4, 2)
build_test_case_1cb(2, 2, 2)
build_test_case_1cb(2, 3, 2)
build_test_case_1cb(2, 4, 2)
build_test_case_1cb(3, 2, 2)
build_test_case_1cb(3, 3, 2)
build_test_case_1cb(3, 4, 2)
def test_callbacks_all_2cbs():
logger.info("test_callbacks_all_2cbs")
build_test_case_2cbs(4, 1)
build_test_case_2cbs(4, 2)
build_test_case_2cbs(4, 3)
build_test_case_2cbs(4, 4)
class MyWaitedCallback(WaitedDSCallback):
def __init__(self, events, step_size=1):
super().__init__(step_size)
self.events = events
def sync_epoch_begin(self, train_run_context, ds_run_context):
event = f"ds_epoch_begin_{ds_run_context.cur_epoch_num}_{ds_run_context.cur_step_num}"
self.events.append(event)
def sync_step_begin(self, train_run_context, ds_run_context):
event = f"ds_step_begin_{ds_run_context.cur_epoch_num}_{ds_run_context.cur_step_num}"
self.events.append(event)
class MyMSCallback(Callback):
def __init__(self, events):
self.events = events
def epoch_end(self, run_context):
cb_params = run_context.original_args()
event = f"ms_epoch_end_{cb_params.cur_epoch_num}_{cb_params.cur_step_num}"
self.events.append(event)
def step_end(self, run_context):
cb_params = run_context.original_args()
event = f"ms_step_end_{cb_params.cur_epoch_num}_{cb_params.cur_step_num}"
self.events.append(event)
class Net(nn.Cell):
def construct(self, x, y):
return x
def test_callbacks_non_sink():
logger.info("test_callbacks_non_sink")
events = []
my_cb1 = MyWaitedCallback(events, 1)
my_cb2 = MyMSCallback(events)
arr = [1, 2, 3, 4]
data = ds.NumpySlicesDataset((arr, arr), column_names=["c1", "c2"], shuffle=False)
data = data.map(operations=(lambda x: x), callbacks=my_cb1)
net = Net()
model = Model(net)
model.train(2, data, dataset_sink_mode=False, callbacks=[my_cb2, my_cb1])
expected_synced_events = ['ms_step_end_1_1', 'ds_step_begin_1_2', 'ms_step_end_1_2', 'ds_step_begin_1_3',
'ms_step_end_1_3', 'ds_step_begin_1_4', 'ms_step_end_1_4',
'ms_epoch_end_1_4', 'ds_epoch_begin_2_4',
'ds_step_begin_2_5', 'ms_step_end_2_5', 'ds_step_begin_2_6',
'ms_step_end_2_6', 'ds_step_begin_2_7', 'ms_step_end_2_7', 'ds_step_begin_2_8',
'ms_step_end_2_8', 'ms_epoch_end_2_8']
assert events[:18] == expected_synced_events
def test_callbacks_non_sink_batch_size2():
logger.info("test_callbacks_non_sink_batch_size2")
events = []
my_cb1 = MyWaitedCallback(events, 2)
my_cb2 = MyMSCallback(events)
arr = [1, 2, 3, 4]
data = ds.NumpySlicesDataset((arr, arr), column_names=["c1", "c2"], shuffle=False)
data = data.map(operations=(lambda x: x), callbacks=my_cb1)
data = data.batch(2)
net = Net()
model = Model(net)
model.train(2, data, dataset_sink_mode=False, callbacks=[my_cb2, my_cb1])
expected_synced_events = ['ms_step_end_1_1', 'ds_step_begin_1_3',
'ms_step_end_1_2',
'ms_epoch_end_1_2', 'ds_epoch_begin_2_4',
'ds_step_begin_2_5', 'ms_step_end_2_3', 'ds_step_begin_2_7',
'ms_step_end_2_4', 'ms_epoch_end_2_4']
assert events[:10] == expected_synced_events
def test_callbacks_non_sink_mismatch_size():
logger.info("test_callbacks_non_sink_mismatch_size")
default_timeout = ds.config.get_callback_timeout()
ds.config.set_callback_timeout(1)
events = []
my_cb1 = MyWaitedCallback(events, 2)
my_cb2 = MyMSCallback(events)
arr = [1, 2, 3, 4]
data = ds.NumpySlicesDataset((arr, arr), column_names=["c1", "c2"], shuffle=False)
data = data.map(operations=(lambda x: x), callbacks=my_cb1)
data = data.batch(3)
net = Net()
model = Model(net)
with pytest.raises(Exception) as err:
model.train(2, data, dataset_sink_mode=False, callbacks=[my_cb2, my_cb1])
assert "RuntimeError: ds_step_begin timed out after 1 second(s)" in str(err.value)
ds.config.set_callback_timeout(default_timeout)
def test_callbacks_validations():
logger.info("test_callbacks_validations")
with pytest.raises(Exception) as err:
data = ds.NumpySlicesDataset([1, 2, 3, 4], shuffle=False)
data.map(operations=(lambda x: x), callbacks=0)
assert "Argument callbacks with value 0 is not " in str(err.value)
with pytest.raises(Exception) as err:
my_cb1 = MyDSCallback()
data = ds.NumpySlicesDataset([1, 2, 3, 4], shuffle=False)
data.map(operations=(lambda x: x), callbacks=[my_cb1, 0])
assert "Argument callbacks[1] with value 0 is not " in str(err.value)
with pytest.raises(Exception) as err:
class BadCB(DSCallback):
pass
my_cb = BadCB()
data = ds.NumpySlicesDataset([1, 2, 3, 4], shuffle=False)
data = data.map(operations=(lambda x: x), callbacks=my_cb)
for _ in data:
pass
assert "Provided Callback class did not override any of the 6 callback methods." in str(err.value)
def test_callbacks_sink_simulation():
logger.info("test_callback_sink_simulation")
events = []
epochs = 2
my_cb = MyWaitedCallback(events, 1)
data = ds.NumpySlicesDataset([1, 2, 3, 4], shuffle=False)
data = data.map(operations=(lambda x: x), callbacks=my_cb)
data = data.to_device()
data.send(num_epochs=epochs)
for e in range(epochs):
for s in range(4):
time.sleep(0.5)
events.append(f"ms_step_end_{e + 1}_{e * 4 + s + 1}")
my_cb.step_end(run_context=0)
events.append(f"ms_epoch_end_{e + 1}_{(e + 1) * 4}")
my_cb.epoch_end(run_context=0)
expected_synced_events = ['ms_step_end_1_1', 'ds_step_begin_1_2', 'ms_step_end_1_2', 'ds_step_begin_1_3',
'ms_step_end_1_3', 'ds_step_begin_1_4', 'ms_step_end_1_4',
'ms_epoch_end_1_4', 'ds_epoch_begin_2_4',
'ds_step_begin_2_5', 'ms_step_end_2_5', 'ds_step_begin_2_6',
'ms_step_end_2_6', 'ds_step_begin_2_7', 'ms_step_end_2_7', 'ds_step_begin_2_8',
'ms_step_end_2_8', 'ms_epoch_end_2_8']
assert events == expected_synced_events
def test_callbacks_repeat():
logger.info("test_callbacks_repeat")
build_test_case_1cb(epochs=2, steps=2, step_size=1, repeat=2)
build_test_case_1cb(epochs=2, steps=2, step_size=1, repeat=3)
build_test_case_1cb(epochs=2, steps=2, step_size=2, repeat=3)
build_test_case_1cb(epochs=3, steps=2, step_size=4, repeat=3)
build_test_case_1cb(epochs=2, steps=2, step_size=1, repeat=2)
build_test_case_1cb(epochs=2, steps=2, step_size=1, repeat=4)
build_test_case_1cb(epochs=2, steps=2, step_size=2, repeat=8)
build_test_case_1cb(epochs=3, steps=2, step_size=4, repeat=16)
def test_callbacks_exceptions():
logger.info("test_callbacks_exceptions")
class BadCB(DSCallback):
def ds_begin(self, ds_run_context):
raise RuntimeError("Bad begin")
with pytest.raises(Exception) as err:
data = ds.NumpySlicesDataset([1, 2, 3, 4], shuffle=False)
data = data.map(operations=(lambda x: x), callbacks=BadCB())
for _ in data:
pass
assert "RuntimeError: Bad begin" in str(err.value)
def test_callbacks_train_end():
logger.info("test_callback_sink_simulation")
# No asserts are needed, just test there is no deadlock or exceptions
events = []
epochs = 2
my_cb = MyWaitedCallback(events, 1)
data = ds.NumpySlicesDataset([1, 2, 3, 4], shuffle=False)
data = data.map(operations=(lambda x: x), callbacks=[my_cb])
data = data.to_device()
data.send(num_epochs=epochs)
time.sleep(0.5)
my_cb.end(run_context={})
time.sleep(0.5)
def test_callbacks_one_cb():
logger.info("test_callbacks_one_cb")
data = ds.NumpySlicesDataset([1, 2, 3, 4], shuffle=False)
events1 = []
events2 = []
events3 = []
my_begin = Begin(events=events1, cb_id=1)
my_epoch_begin = EpochBegin(events=events2, cb_id=2)
my_epoch_end = EpochEnd(events=events3, cb_id=3)
my_step_begin = StepBegin(events=events3, cb_id=3)
my_step_end = StepEnd(events=events2, cb_id=2)
data = data.map(operations=(lambda x: x), callbacks=my_begin)
data = data.map(operations=(lambda x: x), callbacks=[my_epoch_begin, my_step_end])
data = data.map(operations=(lambda x: x), callbacks=[my_epoch_end, my_step_begin])
itr = data.create_tuple_iterator(num_epochs=2)
for _ in range(2):
for _ in itr:
pass
expected_events1 = [('begin_0_0_0', [1])]
expected_events2 = [('epoch_begin_1_0_0', [2]), ('step_end_1_1_1', [2]), ('step_end_1_2_2', [2]),
('step_end_1_3_3', [2]), ('step_end_1_4_4', [2]), ('epoch_begin_2_0_4', [2]),
('step_end_2_1_5', [2]), ('step_end_2_2_6', [2]), ('step_end_2_3_7', [2]),
('step_end_2_4_8', [2])]
expected_events3 = [('step_begin_1_1_1', [3]), ('step_begin_1_2_2', [3]), ('step_begin_1_3_3', [3]),
('step_begin_1_4_4', [3]), ('epoch_end_1_4_4', [3]), ('step_begin_2_1_5', [3]),
('step_begin_2_2_6', [3]), ('step_begin_2_3_7', [3]), ('step_begin_2_4_8', [3]),
('epoch_end_2_4_8', [3])]
assert events1 == expected_events1
assert events2 == expected_events2
assert events3 == expected_events3
if __name__ == '__main__':
test_callbacks_all_2cbs()
test_callbacks_all_methods()
test_callbacks_exceptions()
test_callbacks_repeat()
test_callbacks_sink_simulation()
test_callbacks_validations()
test_callbacks_var_step_size()
test_callbacks_non_sink_batch_size2()
test_callbacks_non_sink()
test_callbacks_one_cb()
test_callbacks_non_sink_mismatch_size()
test_callbacks_train_end()
|
[
"[email protected]"
] | |
6ff608ee85a894cbca04ef671a0f5e95b6ee2d32
|
9a6b66106e369d462bbd6fa60b60ad173562704a
|
/app/views/__init__.py
|
54c19cadc19bbcc52a9189db06efc3f41c9cf54f
|
[
"MIT"
] |
permissive
|
msmmi/pulp
|
5cc0161ac6e7486e56f42e4a07786a604b61f5c3
|
506fe310b6835ae52a3a024c49fe8d5ee8c755d2
|
refs/heads/master
| 2020-03-29T22:46:32.097884 | 2018-09-24T05:06:10 | 2018-09-24T05:06:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 25 |
py
|
from . import load_views
|
[
"[email protected]"
] | |
8688a6e2cbff4ef628c50804024f8b9dea0c3e0e
|
f38e02b7f5cd7b67e672045f3b45823ecd7d158c
|
/sum.py
|
bcf7ae04bd78997c272e70336ab016b0278704a8
|
[] |
no_license
|
liberbell/basepy1
|
f8a70cff4295cd758ff86556f6e05b7e34899d36
|
67052719ec5ecb8de1fb0a07b59d1f565b4b83bf
|
refs/heads/master
| 2020-03-28T09:00:07.469151 | 2018-09-19T08:18:36 | 2018-09-19T08:18:36 | 148,006,137 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 79 |
py
|
# Print consecutive Fibonacci pairs (a, b) while b stays below 10.
a, b = 0, 1
while b < 10:
print(a, b)
a, b = b, a + b
print(a, b)
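

# Equivalent generator sketch (an addition, not part of the original snippet):
def fib_pairs(limit):
    """Yield the same (a, b) pairs the loop above prints, for b < limit."""
    a, b = 0, 1
    while b < limit:
        yield a, b
        a, b = b, a + b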
|
[
"[email protected]"
] | |
dc0f826e0cf4a9785e43d13f6a9f9043383d8525
|
6d1b3f61524e06d22074d170f41dec20b5c466c1
|
/research/qtopt/t2r_models.py
|
0aff74c4151f27790ab3584add36101cbe17853c
|
[
"Apache-2.0"
] |
permissive
|
omprakash58/tensor2robot
|
a7b8cb022886f5f4fff1ced83e7a601cff8c5de6
|
96c885688a5738ebb6772a20efc08fb70f98a55a
|
refs/heads/master
| 2022-12-24T05:41:28.617489 | 2020-09-29T17:56:59 | 2020-09-29T17:57:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,562 |
py
|
# coding=utf-8
# Copyright 2020 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T2R Model for QT-Opt. Reference OSS implementation ONLY."""
import abc
from typing import Optional, Tuple, Text
from absl import logging
import gin
import six
from tensor2robot.models import abstract_model
from tensor2robot.models import critic_model
# image_transformations code has some small discrepancies from the distortion
# parameters used in the QT-Opt paper.
from tensor2robot.preprocessors import image_transformations
from tensor2robot.preprocessors import spec_transformation_preprocessor
from tensor2robot.research.qtopt import networks
from tensor2robot.research.qtopt import optimizer_builder
from tensor2robot.utils import tensorspec_utils
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import training as contrib_training
from tensorflow_estimator.contrib.estimator.python.estimator import replicate_model_fn
TRAIN = tf.estimator.ModeKeys.TRAIN
EVAL = tf.estimator.ModeKeys.EVAL
PREDICT = tf.estimator.ModeKeys.PREDICT
INPUT_SHAPE = (512, 640, 3)
TARGET_SHAPE = (472, 472)
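# Raw camera frames arrive as 512x640 RGB and are cropped to 472x472 by the
# preprocessor below (random crop during TRAIN, center crop otherwise).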
def pack_features_kuka_e2e(tf_model, *policy_inputs):
"""Crop, Convert, Maybe Distort images.
Args:
    tf_model: Model that converts policy inputs to features.
    *policy_inputs: Inputs to the policy.
Returns:
features: preprocessed features.
"""
del tf_model, policy_inputs
raise NotImplementedError
@gin.configurable
@six.add_metaclass(abc.ABCMeta)
class LegacyGraspingModelWrapper(critic_model.CriticModel):
"""T2R wrapper class around grasping network definitions."""
def __init__(self,
loss_function=tf.losses.log_loss,
learning_rate=1e-4,
model_weights_averaging=.9999,
momentum=.9,
export_batch_size=1,
use_avg_model_params=True,
learning_rate_decay_factor=.999,
**kwargs):
"""Constructor."""
    # Set default hparams values if the user doesn't specify them.
default_hparams = dict(
batch_size=32,
examples_per_epoch=3000000,
learning_rate_decay_factor=learning_rate_decay_factor,
learning_rate=learning_rate,
model_weights_averaging=model_weights_averaging,
momentum=momentum,
num_epochs_per_decay=2.0,
optimizer='momentum',
rmsprop_decay=.9,
rmsprop_epsilon=1.0,
use_avg_model_params=use_avg_model_params)
self.hparams = contrib_training.HParams(**default_hparams)
self._export_batch_size = export_batch_size
self.kwargs = kwargs
super(LegacyGraspingModelWrapper, self).__init__(
loss_function=loss_function,
create_optimizer_fn=lambda: optimizer_builder.BuildOpt(self.hparams),
action_batch_size=kwargs.get('action_batch_size'),
use_avg_model_params=use_avg_model_params)
@abc.abstractproperty
def legacy_model_class(self):
pass
def create_legacy_model(self):
class_ = self.legacy_model_class
return class_(**self.kwargs)
@abc.abstractmethod
def pack_features(self, *policy_inputs):
pass
def get_trainable_variables(self):
"""Returns list of trainable model variables."""
return contrib_framework.get_trainable_variables(
self.legacy_model_class.__name__)
def get_variables(self):
"""Returns list of model variables."""
return contrib_framework.get_variables(self.legacy_model_class.__name__)
def get_label_specification(self, mode):
del mode
grasp_success_spec = tensorspec_utils.ExtendedTensorSpec(
shape=(1,), dtype=tf.float32, name='grasp_success')
return tensorspec_utils.TensorSpecStruct(
reward=grasp_success_spec)
def create_legacy_input_specification(self):
"""Compatibility method needed by cem policies."""
return self.legacy_model_class.create_input_specifications()
def get_global_step(self):
# tf.train.get_global_step() does not work well under model_fn for TPU.
with tf.variable_scope('', reuse=tf.AUTO_REUSE):
return tf.broadcast_to(
tf.get_variable('global_step', shape=[], dtype=tf.int64),
shape=(self._export_batch_size,))
def q_func(self,
features,
scope,
mode,
config = None,
params = None,
reuse=tf.AUTO_REUSE):
"""See base class."""
images = [features.state.image, features.state.image_1]
grasp_params = tf.concat(
[features.action.world_vector, features.action.vertical_rotation],
axis=1)
model = self.create_legacy_model()
is_training = mode == TRAIN
_, end_points = model.model(images, grasp_params, is_training=is_training)
# Return sigmoid(logits).
return {
'q_predicted': end_points['predictions'],
'global_step': self.get_global_step()
}
def create_optimizer(self):
"""Create the optimizer and scaffold used for training."""
config = self.get_run_config()
original_optimizer = self._create_optimizer_fn()
# Override self.scaffold_fn with a custom scaffold_fn that uses the
# swapping saver required for MovingAverageOptimizer.
use_avg_model_params = self.hparams.use_avg_model_params
def scaffold_fn():
"""Create a scaffold object."""
# MovingAverageOptimizer requires Swapping Saver.
scaffold = tf.train.Scaffold()
if use_avg_model_params:
saver = original_optimizer.swapping_saver(
keep_checkpoint_every_n_hours=1)
else:
saver = None
scaffold = tf.train.Scaffold(saver=saver, copy_from_scaffold=scaffold)
# The saver needs to be added to the graph for td3 hooks.
tf.add_to_collection(tf.GraphKeys.SAVERS, scaffold.saver)
return scaffold
self._scaffold_fn = scaffold_fn
optimizer = original_optimizer
if (self._use_sync_replicas_optimizer and
config is not None and config.num_worker_replicas > 1):
optimizer = tf.train.SyncReplicasOptimizer(
optimizer,
replicas_to_aggregate=config.num_worker_replicas - 1,
total_num_replicas=config.num_worker_replicas)
if self.is_device_gpu:
      optimizer = replicate_model_fn.TowerOptimizer(optimizer)
return optimizer
def create_train_op(self,
loss,
optimizer,
update_ops=None,
train_outputs=None):
"""Create the train of from the loss obtained from model_train_fn.
Args:
loss: The loss we compute within model_train_fn.
optimizer: An instance of `tf.train.Optimizer`.
update_ops: List of update ops to execute alongside the training op.
train_outputs: (Optional) A dict with additional tensors the training
model generates.
Returns:
train_op: Op for the training step.
"""
# We overwrite the default train op creation since we only want to train
# with a subset of the variables.
variables_to_train = self.get_trainable_variables()
summarize_gradients = self._summarize_gradients
if self.is_device_tpu:
# TPUs don't support summaries up until now. Hence, we overwrite the user
# provided summarize_gradients option to False.
if self._summarize_gradients:
logging.info('We cannot use summarize_gradients on TPUs.')
summarize_gradients = False
return contrib_training.create_train_op(
loss,
optimizer,
summarize_gradients=summarize_gradients,
variables_to_train=variables_to_train,
update_ops=update_ops)
def model_train_fn(self,
features,
labels,
inference_outputs,
mode,
config=None,
params=None):
"""See base class."""
del mode, config, params
self.loss_fn(features, labels, inference_outputs)
return tf.losses.get_total_loss()
class DefaultGrasping44ImagePreprocessor(
spec_transformation_preprocessor.SpecTransformationPreprocessor):
"""The default preprocessor for the Grasping44.
This preprocessor takes the feature and label specs from the model and
alters some of the specs, e.g. image conversions. The default processor
does not list it's in_*_specification and out_*_specification explicitly since
it is very close to the model and only performs the minimal required model
specific changes. New more general preprocessors should list their
in_*_specification as well as out_*_specification.
"""
def _transform_in_feature_specification(
self, tensor_spec_struct
):
"""The specification for the input features for the preprocess_fn.
Here we will transform the feature spec to represent the requirements
for preprocessing.
Args:
tensor_spec_struct: A flat spec structure {str: TensorSpec}.
Returns:
tensor_spec_struct: The transformed flat spec structure {str:
TensorSpec}.
"""
self.update_spec(
tensor_spec_struct,
'state/image',
shape=(512, 640, 3),
dtype=tf.uint8,
data_format='jpeg')
return tensor_spec_struct
def _preprocess_fn(
self, features,
labels,
mode
):
"""The preprocessing function which will be executed prior to the model_fn.
Args:
features: The input features extracted from a single example in our
in_feature_specification format.
labels: (Optional) The input labels extracted from a single example in our
in_label_specification format.
mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
Returns:
features: The preprocessed features, potentially adding
additional tensors derived from the input features.
labels: (Optional) The preprocessed labels, potentially
adding additional tensors derived from the input features and labels.
"""
if mode == TRAIN:
image = image_transformations.RandomCropImages(
[features.state.image], INPUT_SHAPE, TARGET_SHAPE)[0]
else:
image = image_transformations.CenterCropImages(
[features.state.image], INPUT_SHAPE, TARGET_SHAPE)[0]
image = tf.image.convert_image_dtype(image, tf.float32)
if mode == TRAIN:
image = (
image_transformations.ApplyPhotometricImageDistortions([image])[0])
features.state.image = image
return features, labels
@gin.configurable
class Grasping44E2EOpenCloseTerminateGripperStatusHeightToBottom(
LegacyGraspingModelWrapper):
"""QT-Opt T2R model."""
def __init__(self, action_batch_size=None, **hparams):
"""Port of Grasping44E2EOpenCloseTerminateGripperStatusHeightToBottom model.
The Grasping44 model which controls gripper open/close actions. This model
maintains current gripper status as part of the state. Good performance with
Q-Learning on kuka_e2e_grasping task.
Args:
action_batch_size: If specified, the size of action minibatches used in
PREDICT mode.
**hparams: Args to be passed to the parent class.
"""
super(Grasping44E2EOpenCloseTerminateGripperStatusHeightToBottom,
self).__init__(
action_batch_size=action_batch_size, **hparams)
def get_state_specification(self):
image_spec = tensorspec_utils.ExtendedTensorSpec(
shape=(472, 472, 3), dtype=tf.float32, name='image_1')
return tensorspec_utils.TensorSpecStruct(image=image_spec)
def get_action_specification(self):
close_gripper_spec = tensorspec_utils.ExtendedTensorSpec(
shape=(1,), dtype=tf.float32, name='close_gripper')
open_gripper_spec = tensorspec_utils.ExtendedTensorSpec(
shape=(1,), dtype=tf.float32, name='open_gripper')
terminate_episode_spec = tensorspec_utils.ExtendedTensorSpec(
shape=(1,), dtype=tf.float32, name='terminate_episode')
gripper_closed_spec = tensorspec_utils.ExtendedTensorSpec(
shape=(1,), dtype=tf.float32, name='gripper_closed')
world_vector_spec = tensorspec_utils.ExtendedTensorSpec(
shape=(3), dtype=tf.float32, name='world_vector')
vertical_rotation_spec = tensorspec_utils.ExtendedTensorSpec(
shape=(2), dtype=tf.float32, name='vertical_rotation')
height_to_bottom_spec = tensorspec_utils.ExtendedTensorSpec(
shape=(1,), dtype=tf.float32, name='height_to_bottom')
return tensorspec_utils.TensorSpecStruct(
world_vector=world_vector_spec,
vertical_rotation=vertical_rotation_spec,
close_gripper=close_gripper_spec,
open_gripper=open_gripper_spec,
terminate_episode=terminate_episode_spec,
gripper_closed=gripper_closed_spec,
height_to_bottom=height_to_bottom_spec)
@property
def default_preprocessor_cls(self):
return DefaultGrasping44ImagePreprocessor
def q_func(self,
features,
scope,
mode,
config = None,
params = None,
reuse=tf.AUTO_REUSE,
goal_vector_fn=None,
goal_spatial_fn=None):
base_model = self.create_legacy_model()
concat_axis = 1
if mode == PREDICT and self._tile_actions_for_predict:
concat_axis = 2
images = [None, features.state.image]
grasp_params = base_model.create_grasp_params_input(
features.action.to_dict(), concat_axis)
is_training = mode == TRAIN
_, end_points = base_model.model(
images,
grasp_params,
goal_spatial_fn=goal_spatial_fn,
goal_vector_fn=goal_vector_fn,
is_training=is_training)
return {
'q_predicted': end_points['predictions'],
'global_step': self.get_global_step()
}
def pack_features(self, *policy_inputs):
return pack_features_kuka_e2e(self, *policy_inputs)
@property
def legacy_model_class(self):
return networks.Grasping44E2EOpenCloseTerminateGripperStatusHeightToBottom
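# Usage sketch (illustrative, not part of the original file; constructor
# arguments are normally supplied via gin, so the value below is an assumption):
#   model = Grasping44E2EOpenCloseTerminateGripperStatusHeightToBottom(
#       action_batch_size=64)
#   preprocessor_cls = model.default_preprocessor_cls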
|
[
"[email protected]"
] | |
224346578cbb7fb74119a3175e7fac840954cc3e
|
bf60236048450e951b994d66666edd4f73be5101
|
/application/frontend/views.py
|
80bfb6ac9c738a29fa388e03e36b2a262d0f01d2
|
[
"MIT"
] |
permissive
|
DBeath/testing-flask-tutorial
|
0fe756beffc44ef49e55493e337022a263350f20
|
1eecb2c49c19d0ced001f164c11f3d0dfe5b9d7a
|
refs/heads/master
| 2021-01-12T02:50:22.130064 | 2017-04-26T15:09:28 | 2017-04-26T15:09:28 | 78,115,654 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,620 |
py
|
from flask import request, make_response, abort, Response, jsonify
from application.frontend import frontend_blueprint as bp
from application.frontend.models import User, user_schema
from application.database import db
@bp.route('/')
def index():
return 'Hello World!'
def add(a, b):
return int(a) + int(b)
@bp.route('/add', methods=['POST'])
def add_view():
a = request.args.get('input1')
b = request.args.get('input2')
if not a or not b:
return abort(400)
result = add(a, b)
return make_response(str(result))
@bp.route('/users')
def get_users():
users = User.query.all()
users_dump = user_schema.dump(users, many=True).data
return jsonify(users_dump)
@bp.route('/users', methods=['POST'])
def create_user():
name = request.args.get('name')
user = User(name=name)
db.session.add(user)
db.session.commit()
user_dump = user_schema.dump(user).data
return jsonify(user=user_dump)
@bp.route('/users/<int:user_id>', methods=['GET', 'POST'])
def single_user(user_id):
user = User.query.filter_by(id=user_id).first()
if user is None:
abort(404)
    if request.method == 'POST':
name = request.args.get('name')
user.name = name
db.session.commit()
user_dump = user_schema.dump(user).data
return jsonify(user=user_dump)
@bp.route('/users/<int:user_id>/delete', methods=['POST'])
def delete_user(user_id):
user = User.query.filter_by(id=user_id).first()
if user is None:
abort(404)
db.session.delete(user)
db.session.commit()
    return Response(status=200)
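# Example request against the add endpoint (sketch; assumes the blueprint is
# mounted at the application root -- note that inputs are read from the query
# string, not the form body):
#   curl -X POST 'http://localhost:5000/add?input1=2&input2=3'   # -> "5"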
|
[
"[email protected]"
] | |
a03a4f8c39aa405e97e2e3b48f55af9f1dcf2cb6
|
ef6229d281edecbea3faad37830cb1d452d03e5b
|
/ucsmsdk/mometa/ether/EtherSwitchIntFIo.py
|
87672b94c32dcffbd4ebf9a682a5a834eb655104
|
[
"Apache-2.0"
] |
permissive
|
anoop1984/python_sdk
|
0809be78de32350acc40701d6207631322851010
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
refs/heads/master
| 2020-12-31T00:18:57.415950 | 2016-04-26T17:39:38 | 2016-04-26T17:39:38 | 57,148,449 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 18,333 |
py
|
"""This module contains the general information for EtherSwitchIntFIo ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class EtherSwitchIntFIoConsts():
ACK_ACK_IN_PROGRESS = "ack-in-progress"
ACK_ACKNOWLEDGED = "acknowledged"
ACK_AUTO_ACK = "auto-ack"
ACK_EVALUATION = "evaluation"
ACK_OK = "ok"
ACK_REMOVING = "removing"
ACK_UN_ACKNOWLEDGED = "un-acknowledged"
ACK_UN_INITIALIZED = "un-initialized"
ACK_UNSUPPORTED_CONNECTIVITY = "unsupported-connectivity"
ADMIN_STATE_DISABLED = "disabled"
ADMIN_STATE_ENABLED = "enabled"
CHASSIS_ID_N_A = "N/A"
DEL_FE_TS_NEVER = "never"
DISCOVERY_ABSENT = "absent"
DISCOVERY_MIS_CONNECT = "mis-connect"
DISCOVERY_MISSING = "missing"
DISCOVERY_NEW = "new"
DISCOVERY_PRESENT = "present"
DISCOVERY_UN_INITIALIZED = "un-initialized"
ENCAP_DOT1Q = "dot1q"
ENCAP_ISL = "isl"
ENCAP_NEGOTIATE = "negotiate"
ENCAP_PROPRIETARY = "proprietary"
ENCAP_UNKNOWN = "unknown"
IF_ROLE_DIAG = "diag"
IF_ROLE_FCOE_NAS_STORAGE = "fcoe-nas-storage"
IF_ROLE_FCOE_STORAGE = "fcoe-storage"
IF_ROLE_FCOE_UPLINK = "fcoe-uplink"
IF_ROLE_MGMT = "mgmt"
IF_ROLE_MONITOR = "monitor"
IF_ROLE_NAS_STORAGE = "nas-storage"
IF_ROLE_NETWORK = "network"
IF_ROLE_NETWORK_FCOE_UPLINK = "network-fcoe-uplink"
IF_ROLE_SERVER = "server"
IF_ROLE_SERVICE = "service"
IF_ROLE_STORAGE = "storage"
IF_ROLE_UNKNOWN = "unknown"
IF_TYPE_AGGREGATION = "aggregation"
IF_TYPE_PHYSICAL = "physical"
IF_TYPE_UNKNOWN = "unknown"
IF_TYPE_VIRTUAL = "virtual"
MODE_E = "E"
MODE_F = "F"
MODE_SD = "SD"
MODE_ACCESS = "access"
MODE_FABRIC = "fabric"
MODE_N_PROXY = "n_proxy"
MODE_PROMISCUOUS_ACCESS = "promiscuousAccess"
MODE_PROMISCUOUS_TRUNK = "promiscuousTrunk"
MODE_TRUNK = "trunk"
MODE_UNKNOWN = "unknown"
MODE_VNTAG = "vntag"
NEW_FE_TS_NEVER = "never"
OPER_STATE_ADMIN_DOWN = "admin-down"
OPER_STATE_DOWN = "down"
OPER_STATE_ERROR_DISABLED = "error-disabled"
OPER_STATE_FAILED = "failed"
OPER_STATE_HARDWARE_FAILURE = "hardware-failure"
OPER_STATE_INDETERMINATE = "indeterminate"
OPER_STATE_LINK_DOWN = "link-down"
OPER_STATE_LINK_UP = "link-up"
OPER_STATE_NO_LICENSE = "no-license"
OPER_STATE_SFP_NOT_PRESENT = "sfp-not-present"
OPER_STATE_SOFTWARE_FAILURE = "software-failure"
OPER_STATE_UDLD_AGGR_DOWN = "udld-aggr-down"
OPER_STATE_UP = "up"
PEER_CHASSIS_ID_N_A = "N/A"
SWITCH_ID_A = "A"
SWITCH_ID_B = "B"
SWITCH_ID_NONE = "NONE"
XCVR_TYPE_1000BASECX = "1000basecx"
XCVR_TYPE_1000BASELH = "1000baselh"
XCVR_TYPE_1000BASELX = "1000baselx"
XCVR_TYPE_1000BASESX = "1000basesx"
XCVR_TYPE_1000BASET = "1000baset"
XCVR_TYPE_1000BASEUNKNOWN = "1000baseunknown"
XCVR_TYPE_1000BASEVX = "1000basevx"
XCVR_TYPE_1000BASEX = "1000basex"
XCVR_TYPE_1000BASEZX = "1000basezx"
XCVR_TYPE_10GBASEER = "10gbaseer"
XCVR_TYPE_10GBASELR = "10gbaselr"
XCVR_TYPE_10GBASELRM = "10gbaselrm"
XCVR_TYPE_10GBASESR = "10gbasesr"
XCVR_TYPE_10GBASEZR = "10gbasezr"
XCVR_TYPE_CWDM1471 = "cwdm1471"
XCVR_TYPE_CWDM1531 = "cwdm1531"
XCVR_TYPE_CWDM1551 = "cwdm1551"
XCVR_TYPE_DWDMSFP = "dwdmsfp"
XCVR_TYPE_FET = "fet"
XCVR_TYPE_H10GACU10M = "h10gacu10m"
XCVR_TYPE_H10GACU15M = "h10gacu15m"
XCVR_TYPE_H10GACU1M = "h10gacu1m"
XCVR_TYPE_H10GACU3M = "h10gacu3m"
XCVR_TYPE_H10GACU5M = "h10gacu5m"
XCVR_TYPE_H10GACU7M = "h10gacu7m"
XCVR_TYPE_H10GACUAOC10M = "h10gacuaoc10m"
XCVR_TYPE_H10GACUAOC15M = "h10gacuaoc15m"
XCVR_TYPE_H10GACUAOC1M = "h10gacuaoc1m"
XCVR_TYPE_H10GACUAOC2M = "h10gacuaoc2m"
XCVR_TYPE_H10GACUAOC3M = "h10gacuaoc3m"
XCVR_TYPE_H10GACUAOC5M = "h10gacuaoc5m"
XCVR_TYPE_H10GACUAOC7M = "h10gacuaoc7m"
XCVR_TYPE_H10GAOC10M = "h10gaoc10m"
XCVR_TYPE_H10GAOC1M = "h10gaoc1m"
XCVR_TYPE_H10GAOC2M = "h10gaoc2m"
XCVR_TYPE_H10GAOC3M = "h10gaoc3m"
XCVR_TYPE_H10GAOC5M = "h10gaoc5m"
XCVR_TYPE_H10GAOC7M = "h10gaoc7m"
XCVR_TYPE_H10GCU10M = "h10gcu10m"
XCVR_TYPE_H10GCU1M = "h10gcu1m"
XCVR_TYPE_H10GCU2M = "h10gcu2m"
XCVR_TYPE_H10GCU3M = "h10gcu3m"
XCVR_TYPE_H10GCU5M = "h10gcu5m"
XCVR_TYPE_H10GCU7M = "h10gcu7m"
XCVR_TYPE_H10GLRMSM = "h10glrmsm"
XCVR_TYPE_H10GUSR = "h10gusr"
XCVR_TYPE_QSFP40GCR4 = "qsfp40gcr4"
XCVR_TYPE_QSFP40GCSR4 = "qsfp40gcsr4"
XCVR_TYPE_QSFP40GFET = "qsfp40gfet"
XCVR_TYPE_QSFP40GLR4 = "qsfp40glr4"
XCVR_TYPE_QSFP40GSR4 = "qsfp40gsr4"
XCVR_TYPE_QSFP40GSRBD = "qsfp40gsrbd"
XCVR_TYPE_QSFP4SFP10GCU1M = "qsfp4sfp10gcu1m"
XCVR_TYPE_QSFP4SFP10GCU2M = "qsfp4sfp10gcu2m"
XCVR_TYPE_QSFP4SFP10GCU3M = "qsfp4sfp10gcu3m"
XCVR_TYPE_QSFP4SFP10GCU5M = "qsfp4sfp10gcu5m"
XCVR_TYPE_QSFP4X10GA0C10M = "qsfp4x10ga0c10m"
XCVR_TYPE_QSFP4X10GA0C1M = "qsfp4x10ga0c1m"
XCVR_TYPE_QSFP4X10GA0C2M = "qsfp4x10ga0c2m"
XCVR_TYPE_QSFP4X10GA0C3M = "qsfp4x10ga0c3m"
XCVR_TYPE_QSFP4X10GA0C5M = "qsfp4x10ga0c5m"
XCVR_TYPE_QSFP4X10GA0C7M = "qsfp4x10ga0c7m"
XCVR_TYPE_QSFP4X10GA0CUNKNOWN = "qsfp4x10ga0cunknown"
XCVR_TYPE_QSFP4X10GAC10M = "qsfp4x10gac10m"
XCVR_TYPE_QSFP4X10GAC1M = "qsfp4x10gac1m"
XCVR_TYPE_QSFP4X10GAC3M = "qsfp4x10gac3m"
XCVR_TYPE_QSFP4X10GAC5M = "qsfp4x10gac5m"
XCVR_TYPE_QSFP4X10GAC7M = "qsfp4x10gac7m"
XCVR_TYPE_QSFP4X10GLR = "qsfp4x10glr"
XCVR_TYPE_QSFPH40GACU10M = "qsfph40gacu10m"
XCVR_TYPE_QSFPH40GACU1M = "qsfph40gacu1m"
XCVR_TYPE_QSFPH40GACU3M = "qsfph40gacu3m"
XCVR_TYPE_QSFPH40GACU5M = "qsfph40gacu5m"
XCVR_TYPE_QSFPH40GACU7M = "qsfph40gacu7m"
XCVR_TYPE_QSFPH40GAOC10M = "qsfph40gaoc10m"
XCVR_TYPE_QSFPH40GAOC15M = "qsfph40gaoc15m"
XCVR_TYPE_QSFPH40GAOC1M = "qsfph40gaoc1m"
XCVR_TYPE_QSFPH40GAOC2M = "qsfph40gaoc2m"
XCVR_TYPE_QSFPH40GAOC3M = "qsfph40gaoc3m"
XCVR_TYPE_QSFPH40GAOC5M = "qsfph40gaoc5m"
XCVR_TYPE_QSFPH40GAOC7M = "qsfph40gaoc7m"
XCVR_TYPE_QSFPH40GAOCUNKNOWN = "qsfph40gaocunknown"
XCVR_TYPE_QSFPH40GCU1M = "qsfph40gcu1m"
XCVR_TYPE_QSFPH40GCU2M = "qsfph40gcu2m"
XCVR_TYPE_QSFPH40GCU3M = "qsfph40gcu3m"
XCVR_TYPE_QSFPH40GCU5M = "qsfph40gcu5m"
XCVR_TYPE_QSFPLOOP = "qsfploop"
XCVR_TYPE_QSFPQSA = "qsfpqsa"
XCVR_TYPE_QSFPUNKNOWN = "qsfpunknown"
XCVR_TYPE_SFP = "sfp"
XCVR_TYPE_UNKNOWN = "unknown"
XCVR_TYPE_X2 = "x2"
class EtherSwitchIntFIo(ManagedObject):
"""This is EtherSwitchIntFIo class."""
consts = EtherSwitchIntFIoConsts()
naming_props = set([u'portId'])
mo_meta = MoMeta("EtherSwitchIntFIo", "etherSwitchIntFIo", "port-[port_id]", VersionMeta.Version101e, "InputOutput", 0x7f, [], ["read-only"], [u'portGroup'], [u'equipmentXcvr', u'etherNiErrStats', u'faultInst', u'portDomainEp'], ["Get"])
prop_meta = {
"ack": MoPropertyMeta("ack", "ack", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ack-in-progress", "acknowledged", "auto-ack", "evaluation", "ok", "removing", "un-acknowledged", "un-initialized", "unsupported-connectivity"], []),
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["disabled", "enabled"], []),
"aggr_port_id": MoPropertyMeta("aggr_port_id", "aggrPortId", "uint", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"chassis_id": MoPropertyMeta("chassis_id", "chassisId", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["N/A"], ["0-255"]),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"del_fe_ts": MoPropertyMeta("del_fe_ts", "delFeTs", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, None, ["never"], ["0-4294967295"]),
"discovery": MoPropertyMeta("discovery", "discovery", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["absent", "mis-connect", "missing", "new", "present", "un-initialized"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"encap": MoPropertyMeta("encap", "encap", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["dot1q", "isl", "negotiate", "proprietary", "unknown"], []),
"ep_dn": MoPropertyMeta("ep_dn", "epDn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"flt_aggr": MoPropertyMeta("flt_aggr", "fltAggr", "ulong", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"if_role": MoPropertyMeta("if_role", "ifRole", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["diag", "fcoe-nas-storage", "fcoe-storage", "fcoe-uplink", "mgmt", "monitor", "nas-storage", "network", "network-fcoe-uplink", "server", "service", "storage", "unknown"], []),
"if_type": MoPropertyMeta("if_type", "ifType", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["aggregation", "physical", "unknown", "virtual"], []),
"locale": MoPropertyMeta("locale", "locale", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|server|chassis|internal|external),){0,5}(defaultValue|unknown|server|chassis|internal|external){0,1}""", [], []),
"mac_addr": MoPropertyMeta("mac_addr", "macAddr", "string", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, None, None, None, r"""(([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F]))|0""", [], []),
"mode": MoPropertyMeta("mode", "mode", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["E", "F", "SD", "access", "fabric", "n_proxy", "promiscuousAccess", "promiscuousTrunk", "trunk", "unknown", "vntag"], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"new_fe_ts": MoPropertyMeta("new_fe_ts", "newFeTs", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, None, ["never"], ["0-4294967295"]),
"oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["admin-down", "down", "error-disabled", "failed", "hardware-failure", "indeterminate", "link-down", "link-up", "no-license", "sfp-not-present", "software-failure", "udld-aggr-down", "up"], []),
"peer_aggr_port_id": MoPropertyMeta("peer_aggr_port_id", "peerAggrPortId", "uint", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"peer_chassis_id": MoPropertyMeta("peer_chassis_id", "peerChassisId", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["N/A"], ["0-255"]),
"peer_dn": MoPropertyMeta("peer_dn", "peerDn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"peer_port_id": MoPropertyMeta("peer_port_id", "peerPortId", "uint", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"peer_slot_id": MoPropertyMeta("peer_slot_id", "peerSlotId", "uint", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"port_id": MoPropertyMeta("port_id", "portId", "uint", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x10, None, None, None, [], []),
"revision": MoPropertyMeta("revision", "revision", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"serial": MoPropertyMeta("serial", "serial", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"slot_id": MoPropertyMeta("slot_id", "slotId", "uint", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"state_qual": MoPropertyMeta("state_qual", "stateQual", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["A", "B", "NONE"], []),
"transport": MoPropertyMeta("transport", "transport", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|ether|dce|fc),){0,4}(defaultValue|unknown|ether|dce|fc){0,1}""", [], []),
"ts": MoPropertyMeta("ts", "ts", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"xcvr_type": MoPropertyMeta("xcvr_type", "xcvrType", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["1000basecx", "1000baselh", "1000baselx", "1000basesx", "1000baset", "1000baseunknown", "1000basevx", "1000basex", "1000basezx", "10gbaseer", "10gbaselr", "10gbaselrm", "10gbasesr", "10gbasezr", "cwdm1471", "cwdm1531", "cwdm1551", "dwdmsfp", "fet", "h10gacu10m", "h10gacu15m", "h10gacu1m", "h10gacu3m", "h10gacu5m", "h10gacu7m", "h10gacuaoc10m", "h10gacuaoc15m", "h10gacuaoc1m", "h10gacuaoc2m", "h10gacuaoc3m", "h10gacuaoc5m", "h10gacuaoc7m", "h10gaoc10m", "h10gaoc1m", "h10gaoc2m", "h10gaoc3m", "h10gaoc5m", "h10gaoc7m", "h10gcu10m", "h10gcu1m", "h10gcu2m", "h10gcu3m", "h10gcu5m", "h10gcu7m", "h10glrmsm", "h10gusr", "qsfp40gcr4", "qsfp40gcsr4", "qsfp40gfet", "qsfp40glr4", "qsfp40gsr4", "qsfp40gsrbd", "qsfp4sfp10gcu1m", "qsfp4sfp10gcu2m", "qsfp4sfp10gcu3m", "qsfp4sfp10gcu5m", "qsfp4x10ga0c10m", "qsfp4x10ga0c1m", "qsfp4x10ga0c2m", "qsfp4x10ga0c3m", "qsfp4x10ga0c5m", "qsfp4x10ga0c7m", "qsfp4x10ga0cunknown", "qsfp4x10gac10m", "qsfp4x10gac1m", "qsfp4x10gac3m", "qsfp4x10gac5m", "qsfp4x10gac7m", "qsfp4x10glr", "qsfph40gacu10m", "qsfph40gacu1m", "qsfph40gacu3m", "qsfph40gacu5m", "qsfph40gacu7m", "qsfph40gaoc10m", "qsfph40gaoc15m", "qsfph40gaoc1m", "qsfph40gaoc2m", "qsfph40gaoc3m", "qsfph40gaoc5m", "qsfph40gaoc7m", "qsfph40gaocunknown", "qsfph40gcu1m", "qsfph40gcu2m", "qsfph40gcu3m", "qsfph40gcu5m", "qsfploop", "qsfpqsa", "qsfpunknown", "sfp", "unknown", "x2"], []),
}
prop_map = {
"ack": "ack",
"adminState": "admin_state",
"aggrPortId": "aggr_port_id",
"chassisId": "chassis_id",
"childAction": "child_action",
"delFeTs": "del_fe_ts",
"discovery": "discovery",
"dn": "dn",
"encap": "encap",
"epDn": "ep_dn",
"fltAggr": "flt_aggr",
"ifRole": "if_role",
"ifType": "if_type",
"locale": "locale",
"macAddr": "mac_addr",
"mode": "mode",
"model": "model",
"name": "name",
"newFeTs": "new_fe_ts",
"operState": "oper_state",
"peerAggrPortId": "peer_aggr_port_id",
"peerChassisId": "peer_chassis_id",
"peerDn": "peer_dn",
"peerPortId": "peer_port_id",
"peerSlotId": "peer_slot_id",
"portId": "port_id",
"revision": "revision",
"rn": "rn",
"sacl": "sacl",
"serial": "serial",
"slotId": "slot_id",
"stateQual": "state_qual",
"status": "status",
"switchId": "switch_id",
"transport": "transport",
"ts": "ts",
"type": "type",
"vendor": "vendor",
"xcvrType": "xcvr_type",
}
def __init__(self, parent_mo_or_dn, port_id, **kwargs):
self._dirty_mask = 0
self.port_id = port_id
self.ack = None
self.admin_state = None
self.aggr_port_id = None
self.chassis_id = None
self.child_action = None
self.del_fe_ts = None
self.discovery = None
self.encap = None
self.ep_dn = None
self.flt_aggr = None
self.if_role = None
self.if_type = None
self.locale = None
self.mac_addr = None
self.mode = None
self.model = None
self.name = None
self.new_fe_ts = None
self.oper_state = None
self.peer_aggr_port_id = None
self.peer_chassis_id = None
self.peer_dn = None
self.peer_port_id = None
self.peer_slot_id = None
self.revision = None
self.sacl = None
self.serial = None
self.slot_id = None
self.state_qual = None
self.status = None
self.switch_id = None
self.transport = None
self.ts = None
self.type = None
self.vendor = None
self.xcvr_type = None
ManagedObject.__init__(self, "EtherSwitchIntFIo", parent_mo_or_dn, **kwargs)
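# Query sketch (illustrative, not part of the generated module; assumes a
# reachable UCS Manager and the standard ucsmsdk session APIs):
#   from ucsmsdk.ucshandle import UcsHandle
#   handle = UcsHandle("ucsm-host", "admin", "password")
#   handle.login()
#   for port in handle.query_classid("etherSwitchIntFIo"):
#       print(port.dn, port.oper_state)
#   handle.logout()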
|
[
"[email protected]"
] | |
ff118d7c1264b48508950a512dfbf4c0577d567f
|
a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea
|
/airflow/operators/python.py
|
920a75d08f7661a8ee7f409581b29eb96a52f62e
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
ishiis/airflow
|
4305794e36b611d01f49e3f2401be3dc49782670
|
292440d54f4db84aaf0c5a98cf5fcf34303f2fa8
|
refs/heads/master
| 2022-07-30T00:51:28.806940 | 2022-07-14T12:07:11 | 2022-07-14T12:07:11 | 209,801,072 | 1 | 0 |
Apache-2.0
| 2019-09-20T13:47:26 | 2019-09-20T13:47:26 | null |
UTF-8
|
Python
| false | false | 23,492 |
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import os
import pickle
import shutil
import sys
import types
import warnings
from tempfile import TemporaryDirectory
from textwrap import dedent
from typing import Any, Callable, Collection, Dict, Iterable, List, Mapping, Optional, Sequence, Union
import dill
from airflow.exceptions import AirflowException
from airflow.models.baseoperator import BaseOperator
from airflow.models.skipmixin import SkipMixin
from airflow.models.taskinstance import _CURRENT_CONTEXT
from airflow.utils.context import Context, context_copy_partial, context_merge
from airflow.utils.operator_helpers import KeywordParameters
from airflow.utils.process_utils import execute_in_subprocess
from airflow.utils.python_virtualenv import prepare_virtualenv, write_python_script
def task(python_callable: Optional[Callable] = None, multiple_outputs: Optional[bool] = None, **kwargs):
"""
Deprecated function that calls @task.python and allows users to turn a python function into
an Airflow task. Please use the following instead:
from airflow.decorators import task
@task
    def my_task(): ...
:param python_callable: A reference to an object that is callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. Dict will unroll to xcom values with keys as keys.
Defaults to False.
:return:
"""
# To maintain backwards compatibility, we import the task object into this file
# This prevents breakages in dags that use `from airflow.operators.python import task`
from airflow.decorators.python import python_task
warnings.warn(
"""airflow.operators.python.task is deprecated. Please use the following instead
from airflow.decorators import task
@task
    def my_task(): ...""",
DeprecationWarning,
stacklevel=2,
)
return python_task(python_callable=python_callable, multiple_outputs=multiple_outputs, **kwargs)
class PythonOperator(BaseOperator):
"""
Executes a Python callable
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PythonOperator`
When running your callable, Airflow will pass a set of keyword arguments that can be used in your
    function. This set of kwargs corresponds exactly to what you can use in your jinja templates.
For this to work, you need to define ``**kwargs`` in your function header, or you can add directly the
keyword arguments you would like to get - for example with the below code your callable will get
the values of ``ti`` and ``next_ds`` context variables.
With explicit arguments:
.. code-block:: python
def my_python_callable(ti, next_ds):
pass
With kwargs:
.. code-block:: python
def my_python_callable(**kwargs):
ti = kwargs["ti"]
next_ds = kwargs["next_ds"]
:param python_callable: A reference to an object that is callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function
:param op_args: a list of positional arguments that will get unpacked when
calling your callable
:param templates_dict: a dictionary where the values are templates that
will get templated by the Airflow engine sometime between
``__init__`` and ``execute`` takes place and are made available
in your callable's context after the template has been applied. (templated)
:param templates_exts: a list of file extensions to resolve while
processing templated fields, for examples ``['.sql', '.hql']``
:param show_return_value_in_logs: a bool value whether to show return_value
logs. Defaults to True, which allows return value log output.
        It can be set to False to prevent logging the return value, e.g. when returning huge data
        such as a large amount of XCom data transmitted to the Task API.
"""
template_fields: Sequence[str] = ('templates_dict', 'op_args', 'op_kwargs')
template_fields_renderers = {"templates_dict": "json", "op_args": "py", "op_kwargs": "py"}
BLUE = '#ffefeb'
ui_color = BLUE
# since we won't mutate the arguments, we should just do the shallow copy
# there are some cases we can't deepcopy the objects(e.g protobuf).
shallow_copy_attrs: Sequence[str] = (
'python_callable',
'op_kwargs',
)
mapped_arguments_validated_by_init = True
def __init__(
self,
*,
python_callable: Callable,
op_args: Optional[Collection[Any]] = None,
op_kwargs: Optional[Mapping[str, Any]] = None,
templates_dict: Optional[Dict[str, Any]] = None,
templates_exts: Optional[Sequence[str]] = None,
show_return_value_in_logs: bool = True,
**kwargs,
) -> None:
if kwargs.get("provide_context"):
warnings.warn(
"provide_context is deprecated as of 2.0 and is no longer required",
DeprecationWarning,
stacklevel=2,
)
kwargs.pop('provide_context', None)
super().__init__(**kwargs)
if not callable(python_callable):
raise AirflowException('`python_callable` param must be callable')
self.python_callable = python_callable
self.op_args = op_args or ()
self.op_kwargs = op_kwargs or {}
self.templates_dict = templates_dict
if templates_exts:
self.template_ext = templates_exts
self.show_return_value_in_logs = show_return_value_in_logs
def execute(self, context: Context) -> Any:
context_merge(context, self.op_kwargs, templates_dict=self.templates_dict)
self.op_kwargs = self.determine_kwargs(context)
return_value = self.execute_callable()
if self.show_return_value_in_logs:
self.log.info("Done. Returned value was: %s", return_value)
else:
self.log.info("Done. Returned value not shown")
return return_value
def determine_kwargs(self, context: Mapping[str, Any]) -> Mapping[str, Any]:
return KeywordParameters.determine(self.python_callable, self.op_args, context).unpacking()
def execute_callable(self):
"""
Calls the python callable with the given arguments.
:return: the return value of the call.
:rtype: any
"""
return self.python_callable(*self.op_args, **self.op_kwargs)
class BranchPythonOperator(PythonOperator, SkipMixin):
"""
Allows a workflow to "branch" or follow a path following the execution
of this task.
It derives the PythonOperator and expects a Python function that returns
a single task_id or list of task_ids to follow. The task_id(s) returned
should point to a task directly downstream from {self}. All other "branches"
or directly downstream tasks are marked with a state of ``skipped`` so that
these paths can't move forward. The ``skipped`` states are propagated
downstream to allow for the DAG state to fill up and the DAG run's state
to be inferred.
"""
def execute(self, context: Context) -> Any:
branch = super().execute(context)
# TODO: The logic should be moved to SkipMixin to be available to all branch operators.
if isinstance(branch, str):
branches = {branch}
elif isinstance(branch, list):
branches = set(branch)
elif branch is None:
branches = set()
else:
raise AirflowException("Branch callable must return either None, a task ID, or a list of IDs")
valid_task_ids = set(context["dag"].task_ids)
invalid_task_ids = branches - valid_task_ids
if invalid_task_ids:
raise AirflowException(
f"Branch callable must return valid task_ids. Invalid tasks found: {invalid_task_ids}"
)
self.skip_all_except(context['ti'], branch)
return branch
class ShortCircuitOperator(PythonOperator, SkipMixin):
"""
Allows a pipeline to continue based on the result of a ``python_callable``.
The ShortCircuitOperator is derived from the PythonOperator and evaluates the result of a
``python_callable``. If the returned result is False or a falsy value, the pipeline will be
short-circuited. Downstream tasks will be marked with a state of "skipped" based on the short-circuiting
mode configured. If the returned result is True or a truthy value, downstream tasks proceed as normal and
an ``XCom`` of the returned result is pushed.
The short-circuiting can be configured to either respect or ignore the ``trigger_rule`` set for
downstream tasks. If ``ignore_downstream_trigger_rules`` is set to True, the default setting, all
downstream tasks are skipped without considering the ``trigger_rule`` defined for tasks. However, if this
parameter is set to False, the direct downstream tasks are skipped but the specified ``trigger_rule`` for
other subsequent downstream tasks are respected. In this mode, the operator assumes the direct downstream
tasks were purposely meant to be skipped but perhaps not other subsequent tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ShortCircuitOperator`
:param ignore_downstream_trigger_rules: If set to True, all downstream tasks from this operator task will
        be skipped. This is the default behavior. If set to False, the direct downstream task(s) will be
        skipped but the ``trigger_rule`` defined for all other downstream tasks will be respected.
"""
def __init__(self, *, ignore_downstream_trigger_rules: bool = True, **kwargs) -> None:
super().__init__(**kwargs)
self.ignore_downstream_trigger_rules = ignore_downstream_trigger_rules
def execute(self, context: Context) -> Any:
condition = super().execute(context)
self.log.info("Condition result is %s", condition)
if condition:
self.log.info('Proceeding with downstream tasks...')
return condition
downstream_tasks = context['task'].get_flat_relatives(upstream=False)
self.log.debug("Downstream task IDs %s", downstream_tasks)
if downstream_tasks:
dag_run = context["dag_run"]
execution_date = dag_run.execution_date
if self.ignore_downstream_trigger_rules is True:
self.log.info("Skipping all downstream tasks...")
self.skip(dag_run, execution_date, downstream_tasks)
else:
self.log.info("Skipping downstream tasks while respecting trigger rules...")
# Explicitly setting the state of the direct, downstream task(s) to "skipped" and letting the
# Scheduler handle the remaining downstream task(s) appropriately.
self.skip(dag_run, execution_date, context["task"].get_direct_relatives(upstream=False))
self.log.info("Done.")
class PythonVirtualenvOperator(PythonOperator):
"""
Allows one to run a function in a virtualenv that is created and destroyed
automatically (with certain caveats).
The function must be defined using def, and not be
part of a class. All imports must happen inside the function
and no variables outside of the scope may be referenced. A global scope
variable named virtualenv_string_args will be available (populated by
string_args). In addition, one can pass stuff through op_args and op_kwargs, and one
can use a return value.
Note that if your virtualenv runs in a different Python major version than Airflow,
you cannot use return values, op_args, op_kwargs, or use any macros that are being provided to
Airflow through plugins. You can use string_args though.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PythonVirtualenvOperator`
:param python_callable: A python function with no references to outside variables,
defined with def, which will be run in a virtualenv
:param requirements: Either a list of requirement strings, or a (templated)
"requirements file" as specified by pip.
:param python_version: The Python version to run the virtualenv with. Note that
both 2 and 2.7 are acceptable forms.
:param use_dill: Whether to use dill to serialize
the args and result (pickle is default). This allow more complex types
but requires you to include dill in your requirements.
:param system_site_packages: Whether to include
system_site_packages in your virtualenv.
See virtualenv documentation for more information.
:param pip_install_options: a list of pip install options when installing requirements
See 'pip install -h' for available options
:param op_args: A list of positional arguments to pass to python_callable.
:param op_kwargs: A dict of keyword arguments to pass to python_callable.
:param string_args: Strings that are present in the global var virtualenv_string_args,
available to python_callable at runtime as a list[str]. Note that args are split
by newline.
:param templates_dict: a dictionary where the values are templates that
will get templated by the Airflow engine sometime between
``__init__`` and ``execute`` takes place and are made available
in your callable's context after the template has been applied
:param templates_exts: a list of file extensions to resolve while
processing templated fields, for examples ``['.sql', '.hql']``
"""
template_fields: Sequence[str] = tuple({'requirements'} | set(PythonOperator.template_fields))
template_ext: Sequence[str] = ('.txt',)
BASE_SERIALIZABLE_CONTEXT_KEYS = {
'ds',
'ds_nodash',
'inlets',
'next_ds',
'next_ds_nodash',
'outlets',
'prev_ds',
'prev_ds_nodash',
'run_id',
'task_instance_key_str',
'test_mode',
'tomorrow_ds',
'tomorrow_ds_nodash',
'ts',
'ts_nodash',
'ts_nodash_with_tz',
'yesterday_ds',
'yesterday_ds_nodash',
}
PENDULUM_SERIALIZABLE_CONTEXT_KEYS = {
'data_interval_end',
'data_interval_start',
'execution_date',
'logical_date',
'next_execution_date',
'prev_data_interval_end_success',
'prev_data_interval_start_success',
'prev_execution_date',
'prev_execution_date_success',
'prev_start_date_success',
}
AIRFLOW_SERIALIZABLE_CONTEXT_KEYS = {'macros', 'conf', 'dag', 'dag_run', 'task', 'params'}
def __init__(
self,
*,
python_callable: Callable,
requirements: Union[None, Iterable[str], str] = None,
python_version: Optional[Union[str, int, float]] = None,
use_dill: bool = False,
system_site_packages: bool = True,
pip_install_options: Optional[List[str]] = None,
op_args: Optional[Collection[Any]] = None,
op_kwargs: Optional[Mapping[str, Any]] = None,
string_args: Optional[Iterable[str]] = None,
templates_dict: Optional[Dict] = None,
templates_exts: Optional[List[str]] = None,
**kwargs,
):
if (
not isinstance(python_callable, types.FunctionType)
or isinstance(python_callable, types.LambdaType)
and python_callable.__name__ == "<lambda>"
):
raise AirflowException('PythonVirtualenvOperator only supports functions for python_callable arg')
if (
python_version
and str(python_version)[0] != str(sys.version_info.major)
and (op_args or op_kwargs)
):
raise AirflowException(
"Passing op_args or op_kwargs is not supported across different Python "
"major versions for PythonVirtualenvOperator. Please use string_args."
)
if not shutil.which("virtualenv"):
raise AirflowException('PythonVirtualenvOperator requires virtualenv, please install it.')
super().__init__(
python_callable=python_callable,
op_args=op_args,
op_kwargs=op_kwargs,
templates_dict=templates_dict,
templates_exts=templates_exts,
**kwargs,
)
if not requirements:
self.requirements: Union[List[str], str] = []
elif isinstance(requirements, str):
self.requirements = requirements
else:
self.requirements = list(requirements)
self.string_args = string_args or []
self.python_version = python_version
self.use_dill = use_dill
self.system_site_packages = system_site_packages
self.pip_install_options = pip_install_options
self.pickling_library = dill if self.use_dill else pickle
def execute(self, context: Context) -> Any:
serializable_keys = set(self._iter_serializable_context_keys())
serializable_context = context_copy_partial(context, serializable_keys)
return super().execute(context=serializable_context)
def determine_kwargs(self, context: Mapping[str, Any]) -> Mapping[str, Any]:
return KeywordParameters.determine(self.python_callable, self.op_args, context).serializing()
def execute_callable(self):
with TemporaryDirectory(prefix='venv') as tmp_dir:
requirements_file_name = f'{tmp_dir}/requirements.txt'
if not isinstance(self.requirements, str):
requirements_file_contents = "\n".join(str(dependency) for dependency in self.requirements)
else:
requirements_file_contents = self.requirements
if not self.system_site_packages and self.use_dill:
requirements_file_contents += '\ndill'
with open(requirements_file_name, 'w') as file:
file.write(requirements_file_contents)
if self.templates_dict:
self.op_kwargs['templates_dict'] = self.templates_dict
input_filename = os.path.join(tmp_dir, 'script.in')
output_filename = os.path.join(tmp_dir, 'script.out')
string_args_filename = os.path.join(tmp_dir, 'string_args.txt')
script_filename = os.path.join(tmp_dir, 'script.py')
prepare_virtualenv(
venv_directory=tmp_dir,
python_bin=f'python{self.python_version}' if self.python_version else None,
system_site_packages=self.system_site_packages,
requirements_file_path=requirements_file_name,
pip_install_options=self.pip_install_options,
)
self._write_args(input_filename)
self._write_string_args(string_args_filename)
write_python_script(
jinja_context=dict(
op_args=self.op_args,
op_kwargs=self.op_kwargs,
pickling_library=self.pickling_library.__name__,
python_callable=self.python_callable.__name__,
python_callable_source=self.get_python_source(),
),
filename=script_filename,
render_template_as_native_obj=self.dag.render_template_as_native_obj,
)
execute_in_subprocess(
cmd=[
f'{tmp_dir}/bin/python',
script_filename,
input_filename,
output_filename,
string_args_filename,
]
)
return self._read_result(output_filename)
def get_python_source(self):
"""
        Returns the dedented source code of ``self.python_callable``.
"""
return dedent(inspect.getsource(self.python_callable))
def _write_args(self, filename):
if self.op_args or self.op_kwargs:
with open(filename, 'wb') as file:
self.pickling_library.dump({'args': self.op_args, 'kwargs': self.op_kwargs}, file)
def _iter_serializable_context_keys(self):
yield from self.BASE_SERIALIZABLE_CONTEXT_KEYS
if self.system_site_packages or 'apache-airflow' in self.requirements:
yield from self.AIRFLOW_SERIALIZABLE_CONTEXT_KEYS
yield from self.PENDULUM_SERIALIZABLE_CONTEXT_KEYS
elif 'pendulum' in self.requirements:
yield from self.PENDULUM_SERIALIZABLE_CONTEXT_KEYS
def _write_string_args(self, filename):
with open(filename, 'w') as file:
file.write('\n'.join(map(str, self.string_args)))
def _read_result(self, filename):
if os.stat(filename).st_size == 0:
return None
with open(filename, 'rb') as file:
try:
return self.pickling_library.load(file)
except ValueError:
self.log.error(
"Error deserializing result. Note that result deserialization "
"is not supported across major Python versions."
)
raise
def __deepcopy__(self, memo):
        # module objects can't be copied at all
memo[id(self.pickling_library)] = self.pickling_library
return super().__deepcopy__(memo)
def get_current_context() -> Context:
"""
Obtain the execution context for the currently executing operator without
altering user method's signature.
This is the simplest method of retrieving the execution context dictionary.
**Old style:**
.. code:: python
def my_task(**context):
ti = context["ti"]
**New style:**
.. code:: python
from airflow.operators.python import get_current_context
def my_task():
context = get_current_context()
ti = context["ti"]
Current context will only have value if this method was called after an operator
was starting to execute.
"""
if not _CURRENT_CONTEXT:
raise AirflowException(
"Current context was requested but no context was found! "
"Are you running within an airflow task?"
)
return _CURRENT_CONTEXT[-1]
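# Minimal DAG sketch using PythonOperator (illustrative, not part of this
# module; the dag arguments below are assumptions):
#   import pendulum
#   from airflow import DAG
#   from airflow.operators.python import PythonOperator
#
#   with DAG(dag_id="example", start_date=pendulum.datetime(2022, 1, 1),
#            schedule_interval=None) as dag:
#       hello = PythonOperator(task_id="hello",
#                              python_callable=lambda: print("hello"))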
|
[
"[email protected]"
] | |
fa8fd1e5e8b9ef6df7d8391d8581f2b663c17a64
|
21e6fd368aee8acb80747141291a00f83fb67d1e
|
/python/WoT/SourcesRes/9.10/client/messenger/proto/xmpp/xmppserversettings.py
|
db4502d71aa9092ff1e3783f2c8d59a2f969d7d8
|
[] |
no_license
|
Infernux/Projects
|
e7d1eab9b25471c543aa82985ec0bfcca2cfe05e
|
da7a9f71231b76dafbc4c7348065f1fc2dead854
|
refs/heads/master
| 2023-08-24T09:17:23.834855 | 2023-08-05T14:18:46 | 2023-08-05T14:18:46 | 23,395,952 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,157 |
py
|
# 2015.01.14 22:41:28 CET
import types
from debug_utils import LOG_ERROR
from messenger.proto.interfaces import IProtoSettings
from messenger.proto.xmpp.gloox_wrapper import CONNECTION_IMPL_TYPE
from messenger.proto.xmpp.jid import JID
import random
_NUMBER_OF_ITEMS_IN_SAMPLE = 2
def _makeSample(*args):
queue = []
for seq in args:
count = min(len(seq), _NUMBER_OF_ITEMS_IN_SAMPLE)
queue.extend(random.sample(seq, count))
return queue
def _validateConnection(record):
result = True
if len(record) == 2:
(host, port,) = record
if not host:
result = False
if type(port) is not types.IntType:
result = False
else:
result = False
return result
class ConnectionsIterator(object):
def __init__(self, base = None, alt = None, bosh = None):
super(ConnectionsIterator, self).__init__()
self.__tcp = _makeSample(base or [], alt or [])
self.__bosh = _makeSample(bosh or [])
def __iter__(self):
return self
def __len__(self):
return len(self.__tcp) + len(self.__bosh)
def clear(self):
self.__tcp = []
self.__bosh = []
def hasNext(self):
return len(self.__tcp) > 0 or len(self.__bosh) > 0
def next(self):
if self.__tcp:
cType = CONNECTION_IMPL_TYPE.TCP
(host, port,) = self.__tcp.pop(0)
elif self.__bosh:
cType = CONNECTION_IMPL_TYPE.BOSH
(host, port,) = self.__bosh.pop(0)
else:
raise StopIteration
return (cType, host, port)
class XmppServerSettings(IProtoSettings):
__slots__ = ('enabled', 'connections', 'domain', 'port', 'resource', 'altConnections', 'boshConnections')
def __init__(self):
super(XmppServerSettings, self).__init__()
self.clear()
def __repr__(self):
return 'XmppServerSettings(enabled = {0!r:s}, connections = {1!r:s}, altConnections = {2!r:s}, boshConnections = {3!r:s}, domain = {4:>s}, port = {5:n}, resource = {6:>s})'.format(self.enabled, self.connections, self.altConnections, self.boshConnections, self.domain, self.port, self.resource)
def update(self, data):
if 'xmpp_connections' in data:
self.connections = filter(_validateConnection, data['xmpp_connections'])
else:
self.connections = []
if 'xmpp_alt_connections' in data:
self.altConnections = filter(_validateConnection, data['xmpp_alt_connections'])
else:
self.altConnections = []
if 'xmpp_bosh_connections' in data:
self.boshConnections = filter(_validateConnection, data['xmpp_bosh_connections'])
else:
self.boshConnections = []
if 'xmpp_host' in data:
self.domain = data['xmpp_host']
else:
self.domain = ''
if 'xmpp_port' in data:
self.port = data['xmpp_port']
else:
self.port = -1
if 'xmpp_resource' in data:
self.resource = data['xmpp_resource']
else:
self.resource = ''
if 'xmpp_enabled' in data:
self.enabled = data['xmpp_enabled']
if self.enabled and not self.connections and not self.altConnections and not self.boshConnections and not self.domain:
LOG_ERROR('Can not find host to connection. XMPP is disabled', self.connections, self.altConnections, self.domain)
self.enabled = False
else:
self.enabled = False
def clear(self):
self.enabled = False
self.connections = []
self.altConnections = []
self.boshConnections = []
self.domain = None
self.port = -1
self.resource = ''
def isEnabled(self):
return self.enabled
    def getFullJID(self, databaseID):
        # Reconstructed from the bytecode listing left behind by the failed
        # decompilation of this method.
        assert databaseID, "Player's databaseID can not be empty"
        jid = JID()
        jid.setNode(databaseID)
        jid.setDomain(self.domain)
        jid.setResource(self.resource)
        return jid
def getConnectionsIterator(self):
iterator = ConnectionsIterator(self.connections, self.altConnections, self.boshConnections)
if not iterator.hasNext():
iterator = ConnectionsIterator([(self.domain, self.port)])
return iterator
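# Usage sketch (illustrative, not part of the decompiled module): walk the
# candidate XMPP endpoints until one connects.
#   settings = XmppServerSettings()
#   settings.update(serverData)  # serverData: dict from server config (assumed)
#   it = settings.getConnectionsIterator()
#   while it.hasNext():
#       cType, host, port = it.next()
#       ...  # try to connect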
# 2015.01.14 22:41:28 CET
|
[
"[email protected]"
] | |
26ac13b6fb750d4aaa746aaca2f2568788d5d99b
|
e61749d3dd1999e938d494b8181753c3552f21de
|
/HW6/P3b.py
|
f2de3ec5ac9398e591ba37ef5a6bfbe953ee7c01
|
[] |
no_license
|
arsalan2400/HWSolutions
|
6b3f95b924769dd795a1c638d3e0b9d0c055c7ab
|
790625048f1addf3828a9f415e3237fd76d91a90
|
refs/heads/master
| 2020-03-12T18:18:23.685324 | 2018-04-22T01:23:48 | 2018-04-22T01:23:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 977 |
py
|
import HW6.Classes as Cls
import scr.FigureSupport as figureLibrary
# create multiple game sets
multipleGameSets=Cls.MultipleGameSets(ids=range(1000), prob_head=0.5, n_games_in_a_set=10)
# simulate all game sets
multipleGameSets.simulation()
# print projected mean reward
print('Projected mean reward',
multipleGameSets.get_mean_total_reward())
# print projection interval
print('95% projection interval of average rewards',
multipleGameSets.get_PI_total_reward(0.05))
# plot
figureLibrary.graph_histogram(
data=multipleGameSets.get_all_total_rewards(),
title="Histogram of gambler's total reward from playing the gam 10 times",
x_label='Mean Rewards',
y_label='Count')
print('We need a transient-state simulation from this perspective.')
print('We cannot rely on the Law of Large Numbers to make inferences because our data is very limited.')
print('Therefore, we must use the sample mean and projection intervals for interpretation.')
|
[
"[email protected]"
] | |
2b01fa0abe56f95d48e2e37f94a8e11566d7f6f9
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/17/usersdata/89/6051/submittedfiles/lecker.py
|
b34824dbc0c001becd11a56724debfad535a0e1b
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 77 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
# input
|
[
"[email protected]"
] | |
4c0d32ed51f42bfd3f2e7e885983d382928996d7
|
5c8139f1e57e06c7eaf603bd8fe74d9f22620513
|
/PartB/py是否为其他单词的前缀信息.py
|
9d72ce3ca43c4346a0d144a49300729dbb43f94a
|
[] |
no_license
|
madeibao/PythonAlgorithm
|
c8a11d298617d1abb12a72461665583c6a44f9d2
|
b4c8a75e724a674812b8a38c0202485776445d89
|
refs/heads/master
| 2023-04-03T07:18:49.842063 | 2021-04-11T12:02:40 | 2021-04-11T12:02:40 | 325,269,130 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,788 |
py
|
'''
You are given a string sentence (a sentence of words separated by single spaces) and a search word searchWord.
Check whether searchWord is a prefix of any word in sentence.
If searchWord is a prefix of some word, return the index of that word in the sentence (1-based).
If searchWord is a prefix of several words, return the index of the first matching word (the smallest index).
If searchWord is not a prefix of any word, return -1.
A "prefix" of a string S is any leading contiguous substring of S.
Example 1:
Input: sentence = "i love eating burger", searchWord = "burg"
Output: 4
Explanation: "burg" is a prefix of "burger", and "burger" is the 4th word in the sentence.
Example 2:
Input: sentence = "this problem is an easy problem", searchWord = "pro"
Output: 2
Explanation: "pro" is a prefix of "problem", which is both the 2nd and the 6th word, so the smallest index 2 is returned.
Source: LeetCode (LeetCode-CN)
Link: https://leetcode-cn.com/problems/check-if-a-word-occurs-as-a-prefix-of-any-word-in-a-sentence
Copyright belongs to LeetCode. Contact LeetCode for authorization before commercial reprints; cite the source for non-commercial reprints.
'''
#================================================================================================================================
class Solution:
def isPrefixOfWord(self, sentence: str, searchWord: str) -> int:
for num, word in enumerate(sentence.split(), 1):
if word.startswith(searchWord):
return num
return -1
if __name__ == "__main__":
s =Solution()
sentence = "i love eating burger"
searchWord = "burg"
print(s.isPrefixOfWord(sentence, searchWord))
|
[
"[email protected]"
] | |
1a18436938b2026c2e96cf6e849448ee6b385a49
|
0d701bb4c545c7753266b76e965a40bda93ea4b7
|
/weppy_bs3/ext.py
|
e21b0329aaf4ee9da18c9151e71bcea0e8d2c5da
|
[
"BSD-3-Clause"
] |
permissive
|
johnmahugu/weppy-bs3
|
a58732974ab87915580a5a644de5cd8d8006edfa
|
11270ae5bbbd2484db63766248f24d24328b985d
|
refs/heads/master
| 2020-04-08T16:03:02.694954 | 2017-10-23T16:57:58 | 2017-10-23T16:57:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,745 |
py
|
# -*- coding: utf-8 -*-
"""
weppy_bs3.ext
-------------
Provides the bootstrap3 extension for weppy
:copyright: (c) 2015 by Giovanni Barillari
:license: BSD, see LICENSE for more details.
"""
import os
import shutil
from weppy.extensions import Extension, TemplateExtension, TemplateLexer
from weppy.forms import FormStyle
from weppy.html import tag, asis
class BS3(Extension):
default_static_folder = 'bs3'
default_config = dict(
set_as_default_style=True,
static_folder='bs3',
date_format="DD/MM/YYYY",
time_format="HH:mm:ss",
datetime_format="DD/MM/YYYY HH:mm:ss",
time_pickseconds=True,
icon_time='fa fa-clock-o',
icon_date='fa fa-calendar',
icon_up='fa fa-arrow-up',
icon_down='fa fa-arrow-down'
)
assets = [
'bootstrap.min.js',
'bootstrap.min.css',
'moment.min.js',
'bootstrap-datetimepicker.min.js',
'bootstrap-datetimepicker.min.css']
def on_load(self):
# init and create required folder
self.env.folder = os.path.join(
self.app.static_path, self.config.static_folder)
if not os.path.exists(self.env.folder):
os.mkdir(self.env.folder)
# load assets and copy to app
for asset in self.assets:
static_file = os.path.join(self.env.folder, asset)
if not os.path.exists(static_file):
source_file = os.path.join(
os.path.dirname(__file__), 'assets', asset)
shutil.copy2(source_file, static_file)
# set formstyle if needed
if self.config.set_as_default_style:
self.app.config.ui.forms_style = BS3FormStyle
# init template extension
self.env.assets = self.assets
self.app.add_template_extension(BS3Template)
@property
def FormStyle(self):
return BS3FormStyle
class BS3Lexer(TemplateLexer):
evaluate_value = False
def process(self, ctx, value):
for asset in self.ext.env.assets:
file_ext = asset.rsplit(".", 1)[-1]
url = '/static/' + self.ext.config.static_folder + '/' + asset
if file_ext == 'js':
static = (
'<script type="text/javascript" src="' + url +
'"></script>')
elif file_ext == 'css':
static = (
'<link rel="stylesheet" href="' + url +
'" type="text/css">')
else:
continue
ctx.html(static)
## add font awesome from external cdn
url = (
'//maxcdn.bootstrapcdn.com/font-awesome/4.4.0/css/' +
'font-awesome.min.css')
static = '<link href="' + url + '" rel="stylesheet">'
ctx.html(static)
class BS3Template(TemplateExtension):
namespace = 'BS3'
lexers = {'include_bs3': BS3Lexer}
_datepicker_xml = """
<script type="text/javascript">
$(function() {
$('#%(divid)s').datetimepicker({
pickTime: false,
format: '%(format)s',
minDate: %(minDate)s,
maxDate: %(maxDate)s,
icons: {
date: "%(icon_date)s",
up: "%(icon_up)s",
down: "%(icon_down)s"
}
});
});
</script>"""
_timepicker_xml = """
<script type="text/javascript">
$(function() {
$('#%(divid)s').datetimepicker({
pickDate: false,
useSeconds: %(use_seconds)s,
format: '%(format)s',
icons: {
time: "%(icon_time)s",
up: "%(icon_up)s",
down: "%(icon_down)s"
}
});
});
</script>"""
_datetimepicker_xml = """
<script type="text/javascript">
$(function() {
$('#%(divid)s').datetimepicker({
useSeconds: %(use_seconds)s,
format: '%(format)s',
minDate: %(minDate)s,
maxDate: %(maxDate)s,
icons: {
date: "%(icon_date)s",
time: "%(icon_time)s",
up: "%(icon_up)s",
down: "%(icon_down)s"
}
});
});
</script>"""
class BS3FormStyle(FormStyle):
@staticmethod
def widget_bool(attr, field, value, _id=None):
return FormStyle.widget_bool(attr, field, value,
_class="bool checkbox", _id=_id)
@staticmethod
def widget_date(attr, field, value, _class='date', _id=None):
def load_js():
dformat = attr.get('date_format', attr['env'].date_format)
icon_up = attr.get('icon_up', attr['env'].icon_up)
icon_down = attr.get('icon_down', attr['env'].icon_down)
s = asis(_datepicker_xml % dict(
divid=fid + "_cat",
format=dformat,
minDate=dates["minDate"],
maxDate=dates["maxDate"],
icon_date=icon_date,
icon_up=icon_up,
icon_down=icon_down))
return s
icon_date = attr.get('icon_date', attr['env'].icon_date)
dates = {}
for dname in ["minDate", "maxDate"]:
if not attr.get(dname):
dates[dname] = '$.fn.datetimepicker.defaults.' + dname
else:
dates[dname] = '"' + attr[dname] + '"'
fid = _id or field.name
res = []
js = load_js()
res.append(
tag.input(
_name=field.name, _type='text', _id=fid, _class="form-control",
_value=str(value) if value is not None else ''))
res.append(
tag.span(tag.span(_class=icon_date), _class="input-group-addon"))
res.append(js)
return tag.div(*res, _id=fid + "_cat", _class='input-group date')
@staticmethod
def widget_time(attr, field, value, _class='time', _id=None):
def load_js():
tformat = attr.get('time_format', attr['env'].time_format)
icon_up = attr.get('icon_up', attr['env'].icon_up)
icon_down = attr.get('icon_down', attr['env'].icon_down)
pick_seconds = "true" if use_seconds else "false"
s = asis(
_timepicker_xml % dict(
divid=fid + "_cat",
format=tformat,
use_seconds=pick_seconds,
icon_time=icon_time,
icon_up=icon_up,
icon_down=icon_down))
return s
icon_time = attr.get('icon_time', attr['env'].icon_time)
use_seconds = attr.get('time_pickseconds',
attr['env'].time_pickseconds)
fid = _id or field.name
res = []
js = load_js()
_value = str(value) if value is not None else ''
if not use_seconds:
_value = _value[:-2]
res.append(
tag.input(
_name=field.name, _type='text', _id=fid, _class="form-control",
_value=_value))
res.append(
tag.span(tag.span(_class=icon_time), _class="input-group-addon"))
res.append(js)
return tag.div(*res, _id=fid + "_cat", _class='input-group time')
@staticmethod
def widget_datetime(attr, field, value, _class='datetime', _id=None):
def load_js():
dformat = attr.get('datetime_format', attr['env'].datetime_format)
icon_time = attr.get('icon_time', attr['env'].icon_time)
icon_up = attr.get('icon_up', attr['env'].icon_up)
icon_down = attr.get('icon_down', attr['env'].icon_down)
pick_seconds = "true" if use_seconds else "false"
s = asis(
_datetimepicker_xml % dict(
divid=fid + "_cat",
format=dformat,
use_seconds=pick_seconds,
minDate=dates["minDate"],
maxDate=dates["maxDate"],
icon_date=icon_date,
icon_time=icon_time,
icon_up=icon_up,
icon_down=icon_down))
return s
icon_date = attr.get('icon_date', attr['env'].icon_date)
dates = {}
for dname in ["minDate", "maxDate"]:
if not attr.get(dname):
dates[dname] = '$.fn.datetimepicker.defaults.' + dname
else:
dates[dname] = '"' + attr[dname] + '"'
use_seconds = attr.get(
'time_pickseconds', attr['env'].time_pickseconds)
fid = _id or field.name
res = []
js = load_js()
_value = str(value) if value is not None else ''
if not use_seconds:
_value = _value[:-2]
res.append(
tag.input(
_name=field.name, _type='text', _id=fid, _class="form-control",
_value=_value))
res.append(
tag.span(
tag.span(_class=icon_date), _class="input-group-addon"))
res.append(js)
return tag.div(*res, _id=fid + "_cat", _class='input-group datetime')
def on_start(self):
from weppy.expose import Expose
self.attr['env'] = Expose.application.ext.BS3.config
self.parent = tag.fieldset()
def style_widget(self, widget):
wtype = widget['_class'].split(' ')[0]
if wtype not in ["bool", "upload_wrap", "input-group"]:
widget['_class'] += " form-control"
def create_label(self, label):
wid = self.element.widget['_id']
return tag.label(label, _for=wid, _class='col-sm-2 control-label')
def create_comment(self, comment):
return tag.p(comment, _class='help-block')
def create_error(self, error):
return tag.p(error, _class='text-danger')
def add_widget(self, widget):
_class = 'form-group'
label = self.element.label
wrapper = tag.div(widget, _class='col-sm-10')
if self.element.error:
wrapper.append(self.element.error)
_class += ' has-error'
if self.element.comment:
wrapper.append(self.element.comment)
self.parent.append(tag.div(label, wrapper, _class=_class))
def add_buttons(self):
submit = tag.input(_type='submit', _value=self.attr['submit'],
_class='btn btn-primary')
buttons = tag.div(submit, _class="col-sm-10 col-sm-offset-2")
self.parent.append(tag.div(buttons, _class='form-group'))
def render(self):
self.attr['_class'] = self.attr.get('_class', 'form-horizontal')
return FormStyle.render(self)
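# A minimal usage sketch (assumes weppy's App.use_extension API; the app name
# and the date_format override are illustrative, not part of this module):
#
#     from weppy import App
#     from weppy_bs3 import BS3
#
#     app = App(__name__)
#     app.config.BS3.date_format = "YYYY-MM-DD"
#     app.use_extension(BS3)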
|
[
"[email protected]"
] | |
357507b900e424e19a92a66e71c1080beba97bfa
|
b47f2e3f3298388b1bcab3213bef42682985135e
|
/experiments/fdtd-2d/tmp_files/6239.py
|
fa47a2670566765c6d603faaaae743187fd6ebde
|
[
"BSD-2-Clause"
] |
permissive
|
LoopTilingBenchmark/benchmark
|
29cc9f845d323431e3d40e878cbfc6d1aad1f260
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
refs/heads/master
| 2020-09-25T09:45:31.299046 | 2019-12-04T23:25:06 | 2019-12-04T23:25:06 | 225,975,074 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 411 |
py
|
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/6239.c')
procedure('kernel_fdtd_2d')
loop(0)
known(' nx > 1 ')
known(' ny > 1 ')
tile(1,2,16,2)
tile(1,4,64,4)
tile(2,2,16,2)
tile(2,4,64,4)
tile(3,2,16,2)
tile(3,4,64,4)
|
[
"[email protected]"
] | |
522aad1fd4649f79eca749cf445bf4427de2ad6b
|
160a83456fd1d9c79b4995405ca8954c81f5ae47
|
/my_first_app_22679/settings.py
|
0ed661f02f55e690e3661bd29d401e7183d8f76d
|
[] |
no_license
|
crowdbotics-apps/my-first-app-22679
|
1aeaadde640feb814ecc0ec29812da7d2e5fff21
|
35ec5e6b1ea1f71fa546cfdf1061c8a9a54cd97e
|
refs/heads/master
| 2023-02-04T08:36:29.985627 | 2020-11-30T18:18:18 | 2020-11-30T18:18:18 | 313,410,622 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,779 |
py
|
"""
Django settings for my_first_app_22679 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
"storages",
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "my_first_app_22679.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "my_first_app_22679.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID
and AWS_SECRET_ACCESS_KEY
and AWS_STORAGE_BUCKET_NAME
and AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = "/mediafiles/"
MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning(
"You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
)
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
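# A minimal sketch of the environment this module reads (variable names are
# taken from the env.* calls above; the values are illustrative placeholders):
#
#     SECRET_KEY=change-me              # required: env.str() has no default
#     DEBUG=True                        # optional, defaults to False
#     DATABASE_URL=postgres://user:pass@localhost:5432/app  # optional, sqlite fallback
#     SENDGRID_USERNAME=apikey          # optional: console email backend if unset
#     SENDGRID_PASSWORD=secret-api-key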
|
[
"[email protected]"
] | |
9ca6187c5ae5548d7761cabf96c1ea07cc15a17e
|
c3432a248c8a7a43425c0fe1691557c0936ab380
|
/CodePlus_Practice_RE/2021.03.FourthWeek/0326/14395_4연산.py
|
08550d6f6d30ea04c95e486d19ef4fba1ed9b403
|
[] |
no_license
|
Parkyunhwan/BaekJoon
|
13cb3af1f45212d7c418ecc4b927f42615b14a74
|
9a882c568f991c9fed3df45277f091626fcc2c94
|
refs/heads/master
| 2022-12-24T21:47:47.052967 | 2022-12-20T16:16:59 | 2022-12-20T16:16:59 | 232,264,447 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,096 |
py
|
from collections import deque
from collections import defaultdict
s, t = map(int, input().split())
if s == t:
print(0)
exit(0)
q = deque()
q.append((s, []))
dic = defaultdict(int)
dic[s] += 1
while q:
curr, oper = q.popleft()
if curr == t:
print(''.join(oper))
exit(0)
if curr <= t and curr * curr <= 10e9:
if dic[curr * curr] == 0:
tmp = oper[:]
tmp.append('*')
dic[curr * curr] += 1
q.append((curr * curr, tmp))
if curr <= t and curr + curr <= 10e9:
if dic[curr + curr] == 0:
tmp = oper[:]
tmp.append('+')
dic[curr + curr] += 1
q.append((curr + curr, tmp))
if curr - curr >= 0:
if dic[curr - curr] == 0:
tmp = oper[:]
tmp.append('-')
            dic[curr - curr] += 1
q.append((curr - curr, tmp))
if curr != 0:
if dic[curr / curr] == 0:
tmp = oper[:]
tmp.append('/')
dic[curr / curr] += 1
q.append((curr / curr, tmp))
print(-1)
|
[
"[email protected]"
] | |
5df625db90c027af35bc72069fa3e459870d71d7
|
2ffdd45472fc20497123bffc3c9b94d9fe8c9bc8
|
/venv/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py
|
e4d1f678d0a670183e0f08c27d748656a9bc2483
|
[] |
no_license
|
mbea-int/expense-tracker-app
|
fca02a45623e24ed20d201f69c9a892161141e0c
|
47db2c98ed93efcac5330ced2b98d2ca365e6017
|
refs/heads/master
| 2023-05-10T14:29:04.935218 | 2021-06-04T15:10:00 | 2021-06-04T15:10:00 | 373,816,157 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 17,056 |
py
|
import collections
import logging
from pip._vendor import six
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.exceptions import (
DistributionNotFound,
InstallationError,
UnsupportedPythonVersion,
UnsupportedWheel,
)
from pip._internal.models.wheel import Wheel
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.compatibility_tags import get_supported
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.misc import (
dist_in_site_packages,
dist_in_usersite,
get_installed_distributions,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.virtualenv import running_under_virtualenv
from .candidates import (
AlreadyInstalledCandidate,
EditableCandidate,
ExtrasCandidate,
LinkCandidate,
RequiresPythonCandidate,
)
from .requirements import (
ExplicitRequirement,
RequiresPythonRequirement,
SpecifierRequirement,
)
if MYPY_CHECK_RUNNING:
from typing import (
FrozenSet,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
)
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.version import _BaseVersion
from pip._vendor.pkg_resources import Distribution
from pip._vendor.resolvelib import ResolutionImpossible
from pip._internal.cache import CacheEntry, WheelCache
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.link import Link
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.resolution.base import InstallRequirementProvider
from .base import Candidate, Requirement
from .candidates import BaseCandidate
C = TypeVar("C")
Cache = Dict[Link, C]
VersionCandidates = Dict[_BaseVersion, Candidate]
logger = logging.getLogger(__name__)
class Factory(object):
def __init__(
self,
finder, # type: PackageFinder
preparer, # type: RequirementPreparer
make_install_req, # type: InstallRequirementProvider
wheel_cache, # type: Optional[WheelCache]
use_user_site, # type: bool
force_reinstall, # type: bool
ignore_installed, # type: bool
ignore_requires_python, # type: bool
py_version_info=None, # type: Optional[Tuple[int, ...]]
lazy_wheel=False, # type: bool
):
# type: (...) -> None
self._finder = finder
self.preparer = preparer
self._wheel_cache = wheel_cache
self._python_candidate = RequiresPythonCandidate(py_version_info)
self._make_install_req_from_spec = make_install_req
self._use_user_site = use_user_site
self._force_reinstall = force_reinstall
self._ignore_requires_python = ignore_requires_python
self.use_lazy_wheel = lazy_wheel
self._link_candidate_cache = {} # type: Cache[LinkCandidate]
self._editable_candidate_cache = {} # type: Cache[EditableCandidate]
if not ignore_installed:
self._installed_dists = {
canonicalize_name(dist.project_name): dist
for dist in get_installed_distributions()
}
else:
self._installed_dists = {}
@property
def force_reinstall(self):
# type: () -> bool
return self._force_reinstall
def _make_candidate_from_dist(
self,
dist, # type: Distribution
extras, # type: FrozenSet[str]
template, # type: InstallRequirement
):
# type: (...) -> Candidate
base = AlreadyInstalledCandidate(dist, template, factory=self)
if extras:
return ExtrasCandidate(base, extras)
return base
def _make_candidate_from_link(
self,
link, # type: Link
extras, # type: FrozenSet[str]
template, # type: InstallRequirement
name, # type: Optional[str]
version, # type: Optional[_BaseVersion]
):
# type: (...) -> Candidate
# TODO: Check already installed candidate, and use it if the link and
# editable flag match.
if template.editable:
if link not in self._editable_candidate_cache:
self._editable_candidate_cache[link] = EditableCandidate(
link, template, factory=self, name=name, version=version
)
base = self._editable_candidate_cache[link] # type: BaseCandidate
else:
if link not in self._link_candidate_cache:
self._link_candidate_cache[link] = LinkCandidate(
link, template, factory=self, name=name, version=version
)
base = self._link_candidate_cache[link]
if extras:
return ExtrasCandidate(base, extras)
return base
def _iter_found_candidates(
self,
ireqs, # type: Sequence[InstallRequirement]
specifier, # type: SpecifierSet
):
# type: (...) -> Iterable[Candidate]
if not ireqs:
return ()
# The InstallRequirement implementation requires us to give it a
# "template". Here we just choose the first requirement to represent
# all of them.
# Hopefully the Project model can correct this mismatch in the future.
template = ireqs[0]
name = canonicalize_name(template.req.name)
hashes = Hashes()
extras = frozenset() # type: FrozenSet[str]
for ireq in ireqs:
specifier &= ireq.req.specifier
hashes |= ireq.hashes(trust_internet=False)
extras |= frozenset(ireq.extras)
# We use this to ensure that we only yield a single candidate for
# each version (the finder's preferred one for that version). The
# requirement needs to return only one candidate per version, so we
# implement that logic here so that requirements using this helper
# don't all have to do the same thing later.
candidates = collections.OrderedDict() # type: VersionCandidates
# Get the installed version, if it matches, unless the user
# specified `--force-reinstall`, when we want the version from
# the index instead.
installed_version = None
installed_candidate = None
if not self._force_reinstall and name in self._installed_dists:
installed_dist = self._installed_dists[name]
installed_version = installed_dist.parsed_version
if specifier.contains(installed_version, prereleases=True):
installed_candidate = self._make_candidate_from_dist(
dist=installed_dist, extras=extras, template=template
)
found = self._finder.find_best_candidate(
project_name=name, specifier=specifier, hashes=hashes
)
for ican in found.iter_applicable():
if ican.version == installed_version and installed_candidate:
candidate = installed_candidate
else:
candidate = self._make_candidate_from_link(
link=ican.link,
extras=extras,
template=template,
name=name,
version=ican.version,
)
candidates[ican.version] = candidate
# Yield the installed version even if it is not found on the index.
if installed_version and installed_candidate:
candidates[installed_version] = installed_candidate
return six.itervalues(candidates)
def find_candidates(self, requirements, constraint):
# type: (Sequence[Requirement], SpecifierSet) -> Iterable[Candidate]
explicit_candidates = set() # type: Set[Candidate]
ireqs = [] # type: List[InstallRequirement]
for req in requirements:
cand, ireq = req.get_candidate_lookup()
if cand is not None:
explicit_candidates.add(cand)
if ireq is not None:
ireqs.append(ireq)
# If none of the requirements want an explicit candidate, we can ask
# the finder for candidates.
if not explicit_candidates:
return self._iter_found_candidates(ireqs, constraint)
if constraint:
name = explicit_candidates.pop().name
raise InstallationError(
"Could not satisfy constraints for {!r}: installation from "
"path or url cannot be constrained to a version".format(name)
)
return (
c
for c in explicit_candidates
if all(req.is_satisfied_by(c) for req in requirements)
)
def make_requirement_from_install_req(self, ireq, requested_extras):
# type: (InstallRequirement, Iterable[str]) -> Optional[Requirement]
if not ireq.match_markers(requested_extras):
logger.info(
"Ignoring %s: markers '%s' don't match your environment",
ireq.name,
ireq.markers,
)
return None
if not ireq.link:
return SpecifierRequirement(ireq)
if ireq.link.is_wheel:
wheel = Wheel(ireq.link.filename)
if not wheel.supported(self._finder.target_python.get_tags()):
msg = "{} is not a supported wheel on this platform.".format(
wheel.filename
)
raise UnsupportedWheel(msg)
cand = self._make_candidate_from_link(
ireq.link,
extras=frozenset(ireq.extras),
template=ireq,
name=canonicalize_name(ireq.name) if ireq.name else None,
version=None,
)
return self.make_requirement_from_candidate(cand)
def make_requirement_from_candidate(self, candidate):
# type: (Candidate) -> ExplicitRequirement
return ExplicitRequirement(candidate)
def make_requirement_from_spec(
self,
specifier, # type: str
comes_from, # type: InstallRequirement
requested_extras=(), # type: Iterable[str]
):
# type: (...) -> Optional[Requirement]
ireq = self._make_install_req_from_spec(specifier, comes_from)
return self.make_requirement_from_install_req(ireq, requested_extras)
def make_requires_python_requirement(self, specifier):
# type: (Optional[SpecifierSet]) -> Optional[Requirement]
if self._ignore_requires_python or specifier is None:
return None
return RequiresPythonRequirement(specifier, self._python_candidate)
def get_wheel_cache_entry(self, link, name):
# type: (Link, Optional[str]) -> Optional[CacheEntry]
"""Look up the link in the wheel cache.
If ``preparer.require_hashes`` is True, don't use the wheel cache,
because cached wheels, always built locally, have different hashes
than the files downloaded from the index server and thus throw false
hash mismatches. Furthermore, cached wheels at present have
nondeterministic contents due to file modification times.
"""
if self._wheel_cache is None or self.preparer.require_hashes:
return None
return self._wheel_cache.get_cache_entry(
link=link, package_name=name, supported_tags=get_supported()
)
def get_dist_to_uninstall(self, candidate):
# type: (Candidate) -> Optional[Distribution]
# TODO: Are there more cases this needs to return True? Editable?
dist = self._installed_dists.get(candidate.name)
if dist is None: # Not installed, no uninstallation required.
return None
# We're installing into global site. The current installation must
# be uninstalled, no matter it's in global or user site, because the
# user site installation has precedence over global.
if not self._use_user_site:
return dist
# We're installing into user site. Remove the user site installation.
if dist_in_usersite(dist):
return dist
# We're installing into user site, but the installed incompatible
# package is in global site. We can't uninstall that, and would let
# the new user installation to "shadow" it. But shadowing won't work
# in virtual environments, so we error out.
if running_under_virtualenv() and dist_in_site_packages(dist):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to {} in {}".format(
dist.project_name, dist.location
)
)
return None
def _report_requires_python_error(
self,
requirement, # type: RequiresPythonRequirement
template, # type: Candidate
):
# type: (...) -> UnsupportedPythonVersion
message_format = (
"Package {package!r} requires a different Python: "
"{version} not in {specifier!r}"
)
message = message_format.format(
package=template.name,
version=self._python_candidate.version,
specifier=str(requirement.specifier),
)
return UnsupportedPythonVersion(message)
def get_installation_error(self, e):
# type: (ResolutionImpossible) -> InstallationError
assert e.causes, "Installation error reported with no cause"
# If one of the things we can't solve is "we need Python X.Y",
# that is what we report.
for cause in e.causes:
if isinstance(cause.requirement, RequiresPythonRequirement):
return self._report_requires_python_error(
cause.requirement, cause.parent
)
# Otherwise, we have a set of causes which can't all be satisfied
# at once.
# The simplest case is when we have *one* cause that can't be
# satisfied. We just report that case.
if len(e.causes) == 1:
req, parent = e.causes[0]
if parent is None:
req_disp = str(req)
else:
req_disp = "{} (from {})".format(req, parent.name)
logger.critical(
"Could not find a version that satisfies the requirement %s", req_disp
)
return DistributionNotFound(
"No matching distribution found for {}".format(req)
)
# OK, we now have a list of requirements that can't all be
# satisfied at once.
# A couple of formatting helpers
def text_join(parts):
# type: (List[str]) -> str
if len(parts) == 1:
return parts[0]
return ", ".join(parts[:-1]) + " and " + parts[-1]
def readable_form(cand):
# type: (Candidate) -> str
return "{} {}".format(cand.name, cand.version)
def describe_trigger(parent):
# type: (Candidate) -> str
ireq = parent.get_install_requirement()
if not ireq or not ireq.comes_from:
return "{} {}".format(parent.name, parent.version)
if isinstance(ireq.comes_from, InstallRequirement):
return str(ireq.comes_from.name)
return str(ireq.comes_from)
triggers = []
for req, parent in e.causes:
if parent is None:
# This is a root requirement, so we can report it directly
trigger = req.format_for_error()
else:
trigger = describe_trigger(parent)
triggers.append(trigger)
if triggers:
info = text_join(triggers)
else:
info = "the requested packages"
msg = (
"Cannot install {} because these package versions "
"have conflicting dependencies.".format(info)
)
logger.critical(msg)
msg = "\nThe conflict is caused by:"
for req, parent in e.causes:
msg = msg + "\n "
if parent:
msg = msg + "{} {} depends on ".format(parent.name, parent.version)
else:
msg = msg + "The user requested "
msg = msg + req.format_for_error()
msg = (
msg
+ "\n\n"
+ "To fix this you could try to:\n"
+ "1. loosen the range of package versions you've specified\n"
+ "2. remove package versions to allow pip attempt to solve "
+ "the dependency conflict\n"
)
logger.info(msg)
return DistributionNotFound(
"ResolutionImpossible: for help visit "
"https://pip.pypa.io/en/latest/user_guide/"
"#fixing-conflicting-dependencies"
)
|
[
"[email protected]"
] | |
955105e9389475e6367dc2e9b50e0e8ddacaa43e
|
45da24ad0793ced3ce4a332486877ebdd9776388
|
/app/main/docs/schemas/survey_result_schema.py
|
357b35a9532be93bc86d6d0ed3360f8eb8b6bc82
|
[] |
no_license
|
luccasPh/clean-python-api
|
2fce7003646613ad543b9e8e4afd77bd4b49a25e
|
bc10bdc485bbec1c02c73783109c178d887514f1
|
refs/heads/master
| 2023-03-24T11:30:40.380210 | 2021-03-19T19:11:24 | 2021-03-19T19:11:24 | 343,474,537 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 688 |
py
|
save_survey_result_schema = dict(
type="object", properties=dict(answer=dict(type="string")), required=["answer"]
)
load_survey_result_schema = dict(
type="object",
properties=dict(
survey_id=dict(type="string"),
question=dict(type="string"),
answers=dict(
type="array",
items=dict(
type="object",
properties=dict(
image=dict(type="string"),
answers=dict(type="string"),
count=dict(type="number"),
percent=dict(type="number"),
),
),
),
date=dict(type="string"),
),
)
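# A minimal validation sketch (assumes these dicts are consumed as JSON
# Schemas via the third-party `jsonschema` package; the payload is illustrative):
#
#     from jsonschema import validate
#     validate(instance={"answer": "yes"}, schema=save_survey_result_schema)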
|
[
"[email protected]"
] | |
3f864156c74165482ab3c52853b2afd55e46a6de
|
361cb7a3d4add399a57cfe95089ede75bc9d5dae
|
/Python/tdw/object_init_data.py
|
1500fbab8b47672c65f4110413b4b71c4cd206b0
|
[
"BSD-2-Clause"
] |
permissive
|
celich/tdw
|
e0dc52b590e572ce0b4e3a147daad656f2a47e77
|
c4c56436b418d3abdd4a3971a391bb119c34602e
|
refs/heads/master
| 2023-04-20T19:10:37.934408 | 2021-05-05T18:22:47 | 2021-05-05T18:22:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,699 |
py
|
from typing import Dict, List, Tuple
from tdw.tdw_utils import TDWUtils
from tdw.controller import Controller
from tdw.librarian import ModelLibrarian, ModelRecord
from tdw.py_impact import AudioMaterial, PyImpact, ObjectInfo
class TransformInitData:
"""
Basic initialization parameters for an object. Can be converted to and from a list of commands.
This is similar to [`Controller.get_add_object()`](controller.md) except that it includes more parameters.
"""
LIBRARIES: Dict[str, ModelLibrarian] = dict()
for _lib_file in ModelLibrarian.get_library_filenames():
LIBRARIES[_lib_file] = ModelLibrarian(_lib_file)
def __init__(self, name: str, library: str = "models_core.json", scale_factor: Dict[str, float] = None, position: Dict[str, float] = None, rotation: Dict[str, float] = None, kinematic: bool = False, gravity: bool = True):
"""
:param name: The name of the model.
:param library: The filename of the library containing the model's record.
:param scale_factor: The [scale factor](../api/command_api.md#scale_object).
        :param position: The initial position. If None, defaults to: `{"x": 0, "y": 0, "z": 0}`.
:param rotation: The initial rotation as Euler angles or a quaternion. If None, defaults to: `{"w": 1, "x": 0, "y": 0, "z": 0}`
:param kinematic: If True, the object will be [kinematic](../api/command_api.md#set_kinematic_state).
        :param gravity: If True, the object will respond to [gravity](../api/command_api.md#set_kinematic_state).
"""
if position is None:
self.position = TDWUtils.VECTOR3_ZERO
else:
self.position = position
if rotation is None:
self.rotation = {"w": 1, "x": 0, "y": 0, "z": 0}
else:
self.rotation = rotation
if scale_factor is None:
self.scale_factor = {"x": 1, "y": 1, "z": 1}
else:
self.scale_factor = scale_factor
self.name = name
self.library = library
self.kinematic = kinematic
self.gravity = gravity
def get_commands(self) -> Tuple[int, List[dict]]:
"""
:return: Tuple: The ID of the object; a list of commands to create the object: `[add_object, rotate_object_to, scale_object, set_kinematic_state, set_object_collision_detection_mode]`
"""
record = self._get_record()
object_id = Controller.get_unique_id()
commands = [{"$type": "add_object",
"name": record.name,
"url": record.get_url(),
"scale_factor": record.scale_factor,
"position": self.position,
"category": record.wcategory,
"id": object_id}]
# The rotation is a quaternion.
if "w" in self.rotation:
commands.append({"$type": "rotate_object_to",
"rotation": self.rotation,
"id": object_id})
# The rotation is in Euler angles.
else:
commands.append({"$type": "rotate_object_to_euler_angles",
"euler_angles": self.rotation,
"id": object_id})
commands.extend([{"$type": "scale_object",
"scale_factor": self.scale_factor,
"id": object_id},
{"$type": "set_kinematic_state",
"id": object_id,
"is_kinematic": self.kinematic,
"use_gravity": self.gravity}])
# Kinematic objects must be continuous_speculative.
if self.kinematic:
commands.append({"$type": "set_object_collision_detection_mode",
"id": object_id,
"mode": "continuous_speculative"})
return object_id, commands
def _get_record(self) -> ModelRecord:
"""
:return: The model metadata record for this object.
"""
return TransformInitData.LIBRARIES[self.library].get_record(name=self.name)
class RigidbodyInitData(TransformInitData):
"""
A subclass of `TransformInitData`. Includes data and commands to set the mass and physic material of the object.
"""
def __init__(self, name: str, mass: float, dynamic_friction: float, static_friction: float, bounciness: float, library: str = "models_core.json", scale_factor: Dict[str, float] = None, position: Dict[str, float] = None, rotation: Dict[str, float] = None, kinematic: bool = False, gravity: bool = True):
"""
:param name: The name of the model.
:param library: The filename of the library containing the model's record.
:param scale_factor: The [scale factor](../api/command_api.md#scale_object).
        :param position: The initial position. If None, defaults to: `{"x": 0, "y": 0, "z": 0}`.
:param rotation: The initial rotation as Euler angles or a quaternion. If None, defaults to: `{"w": 1, "x": 0, "y": 0, "z": 0}`
:param kinematic: If True, the object will be [kinematic](../api/command_api.md#set_kinematic_state).
        :param gravity: If True, the object will respond to [gravity](../api/command_api.md#set_kinematic_state).
:param mass: The mass of the object.
        :param dynamic_friction: The [dynamic friction](../api/command_api.md#set_physic_material) of the object.
        :param static_friction: The [static friction](../api/command_api.md#set_physic_material) of the object.
        :param bounciness: The [bounciness](../api/command_api.md#set_physic_material) of the object.
        """
super().__init__(name=name, library=library, scale_factor=scale_factor, position=position, rotation=rotation,
kinematic=kinematic, gravity=gravity)
self.mass = mass
self.dynamic_friction = dynamic_friction
self.static_friction = static_friction
self.bounciness = bounciness
def get_commands(self) -> Tuple[int, List[dict]]:
"""
:return: Tuple: The ID of the object; a list of commands to create the object: `[add_object, rotate_object_to, scale_object, set_kinematic_state, set_object_collision_detection_mode, set_mass, set_physic_material]`
"""
object_id, commands = super().get_commands()
# Set the mass and physic material.
commands.extend([{"$type": "set_mass",
"mass": self.mass,
"id": object_id},
{"$type": "set_physic_material",
"dynamic_friction": self.dynamic_friction,
"static_friction": self.static_friction,
"bounciness": self.bounciness,
"id": object_id}])
return object_id, commands
class AudioInitData(RigidbodyInitData):
"""
A subclass of `RigidbodyInitData` that includes [audio values](py_impact.md#objectinfo).
Physics values are derived from these audio values.
"""
_DYNAMIC_FRICTION = {AudioMaterial.ceramic: 0.47,
AudioMaterial.hardwood: 0.35,
AudioMaterial.wood: 0.35,
AudioMaterial.cardboard: 0.47,
AudioMaterial.glass: 0.65,
AudioMaterial.metal: 0.43}
_STATIC_FRICTION = {AudioMaterial.ceramic: 0.47,
AudioMaterial.hardwood: 0.4,
AudioMaterial.wood: 0.4,
AudioMaterial.cardboard: 0.47,
AudioMaterial.glass: 0.65,
AudioMaterial.metal: 0.52}
AUDIO = PyImpact.get_object_info()
def __init__(self, name: str, library: str = "models_core.json", scale_factor: Dict[str, float] = None, position: Dict[str, float] = None, rotation: Dict[str, float] = None, kinematic: bool = False, gravity: bool = True, audio: ObjectInfo = None):
"""
:param name: The name of the model.
:param library: The filename of the library containing the model's record.
:param scale_factor: The [scale factor](../api/command_api.md#scale_object).
        :param position: The initial position. If None, defaults to: `{"x": 0, "y": 0, "z": 0}`.
:param rotation: The initial rotation as Euler angles or a quaternion. If None, defaults to: `{"w": 1, "x": 0, "y": 0, "z": 0}`
:param kinematic: If True, the object will be [kinematic](../api/command_api.md#set_kinematic_state).
        :param gravity: If True, the object will respond to [gravity](../api/command_api.md#set_kinematic_state).
:param audio: If None, derive physics data from the audio data in `PyImpact.get_object_info()` (if the object isn't in this dictionary, this constructor will throw an error). If not None, use these values instead of the default audio values.
"""
if audio is None:
self.audio = AudioInitData.AUDIO[name]
else:
self.audio = audio
super().__init__(name=name, library=library, scale_factor=scale_factor, position=position, rotation=rotation,
kinematic=kinematic, gravity=gravity, mass=self.audio.mass,
dynamic_friction=AudioInitData._DYNAMIC_FRICTION[self.audio.material],
static_friction=AudioInitData._STATIC_FRICTION[self.audio.material],
bounciness=self.audio.bounciness)
def get_commands(self) -> Tuple[int, List[dict]]:
"""
:return: Tuple: The ID of the object; a list of commands to create the object: `[add_object, rotate_object_to, scale_object, set_kinematic_state, set_object_collision_detection_mode, set_mass, set_physic_material]`
"""
return super().get_commands()
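# A minimal usage sketch (assumes "iron_box" has audio values in
# PyImpact.get_object_info(); sending the commands through a Controller is
# omitted here):
#
#     init_data = AudioInitData(name="iron_box",
#                               position={"x": 0, "y": 0, "z": 0.5})
#     object_id, commands = init_data.get_commands()
#     # e.g. controller.communicate(commands)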
|
[
"[email protected]"
] | |
6e316a177a030e3c355047cc7aa62c2a13b91b5f
|
20a23e195cb41138ea46fae4773444c7d87a51f0
|
/homeassistant/components/tellduslive/config_flow.py
|
3373e9cc2f7ca74aebec0c7dc1b69d4c595525e3
|
[
"Apache-2.0"
] |
permissive
|
poma/home-assistant
|
37b11a5784a4e7e960bec7bf6ea1f41c66d834ee
|
203190f705fa280564734ba5a2281592ef535ed4
|
refs/heads/dev
| 2020-04-17T17:27:35.244055 | 2019-01-23T08:06:27 | 2019-01-23T08:06:27 | 166,782,737 | 1 | 0 |
Apache-2.0
| 2019-01-21T09:11:27 | 2019-01-21T09:11:24 | null |
UTF-8
|
Python
| false | false | 5,279 |
py
|
"""Config flow for Tellduslive."""
import asyncio
import logging
import os
import async_timeout
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.util.json import load_json
from .const import (
APPLICATION_NAME, CLOUD_NAME, DOMAIN, KEY_HOST, KEY_SCAN_INTERVAL,
KEY_SESSION, NOT_SO_PRIVATE_KEY, PUBLIC_KEY, SCAN_INTERVAL,
TELLDUS_CONFIG_FILE)
KEY_TOKEN = 'token'
KEY_TOKEN_SECRET = 'token_secret'
_LOGGER = logging.getLogger(__name__)
@config_entries.HANDLERS.register('tellduslive')
class FlowHandler(config_entries.ConfigFlow):
"""Handle a config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Init config flow."""
self._hosts = [CLOUD_NAME]
self._host = None
self._session = None
self._scan_interval = SCAN_INTERVAL
def _get_auth_url(self):
from tellduslive import Session
self._session = Session(
public_key=PUBLIC_KEY,
private_key=NOT_SO_PRIVATE_KEY,
host=self._host,
application=APPLICATION_NAME,
)
return self._session.authorize_url
async def async_step_user(self, user_input=None):
"""Let user select host or cloud."""
if self.hass.config_entries.async_entries(DOMAIN):
return self.async_abort(reason='already_setup')
if user_input is not None or len(self._hosts) == 1:
if user_input is not None and user_input[KEY_HOST] != CLOUD_NAME:
self._host = user_input[KEY_HOST]
return await self.async_step_auth()
return self.async_show_form(
step_id='user',
data_schema=vol.Schema({
vol.Required(KEY_HOST):
vol.In(list(self._hosts))
}))
async def async_step_auth(self, user_input=None):
"""Handle the submitted configuration."""
errors = {}
if user_input is not None:
if await self.hass.async_add_executor_job(
self._session.authorize):
host = self._host or CLOUD_NAME
if self._host:
session = {
KEY_HOST: host,
KEY_TOKEN: self._session.access_token
}
else:
session = {
KEY_TOKEN: self._session.access_token,
KEY_TOKEN_SECRET: self._session.access_token_secret
}
return self.async_create_entry(
title=host, data={
KEY_HOST: host,
KEY_SCAN_INTERVAL: self._scan_interval.seconds,
KEY_SESSION: session,
})
else:
errors['base'] = 'auth_error'
try:
with async_timeout.timeout(10):
auth_url = await self.hass.async_add_executor_job(
self._get_auth_url)
if not auth_url:
return self.async_abort(reason='authorize_url_fail')
except asyncio.TimeoutError:
return self.async_abort(reason='authorize_url_timeout')
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error generating auth url")
return self.async_abort(reason='authorize_url_fail')
_LOGGER.debug('Got authorization URL %s', auth_url)
return self.async_show_form(
step_id='auth',
errors=errors,
description_placeholders={
'app_name': APPLICATION_NAME,
'auth_url': auth_url,
},
)
async def async_step_discovery(self, user_input):
"""Run when a Tellstick is discovered."""
from tellduslive import supports_local_api
_LOGGER.info('Discovered tellstick device: %s', user_input)
if supports_local_api(user_input[1]):
            _LOGGER.info('%s supports local API', user_input[1])
self._hosts.append(user_input[0])
return await self.async_step_user()
async def async_step_import(self, user_input):
"""Import a config entry."""
if self.hass.config_entries.async_entries(DOMAIN):
return self.async_abort(reason='already_setup')
self._scan_interval = user_input[KEY_SCAN_INTERVAL]
if user_input[KEY_HOST] != DOMAIN:
self._hosts.append(user_input[KEY_HOST])
if not await self.hass.async_add_executor_job(
os.path.isfile, self.hass.config.path(TELLDUS_CONFIG_FILE)):
return await self.async_step_user()
conf = await self.hass.async_add_executor_job(
load_json, self.hass.config.path(TELLDUS_CONFIG_FILE))
host = next(iter(conf))
if user_input[KEY_HOST] != host:
return await self.async_step_user()
host = CLOUD_NAME if host == 'tellduslive' else host
return self.async_create_entry(
title=host,
data={
KEY_HOST: host,
KEY_SCAN_INTERVAL: self._scan_interval.seconds,
KEY_SESSION: next(iter(conf.values())),
})
|
[
"[email protected]"
] | |
d5a3bdb7ab7125f19315ec09c924533d2f8e7815
|
c3a4658077c689710abf5ec846c8c59cbda16a51
|
/fbgemm_gpu/codegen/split_embedding_codegen_lookup_invoker.template
|
db1b96078a37023682a79053515d5dd67e25b31a
|
[
"BSD-3-Clause"
] |
permissive
|
jiecaoyu/FBGEMM
|
6a85c5d2e9ee75e2f62bf428332c83e0366703b3
|
2c547924deafa1839483d31096de800078c35711
|
refs/heads/main
| 2023-03-16T23:29:36.266634 | 2022-06-03T21:05:49 | 2022-06-03T21:05:49 | 237,500,435 | 0 | 0 |
NOASSERTION
| 2021-11-15T23:46:24 | 2020-01-31T19:21:59 | null |
UTF-8
|
Python
| false | false | 7,507 |
template
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from .lookup_args import *
{% if is_fbcode %}
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cpu")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:cumem_utils")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:split_table_batched_embeddings"
)
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/fb:embedding_inplace_update")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/fb:embedding_inplace_update_cpu")
{% else %}
#import os
#torch.ops.load_library(os.path.join(os.path.join(os.path.dirname(os.path.dirname(__file__)), "fbgemm_gpu_py.so")))
{% endif %}
def invoke(
common_args: CommonArgs,
optimizer_args: OptimizerArgs,
{% if "momentum1_dev" in args.split_function_arg_names %}
momentum1: Momentum,
{% endif %}
{% if "momentum2_dev" in args.split_function_arg_names %}
momentum2: Momentum,
{% endif %}
{% if "iter" in args.split_function_arg_names %}
iter: int,
{% endif %}
) -> torch.Tensor:
if (common_args.host_weights.numel() > 0):
return torch.ops.fbgemm.split_embedding_codegen_lookup_{{ optimizer }}_function_cpu(
# common_args
host_weights=common_args.host_weights,
weights_placements=common_args.weights_placements,
weights_offsets=common_args.weights_offsets,
D_offsets=common_args.D_offsets,
total_D=common_args.total_D,
max_D=common_args.max_D,
hash_size_cumsum=common_args.hash_size_cumsum,
total_hash_size_bits=common_args.total_hash_size_bits,
indices=common_args.indices,
offsets=common_args.offsets,
pooling_mode=common_args.pooling_mode,
indice_weights=common_args.indice_weights,
feature_requires_grad=common_args.feature_requires_grad,
# optimizer_args
gradient_clipping = optimizer_args.gradient_clipping,
max_gradient=optimizer_args.max_gradient,
stochastic_rounding=optimizer_args.stochastic_rounding,
{% if "learning_rate" in args.split_function_arg_names %}
learning_rate=optimizer_args.learning_rate,
{% endif %}
{% if "eps" in args.split_function_arg_names %}
eps=optimizer_args.eps,
{% endif %}
{% if "beta1" in args.split_function_arg_names %}
beta1=optimizer_args.beta1,
{% endif %}
{% if "beta2" in args.split_function_arg_names %}
beta2=optimizer_args.beta2,
{% endif %}
{% if "weight_decay" in args.split_function_arg_names %}
weight_decay=optimizer_args.weight_decay,
{% endif %}
{% if "weight_decay_mode" in args.split_function_arg_names %}
weight_decay_mode=optimizer_args.weight_decay_mode,
{% endif %}
{% if "eta" in args.split_function_arg_names %}
eta=optimizer_args.eta,
{% endif %}
{% if "momentum" in args.split_function_arg_names %}
momentum=optimizer_args.momentum,
{% endif %}
# momentum1
{% if "momentum1_dev" in args.split_function_arg_names %}
momentum1_host=momentum1.host,
momentum1_offsets=momentum1.offsets,
momentum1_placements=momentum1.placements,
{% endif %}
# momentum2
{% if "momentum2_dev" in args.split_function_arg_names %}
momentum2_host=momentum2.host,
momentum2_offsets=momentum2.offsets,
momentum2_placements=momentum2.placements,
{% endif %}
# iter
{% if "iter" in args.split_function_arg_names %}
iter=iter,
{% endif %}
)
else:
return torch.ops.fbgemm.split_embedding_codegen_lookup_{{ optimizer }}_function(
# common_args
{% if not dense %}
placeholder_autograd_tensor=common_args.placeholder_autograd_tensor,
{% endif %}
dev_weights=common_args.dev_weights,
uvm_weights=common_args.uvm_weights,
lxu_cache_weights=common_args.lxu_cache_weights,
weights_placements=common_args.weights_placements,
weights_offsets=common_args.weights_offsets,
D_offsets=common_args.D_offsets,
total_D=common_args.total_D,
max_D=common_args.max_D,
hash_size_cumsum=common_args.hash_size_cumsum,
total_hash_size_bits=common_args.total_hash_size_bits,
indices=common_args.indices,
offsets=common_args.offsets,
pooling_mode=common_args.pooling_mode,
indice_weights=common_args.indice_weights,
feature_requires_grad=common_args.feature_requires_grad,
lxu_cache_locations=common_args.lxu_cache_locations,
# optimizer_args
gradient_clipping = optimizer_args.gradient_clipping,
max_gradient=optimizer_args.max_gradient,
stochastic_rounding=optimizer_args.stochastic_rounding,
{% if "learning_rate" in args.split_function_arg_names %}
learning_rate=optimizer_args.learning_rate,
{% endif %}
{% if "eps" in args.split_function_arg_names %}
eps=optimizer_args.eps,
{% endif %}
{% if "beta1" in args.split_function_arg_names %}
beta1=optimizer_args.beta1,
{% endif %}
{% if "beta2" in args.split_function_arg_names %}
beta2=optimizer_args.beta2,
{% endif %}
{% if "weight_decay" in args.split_function_arg_names %}
weight_decay=optimizer_args.weight_decay,
{% endif %}
{% if "weight_decay_mode" in args.split_function_arg_names %}
weight_decay_mode=optimizer_args.weight_decay_mode,
{% endif %}
{% if "eta" in args.split_function_arg_names %}
eta=optimizer_args.eta,
{% endif %}
{% if "momentum" in args.split_function_arg_names %}
momentum=optimizer_args.momentum,
{% endif %}
# momentum1
{% if "momentum1_dev" in args.split_function_arg_names %}
momentum1_dev=momentum1.dev,
momentum1_uvm=momentum1.uvm,
momentum1_offsets=momentum1.offsets,
momentum1_placements=momentum1.placements,
{% endif %}
# momentum2
{% if "momentum2_dev" in args.split_function_arg_names %}
momentum2_dev=momentum2.dev,
momentum2_uvm=momentum2.uvm,
momentum2_offsets=momentum2.offsets,
momentum2_placements=momentum2.placements,
{% endif %}
# iter
{% if "iter" in args.split_function_arg_names %}
iter=iter,
{% endif %}
output_dtype=common_args.output_dtype,
)
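# A minimal rendering sketch (assumes the codegen uses Jinja2, with
# `optimizer`, `args`, `dense`, and `is_fbcode` supplied by the build scripts;
# `args` is a codegen namespace and is left elided here):
#
#     from jinja2 import Template
#     tpl = Template(open("split_embedding_codegen_lookup_invoker.template").read())
#     code = tpl.render(optimizer="adagrad", dense=False, is_fbcode=False, args=...)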
|
[
"[email protected]"
] | |
1b37fa5b76c1aa2a32d1eb90b32391cad1c509f5
|
cc72013ede1b3bb02c32a3d0d199be4f7986c173
|
/ch10/DieViewColor.py
|
58b5529ab431c7372c00faa9de35170a11b1cd23
|
[] |
no_license
|
alextickle/zelle-exercises
|
b87d2a1476189954565f5cc97ee1448200eb00d4
|
b784ff9ed9b2cb1c56e31c1c63f3e2b52fa37875
|
refs/heads/master
| 2021-01-19T00:33:19.132238 | 2017-09-14T23:35:35 | 2017-09-14T23:35:35 | 87,182,609 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,375 |
py
|
# DieViewColor.py
from graphics import *
class DieViewColor:
def __init__(self, win, center, size):
self.win = win
self.background = "white" # color of die face
self.foreground = "black" # color of pips
self.psize = 0.1 * size # radius of each pip
hsize = size / 2.0
offset = 0.6 * hsize
cx, cy = center.getX(), center.getY()
p1 = Point(cx - hsize, cy - hsize)
p2 = Point(cx + hsize, cy + hsize)
rect = Rectangle(p1, p2)
rect.draw(win)
rect.setFill(self.background)
self.pip1 = self.__makePip(cx - offset, cy - offset)
self.pip2 = self.__makePip(cx - offset, cy)
self.pip3 = self.__makePip(cx - offset, cy + offset)
self.pip4 = self.__makePip(cx, cy)
self.pip5 = self.__makePip(cx + offset, cy - offset)
self.pip6 = self.__makePip(cx + offset, cy)
self.pip7 = self.__makePip(cx + offset, cy + offset)
self.setValue(1)
def __makePip(self, x, y):
"draws a pip at (x, y)"
pip = Circle(Point(x, y), self.psize)
pip.setFill(self.background)
pip.setOutline(self.background)
pip.draw(self.win)
return pip
def setColor(self, color):
self.foreground = color
def setValue(self, value):
# turn all pips off
self.pip1.setFill(self.background)
self.pip2.setFill(self.background)
self.pip3.setFill(self.background)
self.pip4.setFill(self.background)
self.pip5.setFill(self.background)
self.pip6.setFill(self.background)
self.pip7.setFill(self.background)
# turn correct pips on
if value == 1:
self.pip4.setFill(self.foreground)
elif value == 2:
self.pip1.setFill(self.foreground)
self.pip7.setFill(self.foreground)
elif value == 3:
self.pip1.setFill(self.foreground)
self.pip7.setFill(self.foreground)
self.pip4.setFill(self.foreground)
elif value == 4:
self.pip1.setFill(self.foreground)
self.pip3.setFill(self.foreground)
self.pip5.setFill(self.foreground)
self.pip7.setFill(self.foreground)
elif value == 5:
self.pip1.setFill(self.foreground)
self.pip3.setFill(self.foreground)
self.pip4.setFill(self.foreground)
self.pip5.setFill(self.foreground)
self.pip7.setFill(self.foreground)
elif value == 6:
self.pip1.setFill(self.foreground)
self.pip2.setFill(self.foreground)
self.pip3.setFill(self.foreground)
self.pip5.setFill(self.foreground)
self.pip6.setFill(self.foreground)
self.pip7.setFill(self.foreground)
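# Illustrative usage sketch (not part of the original file): draw one die,
# set its value, then recolor it; assumes Zelle's graphics.py (GraphWin,
# Point) is importable, which the wildcard import at the top provides.
if __name__ == '__main__':
    win = GraphWin("DieViewColor demo", 200, 200)
    win.setCoords(0, 0, 10, 10)
    die = DieViewColor(win, Point(5, 5), 4)
    die.setValue(5)      # show five pips
    die.setColor("red")  # repaint the pips in red
    win.getMouse()       # wait for a click before closing
    win.close()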
|
[
"[email protected]"
] | |
5625481302911c3189bc36b544673943c69da279
|
4d327de5447519d3c00e6572f74362380783006f
|
/source/res/scripts/client/web_client_api/vehicles/__init__.py
|
95c17e15dd4bdcde0d9f7fef090313d9995c6676
|
[] |
no_license
|
XFreyaX/WorldOfTanks-Decompiled
|
706ac55d919b766aa89f90c97a75672bf2142611
|
5025466edd0dd3e5e50a6c60feb02ae793f6adac
|
refs/heads/master
| 2021-09-21T15:10:32.655452 | 2018-08-28T07:34:00 | 2018-08-28T07:34:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,142 |
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/web_client_api/vehicles/__init__.py
import nations
from items import vehicles
from web_client_api import w2c, w2capi, Field, W2CSchema
class _VehicleInfoSchema(W2CSchema):
vehicle_id = Field(type=(int, long))
@w2capi(name='vehicles', key='action')
class VehiclesWebApi(W2CSchema):
@w2c(_VehicleInfoSchema, 'vehicle_info')
def vehicleInfo(self, cmd):
try:
vehicle = vehicles.getVehicleType(cmd.vehicle_id)
except Exception:
res = {'error': 'vehicle_id is invalid.'}
else:
res = {'vehicle': {'vehicle_id': vehicle.compactDescr,
'tag': vehicle.name,
'name': vehicle.userString,
'short_name': vehicle.shortUserString,
'nation': nations.NAMES[vehicle.id[0]],
'type': vehicles.getVehicleClassFromVehicleType(vehicle),
'tier': vehicle.level,
'is_premium': bool('premium' in vehicle.tags)}}
return res
|
[
"[email protected]"
] | |
ac7c2e6ab2caeed5008a2a2f19cd2c660df5ee43
|
94d1e805521575afb7b6256af1dd6de65a50ada9
|
/problem_10/problem_10.py
|
8fd3318a4bddafb0d3830f22cb0c2895b05fdc79
|
[] |
no_license
|
John-W-Stevens/Euler100
|
fe2004786f64172e02ba18fbe33d95ceb68abf59
|
6f193a47e9e019b99ee9b188d2227587f5a3f4b3
|
refs/heads/master
| 2022-11-26T07:23:36.505138 | 2020-07-28T17:36:39 | 2020-07-28T17:36:39 | 274,224,709 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 262 |
py
|
import time
from prime_sieve import prime_sieve
def problem_10():
return sum(prime_sieve(2000000))
start = time.time()
solution = problem_10()
print(f"{solution} found in {time.time() - start} seconds.")
# 142913828922 found in 0.07372689247131348 seconds.
|
[
"[email protected]"
] | |
995600023a7f11305386d13654b30082fec5afdb
|
c9ece5470d98941a64e5253d17d56a135d89d735
|
/source/conf.py
|
bae026a6515a948f92df365243c821c56604b0ca
|
[
"MIT"
] |
permissive
|
ketgo/ml-notes
|
33d0f4cea17d1d12ac278fa1dc7afee37a737791
|
0351a798d36f5a698038e7f7741cc9d8ad881498
|
refs/heads/main
| 2023-05-15T05:52:21.157263 | 2021-06-15T14:25:47 | 2021-06-15T14:25:47 | 361,756,506 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,910 |
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Machine Learning Notes'
copyright = '2021, Ketan Goyal'
author = 'Ketan Goyal'
# The full version, including alpha/beta/rc tags
release = 'v1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_book_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
[
"[email protected]"
] | |
aacaad190f4ee572df306c9bcfe3bbcada4c13aa
|
aca209472c7288d69adf57124c197baf98c7a6e7
|
/OpenCV讀者資源/讀者資源/程式實例/ch17/ch17_17.py
|
53ef8f1ca63e66e9178e09f7833c2d6e7c58b0fd
|
[] |
no_license
|
Hank-Liao-Yu-Chih/document
|
712790325e48b9d8115d04b5cc2a90cd78431e61
|
fafe616678cd224e70936296962dcdbbf55e38b3
|
refs/heads/master
| 2022-09-22T12:40:33.284033 | 2022-09-08T00:33:41 | 2022-09-08T00:33:41 | 102,203,601 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,288 |
py
|
# ch17_17.py
import cv2
import numpy as np
src = cv2.imread('hand.jpg')
cv2.imshow("src",src)
src_gray = cv2.cvtColor(src,cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(src_gray,50,255,cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(binary,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
mask = np.zeros(src_gray.shape,np.uint8) # create the mask
mask = cv2.drawContours(mask,[cnt],-1,(255,255,255),-1)
cv2.imshow("mask",mask)
# find the min and max pixel values of src_gray within the mask region
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(src_gray,mask=mask)
print(f"min pixel value = {minVal}")
print(f"min pixel location = {minLoc}")
print(f"max pixel value = {maxVal}")
print(f"max pixel location = {maxLoc}")
cv2.circle(src,minLoc,20,[0,255,0],3) # green circle at the min-value pixel
cv2.circle(src,maxLoc,20,[0,0,255],3) # red circle at the max-value pixel
# build a mask so the region of interest can be displayed later
mask1 = np.zeros(src.shape,np.uint8) # create the mask
mask1 = cv2.drawContours(mask1,[cnt],-1,(255,255,255),-1)
cv2.imshow("mask1",mask1)
dst = cv2.bitwise_and(src,mask1) # show the region of interest
cv2.imshow("dst",dst)
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
ef3afca60f71a786bd2173251e5ebcc5770444c2
|
2496bd44a435dbc839b3687d17cc5efdbec8cbdc
|
/app/templates/app/spiders/politicl_whole_pages_spider.py
|
e57e404bb4207ac809ce933e6b2999cff647b108
|
[] |
no_license
|
trujunzhang/generator-djzhang-targets
|
e1b0655ef6a2e9f46f3d548268ab1657b7947d5e
|
395b7c3de11bb5104ff6e86672b290267949ec0f
|
refs/heads/master
| 2021-01-21T14:29:05.382901 | 2016-07-20T01:30:40 | 2016-07-20T01:30:40 | 59,209,227 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,822 |
py
|
# -*- coding: utf-8 -*-
import logging
import scrapy
class <%= appclassname%>sWatchSpider(scrapy.Spider):
name = "<%= appname%>_whole_pages"
def __init__(self, name=None, **kwargs):
from cw<%= appname%>.database_factory import DatabaseFactory, CollectionTypes
database_factory = DatabaseFactory(kwargs['host'], kwargs['port'],
kwargs['user'], kwargs['passwd'],
kwargs['db'], kwargs['collection_name'])
self._cache_db = database_factory.get_database(CollectionTypes.cache)
self._history_db = database_factory.get_database(CollectionTypes.history)
self._page_db = database_factory.get_database(CollectionTypes.page)
from cw<%= appname%>.spiders.dispatch.spider_whole_pages_dispatch import SpiderWholePageDispatch
self.whole_pages_dispatch = SpiderWholePageDispatch(self._page_db)
# Dynamic the domains and start url.
self.allowed_domains = self.whole_pages_dispatch.get_allowed_domains()
page_url = self.whole_pages_dispatch.get_next_page_url()
if page_url:
self.start_urls = [page_url]
else:
logging.debug("Not found the page currently, the schedulared task end!")
super(<%= appclassname%>sWatchSpider, self).__init__(name, **kwargs)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
return super(<%= appclassname%>sWatchSpider, cls).from_crawler(crawler,
                                                       *args,
host=crawler.settings.get('SQL_HOST'),
port=crawler.settings.get('SQL_PORT'),
user=crawler.settings.get('SQL_USER'),
passwd=crawler.settings.get('SQL_PASSWD'),
db=crawler.settings.get('SQL_DB'),
collection_name=crawler.settings.get(
'SQL_COLLECTION_NAME')
)
    # This method is the entry point
def parse(self, response):
# Step 1: parsing the pagination.
self.whole_pages_dispatch.parse_from_pagination(response.url, response, self._cache_db, self._history_db)
# Step 2: Check the next page from the page database.
url = self.whole_pages_dispatch.get_next_page_url()
if url:
            yield scrapy.Request(url, callback=self.parse)  # follow the next page
else:
logging.debug("Scraped the {} pages currently, the schedulared task end!".format(10))
|
[
"[email protected]"
] | |
d1c7d9c016e677c1750841c3f81b7d5f6137af08
|
d6ed05e23faa20beb5e47624870608a9219ea81c
|
/TuningTools_old/scripts/analysis_scripts/official/Trigger_201801XX_data17_v8/export_tuning.py
|
05a362cf393f6ed47403b90ba3e57f3436a5e7ff
|
[] |
no_license
|
kaducovas/ringer
|
f6495088c0d54d622dcc707333b4c2fbf132d65f
|
603311caab016ad0ef052ea4fcc605c5ac4e494b
|
refs/heads/master
| 2020-06-16T21:37:15.228364 | 2019-07-08T01:29:57 | 2019-07-08T01:29:57 | 195,477,531 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,368 |
py
|
#!/usr/bin/env python
from RingerCore import LoggingLevel, expandFolders, Logger, mkdir_p
from TuningTools import CrossValidStatAnalysis, RingerOperation
from pprint import pprint
import os
mainLogger = Logger.getModuleLogger( __name__ )
basepath = 'data/crossval/'
crossval = [
[basepath],
[basepath],
[basepath],
[basepath],
]
####################### Data 2017 #########################
# 25 bins
config = 5
ref = 'SP'
filenameWeights = [
'TrigL2CaloRingerElectronTightConstants',
'TrigL2CaloRingerElectronMediumConstants',
'TrigL2CaloRingerElectronLooseConstants',
'TrigL2CaloRingerElectronVeryLooseConstants',
]
filenameThres = [
'TrigL2CaloRingerElectronTightThresholds',
'TrigL2CaloRingerElectronMediumThresholds',
'TrigL2CaloRingerElectronLooseThresholds',
'TrigL2CaloRingerElectronVeryLooseThresholds',
]
####################### Extract Ringer Configuration #########################
from TuningTools import CreateSelectorFiles, TrigMultiVarHypo_v2
export = CreateSelectorFiles( model = TrigMultiVarHypo_v2(toPickle=True) )
export( crossval, filenameWeights, filenameThres, ref, config )
|
[
"[email protected]"
] | |
9be03fd3eaa84cb66043be2f7fc92759213a1f45
|
527f721ed6080c29f15e410672ef6c30e7f2dca1
|
/owllook/spiders/qidian_all_novels.py
|
f174be9ab66c80592fab0ca3181edb6825ea4f23
|
[
"Apache-2.0"
] |
permissive
|
PaulPaulYang/owllook
|
e06b7daddf2c2326cb6ceedd25ef669368637aa4
|
05e16b69466c1c69b12a195e28163df4f30b35d6
|
refs/heads/master
| 2021-04-09T10:17:59.049159 | 2018-03-13T00:21:36 | 2018-03-13T00:21:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,488 |
py
|
#!/usr/bin/env python
"""
Created by howie.hu at 25/02/2018.
Target URI: https://www.qidian.com/all
Param:?page=1
"""
from pymongo import MongoClient
from talospider import Spider, Item, TextField, AttrField, Request
from talospider.utils import get_random_user_agent
class MongoDb:
_db = None
MONGODB = {
'MONGO_HOST': '127.0.0.1',
'MONGO_PORT': '',
'MONGO_USERNAME': '',
'MONGO_PASSWORD': '',
'DATABASE': 'owllook'
}
def client(self):
# motor
self.mongo_uri = 'mongodb://{account}{host}:{port}/'.format(
account='{username}:{password}@'.format(
username=self.MONGODB['MONGO_USERNAME'],
password=self.MONGODB['MONGO_PASSWORD']) if self.MONGODB['MONGO_USERNAME'] else '',
host=self.MONGODB['MONGO_HOST'] if self.MONGODB['MONGO_HOST'] else 'localhost',
port=self.MONGODB['MONGO_PORT'] if self.MONGODB['MONGO_PORT'] else 27017)
return MongoClient(self.mongo_uri)
@property
def db(self):
if self._db is None:
self._db = self.client()[self.MONGODB['DATABASE']]
return self._db
class QidianNovelsItem(Item):
target_item = TextField(css_select='ul.all-img-list>li')
novel_url = AttrField(css_select='div.book-img-box>a', attr='href')
novel_name = TextField(css_select='div.book-mid-info>h4')
novel_author = TextField(css_select='div.book-mid-info>p.author>a.name')
novel_author_home_url = AttrField(css_select='div.book-mid-info>p.author>a.name', attr='href')
def tal_novel_url(self, novel_url):
return 'http:' + novel_url
def tal_novel_author(self, novel_author):
if isinstance(novel_author, list):
novel_author = novel_author[0].text
return novel_author
def tal_novel_author_home_url(self, novel_author_home_url):
if isinstance(novel_author_home_url, list):
novel_author_home_url = novel_author_home_url[0].get('href').strip()
return 'http:' + novel_author_home_url
class QidianNovelsSpider(Spider):
start_urls = ['https://www.qidian.com/all?page=1']
headers = {
"User-Agent": get_random_user_agent()
}
set_mul = True
request_config = {
'RETRIES': 3,
'DELAY': 0,
'TIMEOUT': 10
}
all_novels_col = MongoDb().db.all_novels
def parse(self, res):
urls = ['https://www.qidian.com/all?page={i}'.format(i=i) for i in range(1, 41645)]
for url in urls:
headers = {
"User-Agent": get_random_user_agent()
}
yield Request(url, request_config=self.request_config, headers=headers, callback=self.parse_item)
def parse_item(self, res):
items_data = QidianNovelsItem.get_items(html=res.html)
for item in items_data:
data = {
'novel_url': item.novel_url,
'novel_name': item.novel_name,
'novel_author': item.novel_author,
'novel_author_home_url': item.novel_author_home_url,
'spider': 'qidian'
}
if self.all_novels_col.find_one({"novel_name": item.novel_name}) is None:
self.all_novels_col.insert_one(data)
            print(item.novel_name + ' - scraped successfully')
if __name__ == '__main__':
    # More multi-item examples: https://gist.github.com/howie6879/3ef4168159e5047d42d86cb7fb706a2f
QidianNovelsSpider.start()
|
[
"[email protected]"
] | |
173616fa9d03a512d598ed7c89c3962b5cf28731
|
97cb7589aeb1c5c473301b96ba1c4782608fe7a0
|
/backend/eleven11_2189/urls.py
|
f9bfe9afd5639d03eb87d2f687f87ada287e8751
|
[] |
no_license
|
crowdbotics-apps/eleven11-2189
|
4e39bf581c43904cdd9638690309e3569ab724af
|
6ef42908a47a96444ef2db70a3838504b642e9f8
|
refs/heads/master
| 2022-12-07T17:57:14.979964 | 2019-04-11T19:09:53 | 2019-04-11T19:09:53 | 180,860,812 | 0 | 1 | null | 2022-12-06T15:23:48 | 2019-04-11T19:09:49 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,050 |
py
|
"""eleven11_2189 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url('', include('home.urls')),
url(r'^accounts/', include('allauth.urls')),
url(r'^api/v1/', include('home.api.v1.urls')),
url(r'^admin/', admin.site.urls),
]
admin.site.site_header = 'eleven11'
admin.site.site_title = 'eleven11 Admin Portal'
admin.site.index_title = 'eleven11 Admin'
|
[
"[email protected]"
] | |
f038988a23b7f81f82ce721941fcced27304b394
|
51a0cc11641f572749a23148df5ef735494f979b
|
/tinysync/persistence.py
|
a2494c4cc66f2fb3fb548fdf4905944cb2eb6aa9
|
[
"Unlicense"
] |
permissive
|
mikaelho/tinysync
|
f6e02acc61119b88c6f1d38a3d7be261294d9d44
|
70197b8d90723d4793f3039f305241f986430d75
|
refs/heads/master
| 2020-01-23T21:42:15.110098 | 2019-12-11T13:17:37 | 2019-12-11T13:17:37 | 74,687,703 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,523 |
py
|
import json
import importlib
from collections.abc import MutableMapping
from contextlib import contextmanager
from copy import deepcopy
import dictdiffer
from tinysync.util import *
@contextmanager
def do_not_track(obj):
previous_value = obj._tracker.handler.track
obj._tracker.handler.track = False
yield
obj._tracker.handler.track = previous_value
@contextmanager
def do_not_save(obj):
previous_value = obj._tracker.handler.save_changes
obj._tracker.handler.save_changes = False
yield
obj._tracker.handler.save_changes = previous_value
class Persistence():
def load(self):
"""Load whole structure from persistence provider."""
def load_specific(self, key):
"""Load a specific part of the structure, indicated by key."""
def change_advisory(self, change):
"""Information about a change."""
    def dump(self, to_save, handler, conflict_callback, initial=False):
"""Persist the given structure.
Initial save may be different in some cases.
Returns a list of conflicts from the persistence layer or None if no conflicts.
Conflicts are reported as a list of (path, new value) tuples. """
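# Illustrative only (not part of the original module): a minimal dict-backed
# subclass showing what the Persistence interface above expects; the class
# name and in-memory storage scheme are assumptions, not tinysync API.
class InMemoryPersistence(Persistence):
    def __init__(self):
        self.store = {}                 # whole structure kept in memory
    def load(self):
        return self.store or None       # None signals that nothing is persisted yet
    def load_specific(self, key):
        return self.store.get(key)
    def dump(self, to_save, handler=None, conflict_callback=None, initial=False):
        self.store = deepcopy(to_save)  # deepcopy is imported at the top of the module
        return None                     # in-memory storage cannot conflict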
class AbstractFile(Persistence):
file_format = 'abstract'
def __init__(self, filename):
self.filename = filename + '.' + self.file_format
def load(self):
try:
with open(self.filename, encoding='utf-8') as fp:
return self.loader(fp)
except (EOFError, FileNotFoundError):
return None
def load_specific(self, key):
""" For file-based persistence, key is ignored,
thus in effect identical to calling load().
"""
return self.load()
def dump(self, to_save, handler=None, conflict_callback=None, initial=False):
with open(self.filename, 'w', encoding='utf-8') as fp:
self.dumper(to_save, fp)
class SafeYamlFile(AbstractFile):
file_format = 'yaml'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
globals()['yaml'] = importlib.import_module('yaml')
def loader(self, fp):
return yaml.safe_load(fp)
def dumper(self, to_save, fp):
class TrackerSafeDumper(yaml.SafeDumper):
def represent_data(self, data):
if hasattr(data, '__subject__'):
data = data.__subject__
return super().represent_data(data)
yaml.dump(
to_save, fp,
default_flow_style=False,
allow_unicode=True,
Dumper=TrackerSafeDumper)
class LazyPersistence(Persistence):
"""Persistence options that assume that the
structure starts with a dict, and separate parts of the structure can be updated and loaded by
key, instead of the whole structure.
Subclass __init__ functions should set a self.db
value to be used in the other operations.
"""
def __init__(self):
        self.changed_keys = set()
        self.deleted_keys = set()  # two distinct sets; aliasing one set would mix changed and deleted keys
def change_advisory(self, change):
assert hasattr(change.root, '__getitem__')
if len(change.path) == 0:
if change.func_name == '__setitem__':
self.changed_keys.add(change.args[0])
if change.func_name == '__delitem__':
self.deleted_keys.add(change.args[0])
else:
self.changed_keys.add(change.path[0])
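    # Bookkeeping example (illustrative): with a tracked root dict d,
    #   d['a'] = 1       -> change_advisory puts 'a' in changed_keys
    #   del d['a']       -> change_advisory puts 'a' in deleted_keys
    #   d['a']['x'] = 2  -> the change path starts at 'a', so 'a' is marked changed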
class JsonDBM(LazyPersistence):
def __init__(self, filename):
super().__init__()
globals()['dbm'] = importlib.import_module('dbm')
self.filename = filename + '.dbm'
self.db = dbm.open(self.filename, 'n')
def __del__(self):
self.db.close()
def load(self):
try:
if len(self.db) == 0:
return None
return_value = {}
for key in self.db:
return_value[key.decode()] = LazyLoadMarker()
return return_value
except (EOFError, FileNotFoundError):
return None
def load_specific(self, key):
return json.loads(self.db[key].decode())
def dump(self, to_save, handler=None, conflict_callback=None, initial=False):
assert hasattr(to_save, '__getitem__')
if initial:
self.changed_keys = (key for key in to_save)
for key in self.changed_keys:
self.db[key] = json.dumps(to_save[key])
self.changed_keys = set()
for key in self.deleted_keys:
del self.db[key]
self.deleted_keys = set()
class CouchDB(LazyPersistence):
""" Save structure to CouchDB, or a variant
like Cloudant.
Root must be a dict, likewise the elements
contained in the root dict, which are further
polluted by CouchDB `_id` and `_rev` elements
(where _id == key in the root dict).
"""
server_address = None
def __init__(self, database_url):
""" Initializes a CouchDB persistence provider, with the assumption that one provider corresponds to one CouchDB database and one Python data structure.
Parameters:
        * `database_url` is either:
* a plain CouchDB database name
* a url that starts with 'http' and includes the database name, e.g.: https://username:[email protected]/database.
        If only the database name is given, the connection is made to the default 'localhost:5984' with no authentication; if the class-level `server_address` attribute is also defined, the database name is appended to it.
"""
super().__init__()
globals()['couchdb'] = importlib.import_module('couchdb')
if not database_url.startswith('http') and self.server_address is not None:
import urllib.parse
database_url = urllib.parse.urljoin(self.server_address, database_url)
self.name = database_url.split('/')[-1]
server_url = database_url[:-len(self.name)]
self.server = couchdb.Server(couchdb.client.DEFAULT_BASE_URL if server_url == '' else server_url)
try:
self.db = self.server[self.name]
except couchdb.http.ResourceNotFound:
self.db = self.server.create(self.name)
self.last_known_good = {}
def load(self):
if len(self.db) == 0:
return None
return_value = {}
for key in self.db:
return_value[key] = LazyLoadMarker()
return return_value
def load_specific(self, key):
loaded = dict(self.db[key])
self.last_known_good[key] = loaded
return loaded
#del doc['_id']
#self.revs[key] = doc['_rev']
#del doc['_rev']
#return doc
def dump(self, to_save, handler=None, conflict_callback=None, initial=False):
assert hasattr(to_save, '__getitem__')
eprint('changed', self.changed_keys)
# Must not trigger new saves to remote
with do_not_save(to_save):
conflicts = []
if initial:
self.changed_keys = (key for key in to_save)
for key in self.changed_keys:
eprint('saving', key)
doc = to_save[key]
assert isinstance(doc, MutableMapping)
try:
with do_not_track(to_save):
doc['_id'] = key
(_, rev) = self.db.save(deepcopy(doc))
doc['_rev'] = rev
except couchdb.ResourceConflict:
self.handle_conflict(to_save, key, doc, conflict_callback)
self.changed_keys = set()
for key in self.deleted_keys:
doc = to_save[key]
doc['_id'] = key
try:
self.db.delete(key)
except couchdb.ResourceConflict:
                    conflicts.append((key, doc))
                    return conflicts  # the add_to_conflicts helper was never defined; report conflicts per the dump() contract
self.deleted_keys = set()
def handle_conflict(self, to_save, key, local_doc, conflict_callback):
last_good = self.last_known_good.get(key, {})
remote_doc = self.load_specific(key)
last_rev = remote_doc.pop('_rev')
with do_not_track(to_save):
local_doc.pop('_rev', None)
remote_diff = list(dictdiffer.diff(last_good, remote_doc))
local_diff = list(dictdiffer.diff(last_good, local_doc))
one_way = dictdiffer.patch(remote_diff, last_good)
one_way = dictdiffer.patch(local_diff, one_way)
other_way = dictdiffer.patch(local_diff, last_good)
other_way = dictdiffer.patch(remote_diff, other_way)
eprint(one_way, other_way)
if one_way == other_way:
eprint('resolved')
one_way['_rev'] = last_rev
self.db.save(one_way)
with do_not_track(to_save):
to_save[key] = one_way
else:
eprint('no resolve')
remote_doc['_rev'] = last_rev
to_save[key] = remote_doc
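    # The merge above, summarized (illustrative): diff the last-known-good copy
    # against both the remote and the local doc with dictdiffer, then patch the
    # two diffs in both possible orders; if remote-then-local equals
    # local-then-remote the edits commute and the merged doc is saved back,
    # otherwise the remote version wins and replaces the local copy.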
def clean(self):
""" Convenience function that deletes the underlying CouchDB database. """
self.server.delete(self.name)
|
[
"[email protected]"
] | |
0706c7aeac1ba92e470ffd84b8d64a2767b38f86
|
e5a511e346f5be8a82fe9cb2edf457aa7e82859c
|
/Python/cppsecrets.com/program 51.py
|
c8ba027af76fa15a1501d402fc650146b31d90eb
|
[] |
no_license
|
nekapoor7/Python-and-Django
|
8397561c78e599abc8755887cbed39ebef8d27dc
|
8fa4d15f4fa964634ad6a89bd4d8588aa045e24f
|
refs/heads/master
| 2022-10-10T20:23:02.673600 | 2020-06-11T09:06:42 | 2020-06-11T09:06:42 | 257,163,996 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 180 |
py
|
"""Python Program to Find the Second Largest Number in a List"""
list1 = list(map(int,input().split()))
seclarge = max(ele for ele in list1 if ele != max(list1))
print(seclarge)
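# Illustrative run (not in the original): for input "4 1 4 3", max(list1) is 4,
# the generator keeps [1, 3], and the printed second-largest value is 3.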
|
[
"[email protected]"
] | |
f744e68c36103e0914079f70b40c162e9e20f715
|
b6c7ff1b2f048d3523b591ae56227be88f701b2c
|
/preprocess_gisaid.py
|
10f50d0ea9f1371d72612ea6d263407fcf59b8de
|
[
"Apache-2.0"
] |
permissive
|
majagarbulinska/pyro-cov
|
98ef7fc9716692fccb4d9028c81c1e4e47f49e8e
|
fdbd37843618a3269b24430b8e66536583773046
|
refs/heads/master
| 2023-08-23T05:02:17.030999 | 2021-11-02T14:40:33 | 2021-11-02T14:40:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,255 |
py
|
# Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import argparse
import datetime
import json
import logging
import os
import pickle
import warnings
from collections import Counter, defaultdict
from pyrocov import pangolin
from pyrocov.geo import gisaid_normalize
from pyrocov.mutrans import START_DATE
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(relativeCreated) 9d %(message)s", level=logging.INFO)
DATE_FORMATS = {4: "%Y", 7: "%Y-%m", 10: "%Y-%m-%d"}
def parse_date(string):
fmt = DATE_FORMATS.get(len(string))
if fmt is None:
        # Attempt to fix poorly formatted dates like 2020-09-1.
parts = string.split("-")
parts = parts[:1] + [f"{int(p):>02d}" for p in parts[1:]]
string = "-".join(parts)
fmt = DATE_FORMATS[len(string)]
return datetime.datetime.strptime(string, fmt)
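# Example behavior (illustrative, not in the original file):
#   parse_date("2020")      -> datetime(2020, 1, 1)  via "%Y"
#   parse_date("2020-09")   -> datetime(2020, 9, 1)  via "%Y-%m"
#   parse_date("2020-09-1") -> datetime(2020, 9, 1)  after zero-padding the day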
FIELDS = ["virus_name", "accession_id", "collection_date", "location", "add_location"]
def main(args):
logger.info(f"Filtering {args.gisaid_file_in}")
if not os.path.exists(args.gisaid_file_in):
raise OSError(f"Missing {args.gisaid_file_in}; you may need to request a feed")
os.makedirs("results", exist_ok=True)
columns = defaultdict(list)
stats = defaultdict(Counter)
covv_fields = ["covv_" + key for key in FIELDS]
with open(args.gisaid_file_in) as f:
for i, line in enumerate(f):
# Optimize for faster reading.
line, _ = line.split(', "sequence": ', 1)
line += "}"
# Filter out bad data.
datum = json.loads(line)
if len(datum["covv_collection_date"]) < 7:
continue # Drop rows with no month information.
date = parse_date(datum["covv_collection_date"])
if date < args.start_date:
date = args.start_date # Clip rows before start date.
lineage = datum["covv_lineage"]
if lineage in (None, "None", "", "XA"):
continue # Drop rows with unknown or ambiguous lineage.
try:
lineage = pangolin.compress(lineage)
lineage = pangolin.decompress(lineage)
assert lineage
except (ValueError, AssertionError) as e:
warnings.warn(str(e))
continue
# Fix duplicate locations.
datum["covv_location"] = gisaid_normalize(datum["covv_location"])
# Collate.
columns["lineage"].append(lineage)
for covv_key, key in zip(covv_fields, FIELDS):
columns[key].append(datum[covv_key])
columns["day"].append((date - args.start_date).days)
# Aggregate statistics.
stats["date"][datum["covv_collection_date"]] += 1
stats["location"][datum["covv_location"]] += 1
stats["lineage"][lineage] += 1
if i % args.log_every == 0:
print(".", end="", flush=True)
if i >= args.truncate:
break
num_dropped = i + 1 - len(columns["day"])
logger.info(f"dropped {num_dropped}/{i+1} = {num_dropped/(i+1)/100:0.2g}% rows")
logger.info(f"saving {args.columns_file_out}")
with open(args.columns_file_out, "wb") as f:
pickle.dump(dict(columns), f)
logger.info(f"saving {args.stats_file_out}")
with open(args.stats_file_out, "wb") as f:
pickle.dump(dict(stats), f)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Preprocess GISAID data")
parser.add_argument("--gisaid-file-in", default="results/gisaid.json")
parser.add_argument("--columns-file-out", default="results/gisaid.columns.pkl")
parser.add_argument("--stats-file-out", default="results/gisaid.stats.pkl")
parser.add_argument("--subset-file-out", default="results/gisaid.subset.tsv")
parser.add_argument("--subset-dir-out", default="results/fasta")
parser.add_argument("--start-date", default=START_DATE)
parser.add_argument("-l", "--log-every", default=1000, type=int)
parser.add_argument("--truncate", default=int(1e10), type=int)
args = parser.parse_args()
args.start_date = parse_date(args.start_date)
main(args)
|
[
"[email protected]"
] | |
138fac8d3869d2983222c3b9d013138ed4ff6bbd
|
9834a788f25842496a5a19f9c6fb49a6123fb131
|
/neo/SmartContract/tests/test_notify_event.py
|
bf8b305348a30f3803098657da6a62c6949144a4
|
[
"MIT",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
jseagrave21/neo-python
|
c35d62c5f25d78ddf024180028e312b1c71ee85a
|
84c84784daafdd11a8da125c5a8704ab04d874a8
|
refs/heads/development
| 2021-06-14T09:00:59.269994 | 2019-02-17T18:15:02 | 2019-02-17T18:15:02 | 139,907,465 | 0 | 5 |
MIT
| 2022-09-19T15:46:29 | 2018-07-05T22:40:49 |
Python
|
UTF-8
|
Python
| false | false | 9,832 |
py
|
from unittest import TestCase
from neo.SmartContract.SmartContractEvent import SmartContractEvent, NotifyEvent
from neocore.UInt160 import UInt160
from neocore.UInt256 import UInt256
from neocore.BigInteger import BigInteger
from neo.IO.MemoryStream import StreamManager
from neocore.IO.BinaryWriter import BinaryWriter
from neo.SmartContract.ContractParameter import ContractParameter, ContractParameterType
class EventTestCase(TestCase):
contract_hash = UInt160(data=bytearray(b'\x11\xc4\xd1\xf4\xfb\xa6\x19\xf2b\x88p\xd3n:\x97s\xe8tp['))
event_tx = UInt256(data=bytearray(b'\x90\xe4\xf1\xbbb\x8e\xf1\x07\xde\xe9\xf0\xd2\x12\xd1w\xbco\x844\x07=\x1b\xa7\x1f\xa7\x94`\x0b\xb4\x88|K'))
addr_to = b')\x96S\xb5\xe3e\xcb3\xb4\xea:\xd1\xd7\xe1\xb3\xf5\xe6\x81N/'
addr_from = b'4\xd0=k\x80TF\x9e\xa8W\x83\xfa\x9eIv\x0b\x9bs\x9d\xb6'
def test_1_serialize_runtime_log(self):
sc = SmartContractEvent(SmartContractEvent.RUNTIME_LOG, ContractParameter(ContractParameterType.Array, []), self.contract_hash, 99999, self.event_tx, True, False)
stream = StreamManager.GetStream()
writer = BinaryWriter(stream)
sc.Serialize(writer)
out = bytes(stream.getvalue())
self.assertEqual(out, b'\x19SmartContract.Runtime.Log\x11\xc4\xd1\xf4\xfb\xa6\x19\xf2b\x88p\xd3n:\x97s\xe8tp[\x9f\x86\x01\x00\x90\xe4\xf1\xbbb\x8e\xf1\x07\xde\xe9\xf0\xd2\x12\xd1w\xbco\x844\x07=\x1b\xa7\x1f\xa7\x94`\x0b\xb4\x88|K')
StreamManager.ReleaseStream(stream)
new_event = SmartContractEvent.FromByteArray(out)
self.assertEqual(new_event.event_type, sc.event_type)
self.assertEqual(new_event.contract_hash, sc.contract_hash)
self.assertEqual(new_event.test_mode, sc.test_mode)
self.assertEqual(new_event.tx_hash, sc.tx_hash)
self.assertEqual(new_event.block_number, sc.block_number)
def test_2_serialize_notify_no_payload(self):
sc = SmartContractEvent(SmartContractEvent.RUNTIME_NOTIFY, ContractParameter(ContractParameterType.Array, []), self.contract_hash, 99, self.event_tx, True, False)
stream = StreamManager.GetStream()
writer = BinaryWriter(stream)
sc.Serialize(writer)
out = bytes(stream.getvalue())
self.assertEqual(out, b'\x1cSmartContract.Runtime.Notify\x11\xc4\xd1\xf4\xfb\xa6\x19\xf2b\x88p\xd3n:\x97s\xe8tp[c\x00\x00\x00\x90\xe4\xf1\xbbb\x8e\xf1\x07\xde\xe9\xf0\xd2\x12\xd1w\xbco\x844\x07=\x1b\xa7\x1f\xa7\x94`\x0b\xb4\x88|K')
StreamManager.ReleaseStream(stream)
new_event = SmartContractEvent.FromByteArray(out)
self.assertEqual(new_event.event_type, sc.event_type)
self.assertEqual(new_event.contract_hash, sc.contract_hash)
self.assertEqual(new_event.test_mode, sc.test_mode)
self.assertEqual(new_event.tx_hash, sc.tx_hash)
self.assertEqual(new_event.block_number, sc.block_number)
def test_2_serialize_single_notify_payload(self):
sc = NotifyEvent(SmartContractEvent.RUNTIME_NOTIFY, ContractParameter(ContractParameterType.Array, [ContractParameter(ContractParameterType.String, b'hello')]), self.contract_hash, 99, self.event_tx, True, False)
stream = StreamManager.GetStream()
writer = BinaryWriter(stream)
sc.Serialize(writer)
out = bytes(stream.getvalue())
self.assertEqual(out, b'\x1cSmartContract.Runtime.Notify\x11\xc4\xd1\xf4\xfb\xa6\x19\xf2b\x88p\xd3n:\x97s\xe8tp[c\x00\x00\x00\x90\xe4\xf1\xbbb\x8e\xf1\x07\xde\xe9\xf0\xd2\x12\xd1w\xbco\x844\x07=\x1b\xa7\x1f\xa7\x94`\x0b\xb4\x88|K\x05hello')
StreamManager.ReleaseStream(stream)
new_event = SmartContractEvent.FromByteArray(out)
self.assertEqual(new_event.event_type, sc.event_type)
self.assertEqual(new_event.contract_hash, sc.contract_hash)
self.assertEqual(new_event.test_mode, sc.test_mode)
self.assertEqual(new_event.tx_hash, sc.tx_hash)
self.assertEqual(new_event.block_number, sc.block_number)
self.assertEqual(new_event.notify_type, b'hello')
self.assertEqual(new_event.AddressFrom, None)
self.assertEqual(new_event.AddressTo, None)
self.assertEqual(new_event.Amount, 0)
self.assertEqual(new_event.is_standard_notify, False)
def test_3_serialize_single_transfer_notify_payload(self):
sc = NotifyEvent(SmartContractEvent.RUNTIME_NOTIFY, ContractParameter(ContractParameterType.Array, [ContractParameter(ContractParameterType.String, b'transfer')]), self.contract_hash, 99, self.event_tx, True, False)
stream = StreamManager.GetStream()
writer = BinaryWriter(stream)
sc.Serialize(writer)
out = bytes(stream.getvalue())
StreamManager.ReleaseStream(stream)
new_event = SmartContractEvent.FromByteArray(out)
self.assertEqual(new_event.event_type, sc.event_type)
self.assertEqual(new_event.contract_hash, sc.contract_hash)
self.assertEqual(new_event.test_mode, sc.test_mode)
self.assertEqual(new_event.tx_hash, sc.tx_hash)
self.assertEqual(new_event.block_number, sc.block_number)
self.assertEqual(new_event.notify_type, b'transfer')
self.assertEqual(new_event.AddressFrom, None)
self.assertEqual(new_event.AddressTo, None)
self.assertEqual(new_event.Amount, 0)
self.assertEqual(new_event.is_standard_notify, False)
self.assertEqual(new_event.ShouldPersist, False)
def test_4_serialize_full_transfer_notify_payload(self):
payload = ContractParameter(ContractParameterType.Array, [
ContractParameter(ContractParameterType.String, b'transfer'),
ContractParameter(ContractParameterType.ByteArray, self.addr_to),
ContractParameter(ContractParameterType.ByteArray, self.addr_from),
ContractParameter(ContractParameterType.Integer, 123000)
])
sc = NotifyEvent(SmartContractEvent.RUNTIME_NOTIFY, payload, self.contract_hash, 91349, self.event_tx, True, False)
stream = StreamManager.GetStream()
writer = BinaryWriter(stream)
sc.Serialize(writer)
out = bytes(stream.getvalue())
StreamManager.ReleaseStream(stream)
new_event = SmartContractEvent.FromByteArray(out)
self.assertEqual(new_event.event_type, sc.event_type)
self.assertEqual(new_event.contract_hash, sc.contract_hash)
self.assertEqual(new_event.test_mode, sc.test_mode)
self.assertEqual(new_event.tx_hash, sc.tx_hash)
self.assertEqual(new_event.block_number, sc.block_number)
self.assertEqual(new_event.notify_type, b'transfer')
self.assertEqual(new_event.AddressTo, 'ALb8FEhEmtSqv97fuNVuoLmcmrSKckffRf')
self.assertEqual(new_event.AddressFrom, 'AKZmSGPD7ytJBbxpRPmobYGLNxdWH3Jiqs')
self.assertEqual(new_event.Amount, 123000)
self.assertEqual(new_event.is_standard_notify, True)
def test_5_serialize_full_refund_payload(self):
payload = ContractParameter(ContractParameterType.Array, [
ContractParameter(ContractParameterType.String, b'refund'),
ContractParameter(ContractParameterType.ByteArray, self.addr_to),
ContractParameter(ContractParameterType.Integer, 123000)
])
sc = NotifyEvent(SmartContractEvent.RUNTIME_NOTIFY, payload, self.contract_hash, 91349, self.event_tx, True, False)
stream = StreamManager.GetStream()
writer = BinaryWriter(stream)
sc.Serialize(writer)
out = bytes(stream.getvalue())
StreamManager.ReleaseStream(stream)
new_event = SmartContractEvent.FromByteArray(out)
self.assertEqual(new_event.event_type, sc.event_type)
self.assertEqual(new_event.contract_hash, sc.contract_hash)
self.assertEqual(new_event.test_mode, sc.test_mode)
self.assertEqual(new_event.tx_hash, sc.tx_hash)
self.assertEqual(new_event.block_number, sc.block_number)
self.assertEqual(new_event.notify_type, b'refund')
self.assertEqual(new_event.AddressTo, 'AKZmSGPD7ytJBbxpRPmobYGLNxdWH3Jiqs')
self.assertEqual(new_event.addr_from, sc.contract_hash)
self.assertEqual(new_event.Amount, 123000)
self.assertEqual(new_event.is_standard_notify, True)
def test_6_serialize_full_approve_payload(self):
payload = ContractParameter(ContractParameterType.Array, [
ContractParameter(ContractParameterType.String, b'approve'),
ContractParameter(ContractParameterType.ByteArray, self.addr_to),
ContractParameter(ContractParameterType.ByteArray, self.addr_from),
ContractParameter(ContractParameterType.ByteArray, b'x\xe0\x01')
])
sc = NotifyEvent(SmartContractEvent.RUNTIME_NOTIFY, payload, self.contract_hash, 91349, self.event_tx, True, False)
stream = StreamManager.GetStream()
writer = BinaryWriter(stream)
sc.Serialize(writer)
out = bytes(stream.getvalue())
StreamManager.ReleaseStream(stream)
new_event = SmartContractEvent.FromByteArray(out)
self.assertEqual(new_event.event_type, sc.event_type)
self.assertEqual(new_event.contract_hash, sc.contract_hash)
self.assertEqual(new_event.test_mode, sc.test_mode)
self.assertEqual(new_event.tx_hash, sc.tx_hash)
self.assertEqual(new_event.block_number, sc.block_number)
self.assertEqual(new_event.notify_type, b'approve')
self.assertEqual(new_event.AddressFrom, 'AKZmSGPD7ytJBbxpRPmobYGLNxdWH3Jiqs')
self.assertEqual(new_event.AddressTo, 'ALb8FEhEmtSqv97fuNVuoLmcmrSKckffRf')
self.assertEqual(new_event.Amount, 123000)
self.assertEqual(new_event.is_standard_notify, True)
self.assertEqual(new_event.ShouldPersist, True)
|
[
"[email protected]"
] | |
a5423765373db001d6e72a7896e8a2040d1b9c3a
|
7fbf91c595f3adb67e29ab879a0b215581d260bf
|
/知识点/04-LiaoXueFeng-master/08-function.py
|
b768bd1aef41bc822d278fa44ad012d0abe7dc69
|
[] |
no_license
|
Randyedu/python
|
69947b3836e62d0081d92591ae2acd9a54eadb9a
|
5f9e7bec295ae05eadde0f661e7039c2bd08f725
|
refs/heads/master
| 2021-04-26T22:20:22.555128 | 2018-03-02T07:01:27 | 2018-03-02T07:01:27 | 124,074,741 | 1 | 0 | null | 2018-03-06T12:23:42 | 2018-03-06T12:23:42 | null |
UTF-8
|
Python
| false | false | 3,389 |
py
|
print(abs(0))
print(max(1,4,5,6))
print(int('2322'))
print(int(13.23))
print(int(float('13.98')))
print(float('23.24'))
print(str(121))
print(bool(1))
print(bool(0))
print(bool(-1))
print(bool(''))
# A function name is really just a reference to a function object,
# so assigning it to a variable effectively gives the function an alias.
a = abs # the variable a now points to the abs function
print(a(-1)) # so abs can also be called through a
n1 = 255
n2 = 1000
print(hex(n1))
print(hex(n2))
'''
In Python, a function is defined with the def statement: write the function name, parentheses, and the parameters followed by a colon, then put the function body in an indented block and return the result with the return statement.
'''
def my_abs(x):
# 数据类型检查可以用内置函数isinstance()实现
if not isinstance(x, (int, float)):
raise TypeError('bad operand type')
if x >= 0:
return x
else:
return -x
print(my_abs(-9.7))
import math
def move(x, y, step, angle = 0):
nx = x + step * math.cos(angle)
ny = y - step * math.sin(angle)
return nx, ny
x, y = move(100,100,60,math.pi/6)
print(x,y)
# the return value is actually a tuple
r = move(100,100,60,math.pi/6)
print(r)
print(r[0],r[1])
def quadratic(a,b,c):
B = (b * b - 4 * a * c)
if B >= 0:
ans1 = (-1 * b + math.sqrt(B)) / (2 * a)
ans2 = (-1 * b - math.sqrt(B)) / (2 * a)
return ans1,ans2
print(quadratic(2,3,1))
print(quadratic(1,3,-4))
def power(x,n=2):
s = 1
while n > 0:
n = n - 1
s = s * x
return s
print(power(5))
print(power(15))
print(power(5,4))
def add_end(L=[]):
L.append('END')
return L
print(add_end([1,2,3]))
print(add_end())
print(add_end())
print(add_end())
def addend(L=None):
if L is None:
L = []
L.append('END')
return L
print(addend())
print(addend())
print(addend())
def calc(numbers):
sum = 0
for n in numbers:
sum = sum + n * n
return sum
print(calc([1,2,3,4,5]))
def calc2(* numbers):
sum = 0
for n in numbers:
sum = sum + n * n
return sum
print(calc2(12,32,32,42))
nums = [1,2,3]
print(calc2(nums[0],nums[1],nums[2]))
print(calc2(*nums))
def person(name,age,**kw):
if 'city' in kw:
print('has city', '==',end = '')
if 'job' in kw:
print('has job', '==',end = '')
print('name:',name,'age:',age,'other:',kw)
person('Min',30)
person('Bob',35,city='Beijing')
person('Bob',35,city='Beijing',job='Ern')
extra = {'city':'Beijing','job':'Eng'}
person('Jack',24,**extra)
def fact(n):
if n == 1:
return 1
return n * fact(n-1)
print(fact(5))
print(fact(100))
print(fact(100))
# The way to avoid stack overflow in recursive calls is tail-call optimization; a tail-recursive
# function is equivalent to a loop, so a loop can be seen as a special tail-recursive function.
# Tail recursion means the function calls itself as it returns, with no extra expression in the
# return statement, so a compiler or interpreter can optimize the call to reuse a single stack
# frame no matter how many times the function recurses, preventing stack overflow.
def fec(n):
return fac_iter(n,1)
def fac_iter(num,product):
if num==1:
return product
return fac_iter(num-1,num*product)
print(fact(6))
def move(n,a,b,c):
if n==1:
print(a,'-->', c)
else:
move(n-1,a,c,b) #move the n-1 from a to b
move(1,a,b,c) #now,a has just one dish,so just move it to c
move(n-1,b,a,c) #now,move the n-1 dishes from b to c
move(4,'A','B','C')
|
[
"[email protected]"
] | |
8709663ccf77b94eea9421338c940157369ddfd2
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_9/plybla001/question2.py
|
8bf37650e344a8354441a930101703d6225fb85c
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 497 |
py
|
"""Format Lines
B.Player
11/05/2014"""
import textwrap
#Variables
inName=input("Enter the input filename:\n")
outName=input("Enter the output filename:\n")
width=eval(input("Enter the line width:\n"))
paras=[]
i=0
inFile=open(inName,'r')
data=inFile.read()
inFile.close()
paras=data.split("\n\n")
outFile=open(outName,'w')
for para in paras:
text=textwrap.wrap(para,width)
for line in text:
print(line,file=outFile)
print("",file=outFile)
outFile.close()
|
[
"[email protected]"
] | |
ef1fcbe676a4d21f2e0203d263d60155be7e015b
|
b8e9dd6fd8f8b691cba5a3af2388467bcf6c90bb
|
/samples/openapi3/client/3_0_3_unit_test/python-experimental/unit_test_api/model/uniqueitems_validation.py
|
93644ef0e5d68fe6ea17ddaf07330762ffafefac
|
[
"Apache-2.0"
] |
permissive
|
FallenRiteMonk/openapi-generator
|
f8b98940219eecf14dc76dced4b0fbd394522aa3
|
b6576d11733ecad6fa4a0a616e1a06d502a771b7
|
refs/heads/master
| 2023-03-16T05:23:36.501909 | 2022-09-02T01:46:56 | 2022-09-02T01:46:56 | 164,609,299 | 0 | 0 |
Apache-2.0
| 2019-01-08T09:08:56 | 2019-01-08T09:08:56 | null |
UTF-8
|
Python
| false | false | 1,878 |
py
|
# coding: utf-8
"""
openapi 3.0.3 sample spec
sample spec for testing openapi functionality, built from json schema tests for draft6 # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
class UniqueitemsValidation(
schemas.AnyTypeSchema,
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
class MetaOapg:
additional_properties = schemas.AnyTypeSchema
unique_items = True
def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties:
# dict_instance[name] accessor
if not hasattr(self.MetaOapg, 'properties') or name not in self.MetaOapg.properties.__annotations__:
return super().__getitem__(name)
try:
return super().__getitem__(name)
except KeyError:
return schemas.unset
def __new__(
cls,
*args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes, ],
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes, ],
) -> 'UniqueitemsValidation':
return super().__new__(
cls,
*args,
_configuration=_configuration,
**kwargs,
)
|
[
"[email protected]"
] | |
048e3031fe6ef4f1e0319fdb0ea5bfdbbc6db368
|
b6472217400cfce4d12e50a06cd5cfc9e4deee1f
|
/sites/top/api/rest/JipiaoAgentorderSuccessRequest.py
|
676b4a43ce63c3e49f0980160f008cd5cbf13a17
|
[] |
no_license
|
topwinner/topwinner
|
2d76cab853b481a4963826b6253f3fb0e578a51b
|
83c996b898cf5cfe6c862c9adb76a3d6a581f164
|
refs/heads/master
| 2021-01-22T22:50:09.653079 | 2012-08-26T19:11:16 | 2012-08-26T19:11:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 320 |
py
|
'''
Created by auto_sdk on 2012-08-26 16:43:44
'''
from top.api.base import RestApi
class JipiaoAgentorderSuccessRequest(RestApi):
def __init__(self,domain,port):
RestApi.__init__(self,domain, port)
self.order_id = None
self.success_info = None
def getapiname(self):
return 'taobao.jipiao.agentorder.success'
|
[
"[email protected]"
] | |
57d3d0f895c149d41283f64b93f30b73b305c96c
|
1db7398d89e70b20bc1d0f0b401c49588d14afc7
|
/processor/process.py
|
bde5f1301cc13b014b81fd6003c1a29ea5343ee2
|
[] |
no_license
|
ypsprimer/3d-segmentaion
|
4e676e0c2981baaf1fee4269cfab852e415699aa
|
387d3e813652ab634e0f1dbf162b0cb7acc7d86d
|
refs/heads/master
| 2023-02-17T12:48:49.536512 | 2021-01-06T09:17:30 | 2021-01-06T09:17:30 | 327,245,107 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 196 |
py
|
from utils import timerecord
class Process(object):
def __init__(self):
pass
@timerecord
def start(self):
pass
@timerecord
def validate(self):
pass
|
[
"[email protected]"
] | |
c8fec17e5b347c8c924de7503f6a568f1749dc32
|
11ff14c118240e87c4804d0373e4656d0683d479
|
/test_case/test_setup_set_number_SSID_Y1.py
|
81870cae736291ee784a668fa5ff6ce79adf2f7f
|
[] |
no_license
|
wxmmavis/OS3.1
|
e3028d9c79d5a1a17449fea6380fcdda902bdec7
|
26d954344207a82d2298821c3c4f01302393dc7e
|
refs/heads/master
| 2020-03-25T20:07:11.225493 | 2018-08-13T03:20:57 | 2018-08-13T03:20:57 | 144,115,963 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,783 |
py
|
# -*- coding: utf-8 -*-
import configparser
import logging
import os
import time
import pytest
#########################
# import module
#########################
import sys
import conftest
sys.path.append("..")
import modules.login_router
import modules.initialize
from modules.login_router import *
from modules.initialize_new import *
from tools import *
#########################
from selenium import webdriver
lr = login_router()
setup = initialize()
t = tools()
projectpath = os.path.dirname(os.getcwd())
caseFail = projectpath + '/errorpng/caseFail/'
test_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
config_file = projectpath + '/configure/' + 'testconfig.ini'
filename = os.path.basename(__file__).split('.')[0]
t.log(filename)
config = configparser.ConfigParser()
config.read(config_file, encoding='UTF-8')
default_ip = config.get('Default', 'default_ip')
default_pw = config.get('Default', 'default_pw')
test_ssid = config.get('SSID', 'ssid_number')
logging.info(__file__)
def setSSID(driver):
if setup.setssid(driver, test_ssid) == 1:
if setup.initialize_pw(driver, default_pw) == 1:
return setup.complete(driver)
class Test_Initialize_SSID:
def setup(self):
conftest.browser()
self.driver = conftest.driver
# self.driver = webdriver.Chrome()
self.driver.maximize_window()
if lr.open_url(self.driver, 'http://' + default_ip) == 2:
if setup.homepage(self.driver) == 1:
pass
def teardown(self):
self.driver.close()
self.driver.quit()
def test_set_number_SSID(self):
        print(u'Test: set the initialization SSID to a purely numeric value')
assert setSSID(self.driver) == 1
if __name__ == '__main__':
pytest.main(os.path.basename(__file__))
|
[
"[email protected]"
] | |
afcf3ac520a9802ad38358b8327d26de164327aa
|
26f78ba56388765f2fe2dc8fa23ddea097209ec5
|
/Leetcode/动态规划/474. 一和零.py
|
dd727fe7fd84c669fd8a61dffce5cfc26c80993c
|
[] |
no_license
|
johnkle/FunProgramming
|
3ef2ff32a1a378e1c780138ec9bab630c9ba83c7
|
a60e0d17a1e9f0bc1959d7a95737fc4a0362d735
|
refs/heads/master
| 2023-07-18T16:05:56.493458 | 2021-09-08T19:01:19 | 2021-09-08T19:01:19 | 402,861,226 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 399 |
py
|
# 0-1 knapsack, more practice
class Solution:
def findMaxForm(self, strs, m, n):
dp = [[0]*(n+1) for _ in range(m+1)]
dp[0][0] = 0
for s in strs:
s0 = s.count('0')
s1 = s.count('1')
for i in range(m,s0-1,-1):
for j in range(n,s1-1,-1):
dp[i][j] = max(dp[i][j],dp[i-s0][j-s1]+1)
return dp[-1][-1]
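# Illustrative check (not in the original file): the LeetCode 474 sample case.
if __name__ == '__main__':
    print(Solution().findMaxForm(["10", "0001", "111001", "1", "0"], 5, 3))  # expected: 4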
|
[
"[email protected]"
] | |
8d859693c383abf39b50819dc10746d43d512812
|
d5ba475a6a782b0eed5d134b66eb8c601c41421c
|
/terrascript/data/cobbler.py
|
f4a918a0abcb0709ac72a20c788773778d0400b9
|
[
"BSD-2-Clause",
"Python-2.0"
] |
permissive
|
amlodzianowski/python-terrascript
|
ab42a06a5167e53ad8093b656a9bf14a03cb031d
|
142b1a4d1164d1012ac8865d12fdcc72f1e7ae75
|
refs/heads/master
| 2021-05-19T11:59:47.584554 | 2020-03-26T07:13:47 | 2020-03-26T07:13:47 | 251,688,045 | 0 | 0 |
BSD-2-Clause
| 2020-03-31T18:00:22 | 2020-03-31T18:00:22 | null |
UTF-8
|
Python
| false | false | 65 |
py
|
# terrascript/data/cobbler.py
import terrascript
__all__ = []
|
[
"[email protected]"
] | |
6efc9f01d813d133e726173325dab5542ec82946
|
18c886d2c325094a93c33b4c526adb0ad8490033
|
/backend/src/file_storage_system/urls.py
|
d6372f51bd2c39983e5440a0df82842e7449aa34
|
[
"MIT"
] |
permissive
|
Rezwanul-Haque/file-storage-system
|
32d9c6636cf928b9056db142aa7fd307da308f51
|
677023d99098df7609f807463d4c7cea20390b5c
|
refs/heads/master
| 2020-08-10T03:00:38.171912 | 2019-10-12T19:56:27 | 2019-10-12T19:56:27 | 214,240,133 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 719 |
py
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView
from .routers import router
urlpatterns = [
path('', TemplateView.as_view(template_name=settings.FRONTEND_PATH + '/index.html'), name='Home'),
path('admin/', admin.site.urls),
path('api/v1/', include(router.urls)),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
path('__debug__/', include(debug_toolbar.urls)),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
df73632f1a9dcabdbb2986891435e09b7daa2d9f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02659/s217242852.py
|
074a9193bfc0ab04d84dca7db41c625c18df6ec3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 81 |
py
|
a,b=[i for i in input().split()]
a=int(a)
b=round(float(b)*100)
print(a*b//100)
|
[
"[email protected]"
] | |
95a1259734b02079f64fbd0116caec9a887bbf13
|
8015f1c62a2cb4efd21aa8938336913bf8117868
|
/bamap/ba3961.pngMap.py
|
8aea15f00aeac1a928f75f7f25fd41caf0236335
|
[] |
no_license
|
GamerNoTitle/Beepers-and-OLED
|
675b5e3c179df0f0e27b42bf594c43860d03b9af
|
afe1340e5394ae96bda5f9022a8a66824368091e
|
refs/heads/master
| 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,468 |
py
|
ba3961.pngMap = [
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000001011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111010000000000000010011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000101111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000001011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000001011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000001011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000000111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111100111111111111111111111111111111111111000000000000000011111111111111111111111111111111110001111111111111111111',
'11111111111111000011111111111111111111111111111111111111000000000000000011111111111111111111111111111111111110000011111111111111',
'11111111111010000011111111111111111111111111111111111110000000000000000001111111111111111111111111111111111110100001111111111111',
'11111111110000111111111111111111111111111111111000000000000000000000000000000010111111111111111111111111111111100000001111111111',
'11111111100001111111111111111111111111111111110000000000000000000000000000000000011111111111111111111111111111111100000111111111',
'11111110000101111111111111111111111111111111000000000000000000000000000000000000000011111111111111111111111111111111000001111111',
'11111000001111111111111111111111111111111110000000000000000000000000000000000000000001111111111111111111111111111111100000111111',
'11110000001111111111111111111111111111111000000000000000000000000000000000000000000000111111111111111111111111111111110000001111',
'11100000111111111111111111111111111111111100000000000000000000000000000000000000000000111111111111111111111111111111111100001111',
]
|
[
"[email protected]"
] | |
86e973af71e61872eeb8ac5d60cae1f947c5e04d
|
0a74687347990348d798d4647c6bcfaa61b8e901
|
/mysite/migrations/0005_auto_20161128_2215.py
|
848a7d1d6547177f7882986f759bfdc7b8448955
|
[] |
no_license
|
bufubaoni/learndjango
|
7b0db8c291c8306615d223cf4ca0c190c0fc074a
|
a4a750eb65f684d189d98b92310e749e9e51f07f
|
refs/heads/master
| 2021-01-18T22:30:04.911193 | 2016-12-06T14:19:40 | 2016-12-06T14:19:40 | 72,538,496 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 657 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-28 14:15
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import river.models.fields.state
class Migration(migrations.Migration):
dependencies = [
('mysite', '0004_remove_customuser_mobile'),
]
operations = [
migrations.AlterField(
model_name='mymodel',
name='state',
field=river.models.fields.state.StateField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='states', to='river.State', verbose_name='\u72b6\u6001'),
),
]
|
[
"[email protected]"
] | |
41669d5377e34fa82c2deb81c173293e150f926a
|
ddd82d37cea1981d053acda347b654bd6ad44655
|
/medium/balanced_brackets.py
|
b2d42ccf7a9a65f5a158961104ca36f9c0788e75
|
[] |
no_license
|
jriall/algoexpert
|
e675c73f3005effc6026eeaa20e59d92de06d3b1
|
76ab8dd7f446fb46ad3742c376b46ad7d65f35cb
|
refs/heads/main
| 2023-06-22T10:37:28.988383 | 2021-07-18T16:21:13 | 2021-07-18T16:21:13 | 359,125,430 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,148 |
py
|
# Balanced Brackets
# Write a function that takes in a string made up of brackets ((, [, {, ), ],
# and }) and other optional characters. The function should return a boolean
# representing whether the string is balanced with regards to brackets.
# A string is said to be balanced if it has as many opening brackets of a
# certain type as it has closing brackets of that type and if no bracket is
# unmatched. Note that an opening bracket can't match a corresponding closing
# bracket that comes before it, and similarly, a closing bracket can't match a
# corresponding opening bracket that comes after it. Also, brackets can't
# overlap each other as in [(]).
# Sample Input
# string = "([])(){}(())()()"
# Sample Output
# true // it's balanced
# Solution
def balanced_brackets(string):
    left_brackets = []  # stack of not-yet-matched opening brackets
    brackets = {')': '(', '}': '{', ']': '['}
    for char in string:
        if char in brackets.values():
            left_brackets.append(char)
        elif char in brackets:
            # closing bracket: it must match the most recent opening bracket
            if left_brackets and left_brackets[-1] == brackets[char]:
                left_brackets.pop()
            else:
                return False
    return not left_brackets
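# Quick sanity checks against the sample input above (hypothetical driver,
# not part of the original solution file):
if __name__ == "__main__":
    print(balanced_brackets("([])(){}(())()()"))  # expected: True
    print(balanced_brackets("[(])"))              # expected: False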
|
[
"[email protected]"
] | |
d59b1696d9b316d38a13becca0d19ec5d48f1ff8
|
80e152f49b355b3e07faaab6b468ca8dda6aa097
|
/python/tkinter-mvc/view.py
|
0937cd75dfe2b0985fe9fe1c4dcba39686577eb6
|
[] |
no_license
|
Pitrified/snippet
|
13ad9222f584570b10abb23a122b010b088eb366
|
1d7e5657014b00612cde87b78d5506a9e8b6adfc
|
refs/heads/master
| 2023-05-25T16:14:42.133900 | 2023-04-19T18:20:32 | 2023-04-19T18:20:32 | 174,192,523 | 2 | 0 | null | 2023-05-01T23:48:48 | 2019-03-06T17:47:16 |
Python
|
UTF-8
|
Python
| false | false | 758 |
py
|
import logging
import tkinter as tk
from side_panel import SidePanel
from plot_panel import PlotPanel
class View:
def __init__(self, root):
log = logging.getLogger(f"c.{__name__}.init")
log.info("Start init")
self.root = root
# setup grid for root
self.root.grid_rowconfigure(0, weight=1)
self.root.grid_columnconfigure(0, weight=1)
self.root.grid_columnconfigure(1, weight=0)
# create children widget
self.plot_panel = PlotPanel(self.root, bg="SeaGreen1")
self.side_panel = SidePanel(self.root, bg="dark orange")
# grid children widget
self.plot_panel.grid(row=0, column=0, sticky="nsew")
self.side_panel.grid(row=0, column=1, sticky="nsew")
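# Hypothetical standalone driver to exercise the layout (assumes side_panel
# and plot_panel are importable alongside this module; not in the original):
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    root = tk.Tk()
    root.geometry("640x480")
    View(root)
    root.mainloop()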
|
[
"[email protected]"
] | |
0badc9960b65fd791bebaa445d38fe065c983323
|
e748e6d96aace1c9149327f384e0de07d743715a
|
/arcade/python/fixResult.py
|
413e64fd1cb385c4f5500c3c5feff061bc0588f6
|
[] |
no_license
|
jorzel/codefights
|
cdfc4cb32261b064ffc605bfd927bf237885b5d2
|
28b62a2ae3809f0eb487198044c0fe74be09d4e8
|
refs/heads/master
| 2022-04-28T06:54:26.170503 | 2022-03-23T22:22:20 | 2022-03-23T22:22:20 | 110,818,719 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 970 |
py
|
"""
Easy
Recovery
100
Your teacher asked you to implement a function that calculates the Answer to the Ultimate Question of Life, the Universe, and Everything and returns it as an array of integers. After several hours of hardcore coding you managed to write such a function, and it produced a quite reasonable result. However, when you decided to compare your answer with results of your classmates, you discovered that the elements of your result are roughly 10 times greater than the ones your peers got.
You don't have time to investigate the problem, so you need to implement a function that will fix the given array for you. Given result, return an array of the same length, where the ith element is equal to the ith element of result with the last digit dropped.
Example
For result = [42, 239, 365, 50], the output should be
fixResult(result) = [4, 23, 36, 5].
"""
def fixResult(result):
    def fix(x):
        return x // 10  # drop the last digit via integer division
    return list(map(fix, result))
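# Minimal check against the sample from the problem statement (hypothetical
# driver, not part of the original solution):
if __name__ == "__main__":
    print(fixResult([42, 239, 365, 50]))  # expected: [4, 23, 36, 5]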
|
[
"[email protected]"
] | |
33a5e14279e69788ae7b7410ebb3830620626b14
|
d7f223ec944de8ef95304cb3db50be4e46e0d6e5
|
/unusual/unusual/settings.py
|
2e320c5edbb28e9cf46ecb645db09029bffce948
|
[] |
no_license
|
shashisp/mh
|
f90fb13db4951656dbcc7fa2330ce229e5b4d8fb
|
01fa451cbd5b7a3080edbc03608997b00a2bfc12
|
refs/heads/master
| 2016-09-06T09:33:25.887683 | 2015-05-13T21:40:13 | 2015-05-13T21:40:13 | 34,963,230 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,515 |
py
|
"""
Django settings for unusual project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from config import *
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v@h8$i4!3u2$un&it*ix(_l*&f$@#iu%vid*wb@fnn*l04vyu2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'campaigns',
'tastypie',
'social.apps.django_app.default',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'unusual.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'unusual.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'social.backends.twitter.TwitterOAuth',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_TWITTER_KEY = 'update me'
SOCIAL_AUTH_TWITTER_SECRET = 'update me'
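# NOTE: the 'update me' values above are placeholders; real Twitter OAuth
# credentials (ideally supplied via config.py, wildcard-imported above, or
# the environment) are required before social login will work.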
|
[
"[email protected]"
] | |
b41f5d1985b55fb07976eb105e68b0c7ea27bfd3
|
d3f448d238b435b48d8f27f17a34b3e39a70dc29
|
/python-client/test/test_pay_outs_bankwire_api.py
|
2f9c2ce64dc7aaa4547dae3eb1e7e642fe99e264
|
[] |
no_license
|
pedroguirao/swagger
|
1fc29b6d9bcc193bf8ce85f6d8a6074f4c37150d
|
5ffea6203b5fcd3f201c2ede76d354302a6fb0ee
|
refs/heads/master
| 2020-06-07T16:15:08.659567 | 2019-06-21T07:51:49 | 2019-06-21T07:51:49 | 193,055,538 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 952 |
py
|
# coding: utf-8
"""
MarketPay API
API for Smart Contracts and Payments # noqa: E501
OpenAPI spec version: v2.01
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.pay_outs_bankwire_api import PayOutsBankwireApi # noqa: E501
from swagger_client.rest import ApiException
class TestPayOutsBankwireApi(unittest.TestCase):
"""PayOutsBankwireApi unit test stubs"""
def setUp(self):
self.api = swagger_client.api.pay_outs_bankwire_api.PayOutsBankwireApi() # noqa: E501
def tearDown(self):
pass
def test_pay_outs_bankwire_get(self):
"""Test case for pay_outs_bankwire_get
"""
pass
def test_pay_outs_bankwire_post(self):
"""Test case for pay_outs_bankwire_post
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
9dfc8507b84a9ac6b1fa0dfd07a24ad87dbdb2a9
|
0dddc0508138396c740901be4a0f9eebefb8fded
|
/ax/utils/sensitivity/tests/test_sensitivity.py
|
41e9271145555e51ade46bd6ed3fdc41d2ec0bd8
|
[
"MIT"
] |
permissive
|
facebook/Ax
|
473beb143016f95f4ec381ed1bd95b32c1ca31f8
|
6443cee30cbf8cec290200a7420a3db08e4b5445
|
refs/heads/main
| 2023-09-01T09:29:13.684709 | 2023-08-31T21:49:30 | 2023-08-31T21:49:30 | 169,880,381 | 2,207 | 315 |
MIT
| 2023-09-14T21:26:51 | 2019-02-09T15:23:44 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 14,244 |
py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import cast
from unittest.mock import patch, PropertyMock
import torch
from ax.modelbridge.base import ModelBridge
from ax.modelbridge.registry import Models
from ax.modelbridge.torch import TorchModelBridge
from ax.models.torch.botorch import BotorchModel
from ax.utils.common.testutils import TestCase
from ax.utils.sensitivity.derivative_gp import posterior_derivative
from ax.utils.sensitivity.derivative_measures import GpDGSMGpMean, GpDGSMGpSampling
from ax.utils.sensitivity.sobol_measures import (
_get_input_dimensionality,
_get_model_per_metric,
ax_parameter_sens,
compute_sobol_indices_from_model_list,
ProbitLinkMean,
SobolSensitivityGPMean,
SobolSensitivityGPSampling,
)
from ax.utils.testing.core_stubs import get_branin_experiment
from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel, GPyTorchModel
from botorch.models.model_list_gp_regression import ModelListGP
from gpytorch.distributions import MultivariateNormal
from torch import Tensor
def get_modelbridge(modular: bool = False) -> ModelBridge:
exp = get_branin_experiment(with_batch=True)
exp.trials[0].run()
return (Models.BOTORCH_MODULAR if modular else Models.BOTORCH)(
# Model bridge kwargs
experiment=exp,
data=exp.fetch_data(),
)
class SensitivityAnalysisTest(TestCase):
def setUp(self) -> None:
self.model = get_modelbridge().model.model
def testDgsmGpMean(self) -> None:
bounds = torch.tensor([(0.0, 1.0) for _ in range(2)]).t()
sensitivity_mean = GpDGSMGpMean(self.model, bounds=bounds, num_mc_samples=10)
gradients_measure = sensitivity_mean.gradient_measure()
gradients_absolute_measure = sensitivity_mean.gradient_absolute_measure()
gradients_square_measure = sensitivity_mean.gradients_square_measure()
self.assertIsInstance(gradients_measure, Tensor)
self.assertIsInstance(gradients_absolute_measure, Tensor)
self.assertIsInstance(gradients_square_measure, Tensor)
self.assertEqual(gradients_measure.shape, torch.Size([2]))
self.assertEqual(gradients_absolute_measure.shape, torch.Size([2]))
self.assertEqual(gradients_square_measure.shape, torch.Size([2]))
sensitivity_mean_bootstrap = GpDGSMGpMean(
self.model, bounds=bounds, num_mc_samples=10, num_bootstrap_samples=10
)
gradients_measure = sensitivity_mean_bootstrap.gradient_measure()
gradients_absolute_measure = (
sensitivity_mean_bootstrap.gradient_absolute_measure()
)
gradients_square_measure = sensitivity_mean_bootstrap.gradients_square_measure()
self.assertIsInstance(gradients_measure, Tensor)
self.assertIsInstance(gradients_absolute_measure, Tensor)
self.assertIsInstance(gradients_square_measure, Tensor)
self.assertEqual(gradients_measure.shape, torch.Size([2, 3]))
self.assertEqual(gradients_absolute_measure.shape, torch.Size([2, 3]))
self.assertEqual(gradients_square_measure.shape, torch.Size([2, 3]))
def testDgsmGpSampling(self) -> None:
bounds = torch.tensor([(0.0, 1.0) for _ in range(2)]).t()
sensitivity_sampling = GpDGSMGpSampling(
self.model, bounds=bounds, num_mc_samples=10, num_gp_samples=10
)
gradients_measure = sensitivity_sampling.gradient_measure()
gradients_absolute_measure = sensitivity_sampling.gradient_absolute_measure()
gradients_square_measure = sensitivity_sampling.gradients_square_measure()
self.assertIsInstance(gradients_measure, Tensor)
self.assertIsInstance(gradients_absolute_measure, Tensor)
self.assertIsInstance(gradients_square_measure, Tensor)
self.assertEqual(gradients_measure.shape, torch.Size([2, 3]))
self.assertEqual(gradients_absolute_measure.shape, torch.Size([2, 3]))
self.assertEqual(gradients_square_measure.shape, torch.Size([2, 3]))
sensitivity_sampling_bootstrap = GpDGSMGpSampling(
self.model,
bounds=bounds,
num_mc_samples=10,
num_gp_samples=10,
num_bootstrap_samples=10,
)
gradients_measure = sensitivity_sampling_bootstrap.gradient_measure()
gradients_absolute_measure = (
sensitivity_sampling_bootstrap.gradient_absolute_measure()
)
gradients_square_measure = (
sensitivity_sampling_bootstrap.gradients_square_measure()
)
self.assertIsInstance(gradients_measure, Tensor)
self.assertIsInstance(gradients_absolute_measure, Tensor)
self.assertIsInstance(gradients_square_measure, Tensor)
self.assertEqual(gradients_measure.shape, torch.Size([2, 5]))
self.assertEqual(gradients_absolute_measure.shape, torch.Size([2, 5]))
self.assertEqual(gradients_square_measure.shape, torch.Size([2, 5]))
def testSobolGpMean(self) -> None:
bounds = torch.tensor([(0.0, 1.0) for _ in range(2)]).t()
sensitivity_mean = SobolSensitivityGPMean(
self.model, num_mc_samples=10, bounds=bounds, second_order=True
)
first_order = sensitivity_mean.first_order_indices()
total_order = sensitivity_mean.total_order_indices()
second_order = sensitivity_mean.second_order_indices()
self.assertIsInstance(first_order, Tensor)
self.assertIsInstance(total_order, Tensor)
self.assertIsInstance(second_order, Tensor)
self.assertEqual(first_order.shape, torch.Size([2]))
self.assertEqual(total_order.shape, torch.Size([2]))
self.assertEqual(second_order.shape, torch.Size([1]))
sensitivity_mean_bootstrap = SobolSensitivityGPMean(
self.model,
num_mc_samples=10,
bounds=bounds,
second_order=True,
num_bootstrap_samples=10,
input_qmc=True,
)
first_order = sensitivity_mean_bootstrap.first_order_indices()
total_order = sensitivity_mean_bootstrap.total_order_indices()
second_order = sensitivity_mean_bootstrap.second_order_indices()
self.assertIsInstance(first_order, Tensor)
self.assertIsInstance(total_order, Tensor)
self.assertIsInstance(second_order, Tensor)
self.assertEqual(first_order.shape, torch.Size([2, 3]))
self.assertEqual(total_order.shape, torch.Size([2, 3]))
self.assertEqual(second_order.shape, torch.Size([1, 3]))
sensitivity_mean_bootstrap = SobolSensitivityGPMean(
self.model,
num_mc_samples=10,
bounds=bounds,
second_order=True,
num_bootstrap_samples=10,
link_function=ProbitLinkMean,
)
first_order = sensitivity_mean_bootstrap.first_order_indices()
self.assertEqual(first_order.shape, torch.Size([2, 3]))
with self.assertRaises(ValueError):
sensitivity_mean = SobolSensitivityGPMean(
self.model, num_mc_samples=10, bounds=bounds, second_order=False
)
first_order = sensitivity_mean.first_order_indices()
total_order = sensitivity_mean.total_order_indices()
second_order = sensitivity_mean.second_order_indices()
# testing compute_sobol_indices_from_model_list
num_models = 3
num_mc_samples = 10
for order in ["first", "total"]:
with self.subTest(order=order):
indices = compute_sobol_indices_from_model_list(
[self.model for _ in range(num_models)],
bounds=bounds,
order=order,
num_mc_samples=num_mc_samples,
input_qmc=True,
)
self.assertEqual(indices.shape, (num_models, 2))
if order == "total":
self.assertTrue((indices >= 0).all())
sobol_gp_mean = SobolSensitivityGPMean(
self.model,
bounds=bounds,
num_mc_samples=num_mc_samples,
input_qmc=True,
)
base_indices = getattr(sobol_gp_mean, f"{order}_order_indices")()
# can compare values because we sample with deterministic seeds
self.assertTrue(
torch.allclose(
indices,
base_indices.unsqueeze(0).expand(num_models, 2),
)
)
# testing ax sensitivity utils
# model_bridge = cast(TorchModelBridge, get_modelbridge())
for modular in [False, True]:
model_bridge = cast(TorchModelBridge, get_modelbridge(modular=modular))
with self.assertRaisesRegex(
NotImplementedError,
"but only TorchModelBridge is supported",
):
# pyre-ignore
ax_parameter_sens(1, model_bridge.outcomes)
with patch.object(model_bridge, "model", return_value=None):
with self.assertRaisesRegex(
NotImplementedError,
r"but only Union\[BotorchModel, ModularBoTorchModel\] is supported",
):
ax_parameter_sens(model_bridge, model_bridge.outcomes)
torch_model = cast(BotorchModel, model_bridge.model)
if not modular:
with self.assertRaisesRegex(
NotImplementedError,
"but only ModelList is supported",
):
# only applies if the number of outputs of model is greater than 1
with patch.object(
BatchedMultiOutputGPyTorchModel,
"num_outputs",
new_callable=PropertyMock,
) as mock:
mock.return_value = 2
ax_parameter_sens(model_bridge, model_bridge.outcomes)
# since only ModelList is supported for BotorchModel:
gpytorch_model = ModelListGP(cast(GPyTorchModel, torch_model.model))
torch_model.model = gpytorch_model
input_dim = _get_input_dimensionality(gpytorch_model)
self.assertEqual(input_dim, 2)
for order in ["first", "total"]:
with self.subTest(order=order):
ind_dict = ax_parameter_sens(
model_bridge,
input_qmc=True,
num_mc_samples=num_mc_samples,
order=order,
)
self.assertIsInstance(ind_dict, dict)
ind_tnsr = compute_sobol_indices_from_model_list(
_get_model_per_metric(torch_model, model_bridge.outcomes),
torch.tensor(torch_model.search_space_digest.bounds).T,
input_qmc=True,
num_mc_samples=num_mc_samples,
order=order,
)
self.assertIsInstance(ind_tnsr, Tensor)
# can compare values because we sample with deterministic seeds
for i, row in enumerate(ind_dict):
for j, col in enumerate(ind_dict[row]):
self.assertAlmostEqual(ind_dict[row][col], ind_tnsr[i, j])
def testSobolGpSampling(self) -> None:
bounds = torch.tensor([(0.0, 1.0) for _ in range(2)]).t()
sensitivity_sampling = SobolSensitivityGPSampling(
self.model,
num_mc_samples=10,
num_gp_samples=10,
bounds=bounds,
second_order=True,
)
first_order = sensitivity_sampling.first_order_indices()
total_order = sensitivity_sampling.total_order_indices()
second_order = sensitivity_sampling.second_order_indices()
self.assertIsInstance(first_order, Tensor)
self.assertIsInstance(total_order, Tensor)
self.assertIsInstance(second_order, Tensor)
self.assertEqual(first_order.shape, torch.Size([2, 3]))
self.assertEqual(total_order.shape, torch.Size([2, 3]))
self.assertEqual(second_order.shape, torch.Size([1, 3]))
sensitivity_sampling_bootstrap = SobolSensitivityGPSampling(
self.model,
num_mc_samples=10,
num_gp_samples=10,
bounds=bounds,
second_order=True,
num_bootstrap_samples=10,
)
first_order = sensitivity_sampling_bootstrap.first_order_indices()
total_order = sensitivity_sampling_bootstrap.total_order_indices()
second_order = sensitivity_sampling_bootstrap.second_order_indices()
self.assertIsInstance(first_order, Tensor)
self.assertIsInstance(total_order, Tensor)
self.assertIsInstance(second_order, Tensor)
self.assertEqual(first_order.shape, torch.Size([2, 5]))
self.assertEqual(total_order.shape, torch.Size([2, 5]))
self.assertEqual(second_order.shape, torch.Size([1, 5]))
with self.assertRaises(ValueError):
sensitivity_sampling = SobolSensitivityGPSampling(
self.model,
num_mc_samples=10,
num_gp_samples=10,
bounds=bounds,
second_order=False,
)
first_order = sensitivity_sampling.first_order_indices()
total_order = sensitivity_sampling.total_order_indices()
second_order = sensitivity_sampling.second_order_indices()
def testDerivativeGp(self) -> None:
test_x = torch.rand(2, 2)
posterior = posterior_derivative(self.model, test_x, kernel_type="matern_l1")
self.assertIsInstance(posterior, MultivariateNormal)
with self.assertRaises(ValueError):
posterior = posterior_derivative(self.model, test_x, kernel_type="xyz")
|
[
"[email protected]"
] | |
9e0d43d533ea80c344c09fe7da04441b491f3e53
|
1fa04a3baf82a2469b1140a7fd350a5df011faf5
|
/waglee/wagblog/migrations/0005_blogtagindexpage.py
|
c381938022e9a03738cedc0350f87c7b121859e3
|
[] |
no_license
|
Serrones/wag_tutorial
|
8446b33c3b71657402a9af214ae1d9f8f99d9694
|
f357a8dabf5ade3f6dc80c17795cf6f3e721b381
|
refs/heads/master
| 2020-03-23T21:12:20.069601 | 2018-08-05T01:28:39 | 2018-08-05T01:28:39 | 142,089,256 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 737 |
py
|
# Generated by Django 2.0.7 on 2018-08-05 00:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('wagblog', '0004_auto_20180805_0031'),
]
operations = [
migrations.CreateModel(
name='BlogTagIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
|
[
"[email protected]"
] | |
91a98cf0b0ec3197205d09b8d01d1dfaf9f62a62
|
bc48af1e1583bae19311102a327b4bc6d094df2f
|
/Zadania2/Zad2.10.py
|
0629cc3161588aa1d7d609efafb3d6bc94bba254
|
[] |
no_license
|
jakubwida/UJ_Sem5_2016_Python_Course
|
d90093882213ad900ea937dd2bac2bef0623d818
|
0e0e612dd1af3ae1ceea926c3c3ab0f8f116fa9f
|
refs/heads/master
| 2021-01-10T22:20:37.335498 | 2017-01-30T20:16:36 | 2017-01-30T20:16:36 | 70,318,167 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 80 |
py
|
line = " jeden \n dw.a \n \n \n trzy \n cztery "
print(len(line.split()))
|
[
"[email protected]"
] | |
ed6c12aa427e645c201da748065b7311809ba026
|
ea62d65d17eb32915c668f1c8560e570b3454218
|
/coltrane/migrations/0001_initial.py
|
fc40ae7eafac48e1b823160be3fdd1c714f0eb50
|
[] |
no_license
|
arwelsh/palewi.re
|
11b68a3416aac07843c9f94a9a8cd45e37355514
|
ba33b0b205e61f8f166d2a10057f6e669c714191
|
refs/heads/master
| 2021-01-18T05:07:01.471888 | 2016-06-27T04:36:13 | 2016-06-27T04:36:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 26,052 |
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Ticker'
db.create_table(u'coltrane_ticker', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('pub_date', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'coltrane', ['Ticker'])
# Adding model 'Slogan'
db.create_table(u'coltrane_slogan', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=250)),
))
db.send_create_signal(u'coltrane', ['Slogan'])
# Adding model 'Category'
db.create_table(u'coltrane_category', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=250)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('post_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal(u'coltrane', ['Category'])
# Adding model 'Post'
db.create_table(u'coltrane_post', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('wordpress_id', self.gf('django.db.models.fields.IntegerField')(unique=True, null=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=250)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=300)),
('body_markup', self.gf('django.db.models.fields.TextField')()),
('body_html', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('enable_comments', self.gf('django.db.models.fields.BooleanField')(default=True)),
('status', self.gf('django.db.models.fields.IntegerField')(default=1)),
('tags', self.gf('tagging.fields.TagField')()),
))
db.send_create_signal(u'coltrane', ['Post'])
# Adding M2M table for field categories on 'Post'
m2m_table_name = db.shorten_name(u'coltrane_post_categories')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('post', models.ForeignKey(orm[u'coltrane.post'], null=False)),
('category', models.ForeignKey(orm[u'coltrane.category'], null=False))
))
db.create_unique(m2m_table_name, ['post_id', 'category_id'])
# Adding model 'Beer'
db.create_table(u'coltrane_beer', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=1000)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('tags', self.gf('tagging.fields.TagField')(max_length=1000)),
('title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)),
('brewery', self.gf('django.db.models.fields.CharField')(max_length=250)),
))
db.send_create_signal(u'coltrane', ['Beer'])
# Adding model 'Book'
db.create_table(u'coltrane_book', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=1000)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('tags', self.gf('tagging.fields.TagField')(max_length=1000)),
('isbn', self.gf('django.db.models.fields.CharField')(unique=True, max_length=20)),
('title', self.gf('django.db.models.fields.CharField')(max_length=250)),
('authors', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)),
))
db.send_create_signal(u'coltrane', ['Book'])
# Adding model 'Link'
db.create_table(u'coltrane_link', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=1000)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('tags', self.gf('tagging.fields.TagField')(max_length=1000)),
('title', self.gf('django.db.models.fields.CharField')(max_length=250)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal(u'coltrane', ['Link'])
# Adding model 'Commit'
db.create_table(u'coltrane_commit', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=1000)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('tags', self.gf('tagging.fields.TagField')(max_length=1000)),
('repository', self.gf('django.db.models.fields.CharField')(max_length=100)),
('branch', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('message', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'coltrane', ['Commit'])
# Adding model 'Location'
db.create_table(u'coltrane_location', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=1000)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('tags', self.gf('tagging.fields.TagField')(max_length=1000)),
('title', self.gf('django.db.models.fields.CharField')(max_length=250)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('latitude', self.gf('django.db.models.fields.FloatField')(null=True)),
('longitude', self.gf('django.db.models.fields.FloatField')(null=True)),
))
db.send_create_signal(u'coltrane', ['Location'])
# Adding model 'Movie'
db.create_table(u'coltrane_movie', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=1000)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('tags', self.gf('tagging.fields.TagField')(max_length=1000)),
('title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)),
('rating', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
))
db.send_create_signal(u'coltrane', ['Movie'])
# Adding model 'Photo'
db.create_table(u'coltrane_photo', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=1000)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('tags', self.gf('tagging.fields.TagField')(max_length=1000)),
('title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal(u'coltrane', ['Photo'])
# Adding model 'Shout'
db.create_table(u'coltrane_shout', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=1000)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('tags', self.gf('tagging.fields.TagField')(max_length=1000)),
('message', self.gf('django.db.models.fields.TextField')(max_length=140)),
))
db.send_create_signal(u'coltrane', ['Shout'])
# Adding model 'Track'
db.create_table(u'coltrane_track', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=1000)),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('tags', self.gf('tagging.fields.TagField')(max_length=1000)),
('artist_name', self.gf('django.db.models.fields.CharField')(max_length=250)),
('track_name', self.gf('django.db.models.fields.CharField')(max_length=250)),
('track_mbid', self.gf('django.db.models.fields.CharField')(max_length=36, blank=True)),
('artist_mbid', self.gf('django.db.models.fields.CharField')(max_length=36, blank=True)),
))
db.send_create_signal(u'coltrane', ['Track'])
# Adding model 'TopDomain'
db.create_table(u'coltrane_topdomain', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('count', self.gf('django.db.models.fields.IntegerField')()),
('stratum', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal(u'coltrane', ['TopDomain'])
# Adding model 'TopTag'
db.create_table(u'coltrane_toptag', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tag', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['tagging.Tag'], unique=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('count', self.gf('django.db.models.fields.IntegerField')()),
('stratum', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal(u'coltrane', ['TopTag'])
def backwards(self, orm):
# Deleting model 'Ticker'
db.delete_table(u'coltrane_ticker')
# Deleting model 'Slogan'
db.delete_table(u'coltrane_slogan')
# Deleting model 'Category'
db.delete_table(u'coltrane_category')
# Deleting model 'Post'
db.delete_table(u'coltrane_post')
# Removing M2M table for field categories on 'Post'
db.delete_table(db.shorten_name(u'coltrane_post_categories'))
# Deleting model 'Beer'
db.delete_table(u'coltrane_beer')
# Deleting model 'Book'
db.delete_table(u'coltrane_book')
# Deleting model 'Link'
db.delete_table(u'coltrane_link')
# Deleting model 'Commit'
db.delete_table(u'coltrane_commit')
# Deleting model 'Location'
db.delete_table(u'coltrane_location')
# Deleting model 'Movie'
db.delete_table(u'coltrane_movie')
# Deleting model 'Photo'
db.delete_table(u'coltrane_photo')
# Deleting model 'Shout'
db.delete_table(u'coltrane_shout')
# Deleting model 'Track'
db.delete_table(u'coltrane_track')
# Deleting model 'TopDomain'
db.delete_table(u'coltrane_topdomain')
# Deleting model 'TopTag'
db.delete_table(u'coltrane_toptag')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'coltrane.beer': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Beer'},
'brewery': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'tags': ('tagging.fields.TagField', [], {'max_length': '1000'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
},
u'coltrane.book': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Book'},
'authors': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isbn': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'tags': ('tagging.fields.TagField', [], {'max_length': '1000'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
},
u'coltrane.category': {
'Meta': {'ordering': "['title']", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'coltrane.commit': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Commit'},
'branch': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'repository': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tags': ('tagging.fields.TagField', [], {'max_length': '1000'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
},
u'coltrane.link': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Link'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'tags': ('tagging.fields.TagField', [], {'max_length': '1000'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
},
u'coltrane.location': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Location'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'tags': ('tagging.fields.TagField', [], {'max_length': '1000'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
},
u'coltrane.movie': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Movie'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tags': ('tagging.fields.TagField', [], {'max_length': '1000'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
},
u'coltrane.photo': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Photo'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'tags': ('tagging.fields.TagField', [], {'max_length': '1000'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
},
u'coltrane.post': {
'Meta': {'ordering': "['-pub_date']", 'object_name': 'Post'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'body_markup': ('django.db.models.fields.TextField', [], {}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['coltrane.Category']", 'symmetrical': 'False'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '300'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'tags': ('tagging.fields.TagField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'wordpress_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'coltrane.shout': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Shout'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '140'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'tags': ('tagging.fields.TagField', [], {'max_length': '1000'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
},
u'coltrane.slogan': {
'Meta': {'ordering': "['title']", 'object_name': 'Slogan'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'coltrane.ticker': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Ticker'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {})
},
u'coltrane.topdomain': {
'Meta': {'ordering': "('-count', 'name')", 'object_name': 'TopDomain'},
'count': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'stratum': ('django.db.models.fields.IntegerField', [], {})
},
u'coltrane.toptag': {
'Meta': {'ordering': "('-count', 'name')", 'object_name': 'TopTag'},
'count': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'stratum': ('django.db.models.fields.IntegerField', [], {}),
'tag': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['tagging.Tag']", 'unique': 'True'})
},
u'coltrane.track': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Track'},
'artist_mbid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'artist_name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'tags': ('tagging.fields.TagField', [], {'max_length': '1000'}),
'track_mbid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'track_name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'tagging.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
}
}
complete_apps = ['coltrane']
|
[
"[email protected]"
] | |
64ab8a1cb73d26acf8f6970397a240a58e447d17
|
c70b5f5c87867d692e347e382bdc6723500066b8
|
/miscellanea/extract_crd_withMP2.py
|
0ce7e1ff12d6771187a99ea1e738fbfd0a11e128
|
[] |
no_license
|
Steboss/mdgx_python_api
|
39f5249f9a0c7b7a5361a29b60910b1d949e96e2
|
b35311d10d986cafe679ad8ee0f058ce603d627c
|
refs/heads/master
| 2021-05-12T19:23:54.946988 | 2018-02-07T11:52:12 | 2018-02-07T11:52:12 | 117,087,085 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,420 |
py
|
#JAN 2017 Stefano Bosisio
#Here I extract the mp2 energies from the mp2_output folder
import os,sys,re
def rst7writer(coords,natoms):
#coords is the piece of output with all the coordinate
#file_name: the name of the file we have to ave the crd like structure_0.crd
#Crd file
outputcrd = open("tmp.rst7","w")
outputcrd.write("LIG\n")
outputcrd.write(" %d\n" %natoms)
#Now write the coordinates rst7 file
print("writing coordinate file %s" % outputcrd)
position = 0
counter = 0
for f in coords:
coordx = float(f.split()[3])
coordy = float(f.split()[4])
coordz = float(f.split()[5])
if coordx<0 or coordx>10:
space=" "
crdX = "%.7f" % coordx
cX = space+crdX
else:
space=" "
crdX = "%.7f" % coordx
cX = space+crdX
if coordy<0 or coordy>10:
space=" "
crdY = "%.7f" % coordy
cY = space+crdY
else:
space=" "
crdY = "%.7f" % coordy
cY = space+crdY
if coordz<0 or coordz>10:
space=" "
crdZ = "%.7f" % coordz
cZ = space+crdZ
else:
space=" "
crdZ = "%.7f" % coordz
cZ = space+crdZ
if counter ==1 :
outputcrd.write("%s%s%s\n" %(cX,cY,cZ))
counter=0
else:
outputcrd.write("%s%s%s" %(cX,cY,cZ))
counter+=1
outputcrd.close()
def mdcrdwriter(coords,natoms,out_mdcrd):
#print the name of the file
#extract energies
counter = 0 # elements on line
position = 0
count_coords = 0
total_coords = natoms*3
elems=""
for f in coords:
coordx = float(f.split()[3])
coordy = float(f.split()[4])
coordz = float(f.split()[5])
count_coords+=3
if coordx<0 or coordx>10:
space=" "
crdX = "%.3f" % coordx
cX = space+crdX
else:
space=" "
crdX = "%.3f" % coordx
cX = space+crdX
if coordy<0 or coordy>10:
space=" "
crdY = "%.3f" % coordy
cY = space+crdY
else:
space=" "
crdY = "%.3f" % coordy
cY = space+crdY
if coordz<0 or coordz>10:
space=" "
crdZ = "%.3f" % coordz
cZ = space+crdZ
else:
space=" "
crdZ = "%.3f" % coordz
cZ = space+crdZ
elems+="%s" % (cX)
counter+=1
if counter==10:
elems+="\n"
out_mdcrd.write(elems)
elems=""
counter=0
elems+="%s" %(cY)
counter+=1
if counter==10:
elems+="\n"
out_mdcrd.write(elems)
elems=""
counter=0
elems+="%s" %(cZ)
counter+=1
if count_coords==total_coords:
elems+="\n"
out_mdcrd.write(elems)
elif counter==10:
elems+="\n"
out_mdcrd.write(elems)
elems=""
counter=0
#out_mdcrd.write("\n")
#######################MAIN###################################
#deal with I/O
file_input = sys.argv[1]
mdcrd_file = sys.argv[2]
top_file = sys.argv[3]
reader = open(file_input,"r").readlines()
if os.path.exists(mdcrd_file):
mdcrd = open(mdcrd_file,"a")
else:
mdcrd = open(mdcrd_file,"a")
mdcrd.write("LIG\n")
#now read it and the last mp2= is the value we want
for line in reader:
if "EUMP2" in line:
en_line = line
#take the value of energy, which is after some split
#e.g.' E2 = -0.3224128066D+01 EUMP2 = -0.98809822517423D+03\n'
en_string = en_line.strip().split("EUMP2")[1].split("=")[1]
#then substitue the D with E otherwise we cannot convert in float
en_val=float(re.sub(r"D","E",en_string))
#convert the eenergy to kcal/mol?
output_energy = open("quantum_energy.dat","a")
output_energy.write("%.10f\n" % en_val)
indexes = []
#now create the mdcrd file
#now collect the index to know here the standard optimized structire is
for i, line in enumerate(reader,0):
if "Standard orientation:" in line:
indexes.append(i)
#number of atoms:
natoms = 0
charge_idx = []
for i,line in enumerate(reader,0): #the number of atoms come from lines
if "Charge" in line:
charge_idx.append(i+1)
for i,line in enumerate(reader[charge_idx[0]:],0):
if line==" \n":
break
else:
natoms+=1
last_idx = indexes[-1] + 5
end_coords = last_idx + natoms
coords = reader[last_idx:end_coords] ##this is the fragment of the file with the coordinates
mdcrdwriter(coords,natoms,mdcrd)
rst7writer(coords,natoms)
if os.path.exists("amber_energy.dat"):
amber = open("amber_energy.dat","a")
else:
amber = open("amber_energy.dat","a")
###Evaluate Amber energies with paramfit
cmd =""" echo "0.000" > dummy.dat """
os.system(cmd)
cmd =""" cat> job.in << EOF
ALGORITHM=NONE
NSTRUCTURES=1
COORDINATE_FORMAT=RESTART
EOF"""
os.system(cmd)
#cmd
cmd = """paramfit -i job.in -p %s -c tmp.rst7 -q dummy.dat | grep "Calculated energy with initial parameters" | awk '{print $10'} >> amber_energy.dat""" %(top_file)
os.system(cmd)
os.system("wait")
print(cmd)
cmd = "rm tmp.rst7 job.in dummy.dat"
os.system(cmd)
|
[
"[email protected]"
] | |
583d36baee1172132ed3c0eed658f4aa8a7df6ad
|
ca75f7099b93d8083d5b2e9c6db2e8821e63f83b
|
/z2/part2/batch/jm/parser_errors_2/293445611.py
|
10a81ed57c337b5a643dca205553d6da1debcf26
|
[
"MIT"
] |
permissive
|
kozakusek/ipp-2020-testy
|
210ed201eaea3c86933266bd57ee284c9fbc1b96
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
refs/heads/master
| 2022-10-04T18:55:37.875713 | 2020-06-09T21:15:37 | 2020-06-09T21:15:37 | 262,290,632 | 0 | 0 |
MIT
| 2020-06-09T21:15:38 | 2020-05-08T10:10:47 |
C
|
UTF-8
|
Python
| false | false | 3,612 |
py
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 293445611
"""
"""
random actions, total chaos
"""
board = gamma_new(5, 4, 3, 2)
assert board is not None
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_move(board, 1, 0, 1) == 1
assert gamma_free_fields(board, 1) == 6
assert gamma_move(board, 2, 1, 2) == 1
assert gamma_move(board, 2, 3, 3) == 1
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_move(board, 3, 0, 2) == 1
assert gamma_busy_fields(board, 3) == 2
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 3, 1, 3) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 1, 3, 0) == 1
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_free_fields(board, 2) == 6
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 3, 3, 3) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 1, 3, 1) == 1
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_free_fields(board, 3) == 2
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 3, 3, 4) == 0
assert gamma_free_fields(board, 3) == 2
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_busy_fields(board, 1) == 5
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 3, 1, 3) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 1, 4, 1) == 1
assert gamma_move(board, 2, 4, 0) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_busy_fields(board, 1) == 7
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_move(board, 2, 1, 3) == 1
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 1, 3, 2) == 1
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_free_fields(board, 2) == 4
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_golden_move(board, 1, 0, 1) == 0
assert gamma_move(board, 2, 3, 2) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_busy_fields(board, 2) == 3
assert gamma_move(board, 3, 4, 2) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 1, 3, 3) == 0
assert gamma_move(board, 2, 2, 3) == 1
assert gamma_move(board, 3, 3, 4) == 0
assert gamma_busy_fields(board, 3) == 2
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_free_fields(board, 1) == 4
gamma_delete(board)
|
[
"[email protected]"
] | |
885060592f2ca151104f0b9dfc2f441d2f441320
|
420f0b5c7f625b8473fa24716f17be8c7b27fe3e
|
/darling_ansible/python_venv/lib/python3.7/site-packages/oci/database/models/db_home_summary.py
|
8b7a7ebe4b2fbc6dd03ca5027ac8dc3456a1e1e6
|
[
"Apache-2.0"
] |
permissive
|
revnav/sandbox
|
896d1fab59eee40f8652dff6a0aceb691b47ea4c
|
f9c8422233d093b76821686b6c249417502cf61d
|
refs/heads/master
| 2023-08-17T11:17:20.449740 | 2021-09-09T00:49:52 | 2021-09-09T00:49:52 | 268,660,804 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,988 |
py
|
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DbHomeSummary(object):
"""
A directory where Oracle Database software is installed. A bare metal or Exadata DB system can have multiple Database Homes
and each Database Home can run a different supported version of Oracle Database. A virtual machine DB system can have only one Database Home.
For more information, see `Bare Metal and Virtual Machine DB Systems`__ and `Exadata DB Systems`__.
To use any of the API operations, you must be authorized in an IAM policy. If you're not authorized, talk to an
administrator. If you're an administrator who needs to write policies to give users access,
see `Getting Started with Policies`__.
**Warning:** Oracle recommends that you avoid using any confidential information when you supply string values using the API.
__ https://docs.cloud.oracle.com/Content/Database/Concepts/overview.htm
__ https://docs.cloud.oracle.com/Content/Database/Concepts/exaoverview.htm
__ https://docs.cloud.oracle.com/Content/Identity/Concepts/policygetstarted.htm
"""
#: A constant which can be used with the lifecycle_state property of a DbHomeSummary.
#: This constant has a value of "PROVISIONING"
LIFECYCLE_STATE_PROVISIONING = "PROVISIONING"
#: A constant which can be used with the lifecycle_state property of a DbHomeSummary.
#: This constant has a value of "AVAILABLE"
LIFECYCLE_STATE_AVAILABLE = "AVAILABLE"
#: A constant which can be used with the lifecycle_state property of a DbHomeSummary.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a DbHomeSummary.
#: This constant has a value of "TERMINATING"
LIFECYCLE_STATE_TERMINATING = "TERMINATING"
#: A constant which can be used with the lifecycle_state property of a DbHomeSummary.
#: This constant has a value of "TERMINATED"
LIFECYCLE_STATE_TERMINATED = "TERMINATED"
#: A constant which can be used with the lifecycle_state property of a DbHomeSummary.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
def __init__(self, **kwargs):
"""
Initializes a new DbHomeSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this DbHomeSummary.
:type id: str
:param compartment_id:
The value to assign to the compartment_id property of this DbHomeSummary.
:type compartment_id: str
:param display_name:
The value to assign to the display_name property of this DbHomeSummary.
:type display_name: str
:param last_patch_history_entry_id:
The value to assign to the last_patch_history_entry_id property of this DbHomeSummary.
:type last_patch_history_entry_id: str
:param lifecycle_state:
The value to assign to the lifecycle_state property of this DbHomeSummary.
Allowed values for this property are: "PROVISIONING", "AVAILABLE", "UPDATING", "TERMINATING", "TERMINATED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param db_system_id:
The value to assign to the db_system_id property of this DbHomeSummary.
:type db_system_id: str
:param vm_cluster_id:
The value to assign to the vm_cluster_id property of this DbHomeSummary.
:type vm_cluster_id: str
:param db_version:
The value to assign to the db_version property of this DbHomeSummary.
:type db_version: str
:param db_home_location:
The value to assign to the db_home_location property of this DbHomeSummary.
:type db_home_location: str
:param lifecycle_details:
The value to assign to the lifecycle_details property of this DbHomeSummary.
:type lifecycle_details: str
:param time_created:
The value to assign to the time_created property of this DbHomeSummary.
:type time_created: datetime
"""
self.swagger_types = {
'id': 'str',
'compartment_id': 'str',
'display_name': 'str',
'last_patch_history_entry_id': 'str',
'lifecycle_state': 'str',
'db_system_id': 'str',
'vm_cluster_id': 'str',
'db_version': 'str',
'db_home_location': 'str',
'lifecycle_details': 'str',
'time_created': 'datetime'
}
self.attribute_map = {
'id': 'id',
'compartment_id': 'compartmentId',
'display_name': 'displayName',
'last_patch_history_entry_id': 'lastPatchHistoryEntryId',
'lifecycle_state': 'lifecycleState',
'db_system_id': 'dbSystemId',
'vm_cluster_id': 'vmClusterId',
'db_version': 'dbVersion',
'db_home_location': 'dbHomeLocation',
'lifecycle_details': 'lifecycleDetails',
'time_created': 'timeCreated'
}
self._id = None
self._compartment_id = None
self._display_name = None
self._last_patch_history_entry_id = None
self._lifecycle_state = None
self._db_system_id = None
self._vm_cluster_id = None
self._db_version = None
self._db_home_location = None
self._lifecycle_details = None
self._time_created = None
@property
def id(self):
"""
**[Required]** Gets the id of this DbHomeSummary.
The `OCID`__ of the Database Home.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The id of this DbHomeSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this DbHomeSummary.
The `OCID`__ of the Database Home.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param id: The id of this DbHomeSummary.
:type: str
"""
self._id = id
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this DbHomeSummary.
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this DbHomeSummary.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this DbHomeSummary.
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this DbHomeSummary.
:type: str
"""
self._compartment_id = compartment_id
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this DbHomeSummary.
The user-provided name for the Database Home. The name does not need to be unique.
:return: The display_name of this DbHomeSummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this DbHomeSummary.
The user-provided name for the Database Home. The name does not need to be unique.
:param display_name: The display_name of this DbHomeSummary.
:type: str
"""
self._display_name = display_name
@property
def last_patch_history_entry_id(self):
"""
Gets the last_patch_history_entry_id of this DbHomeSummary.
The `OCID`__ of the last patch history. This value is updated as soon as a patch operation is started.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The last_patch_history_entry_id of this DbHomeSummary.
:rtype: str
"""
return self._last_patch_history_entry_id
@last_patch_history_entry_id.setter
def last_patch_history_entry_id(self, last_patch_history_entry_id):
"""
Sets the last_patch_history_entry_id of this DbHomeSummary.
The `OCID`__ of the last patch history. This value is updated as soon as a patch operation is started.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param last_patch_history_entry_id: The last_patch_history_entry_id of this DbHomeSummary.
:type: str
"""
self._last_patch_history_entry_id = last_patch_history_entry_id
@property
def lifecycle_state(self):
"""
**[Required]** Gets the lifecycle_state of this DbHomeSummary.
The current state of the Database Home.
Allowed values for this property are: "PROVISIONING", "AVAILABLE", "UPDATING", "TERMINATING", "TERMINATED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this DbHomeSummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this DbHomeSummary.
The current state of the Database Home.
:param lifecycle_state: The lifecycle_state of this DbHomeSummary.
:type: str
"""
allowed_values = ["PROVISIONING", "AVAILABLE", "UPDATING", "TERMINATING", "TERMINATED", "FAILED"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def db_system_id(self):
"""
Gets the db_system_id of this DbHomeSummary.
The `OCID`__ of the DB system.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The db_system_id of this DbHomeSummary.
:rtype: str
"""
return self._db_system_id
@db_system_id.setter
def db_system_id(self, db_system_id):
"""
Sets the db_system_id of this DbHomeSummary.
The `OCID`__ of the DB system.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param db_system_id: The db_system_id of this DbHomeSummary.
:type: str
"""
self._db_system_id = db_system_id
@property
def vm_cluster_id(self):
"""
Gets the vm_cluster_id of this DbHomeSummary.
The `OCID`__ of the VM cluster.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The vm_cluster_id of this DbHomeSummary.
:rtype: str
"""
return self._vm_cluster_id
@vm_cluster_id.setter
def vm_cluster_id(self, vm_cluster_id):
"""
Sets the vm_cluster_id of this DbHomeSummary.
The `OCID`__ of the VM cluster.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param vm_cluster_id: The vm_cluster_id of this DbHomeSummary.
:type: str
"""
self._vm_cluster_id = vm_cluster_id
@property
def db_version(self):
"""
**[Required]** Gets the db_version of this DbHomeSummary.
The Oracle Database version.
:return: The db_version of this DbHomeSummary.
:rtype: str
"""
return self._db_version
@db_version.setter
def db_version(self, db_version):
"""
Sets the db_version of this DbHomeSummary.
The Oracle Database version.
:param db_version: The db_version of this DbHomeSummary.
:type: str
"""
self._db_version = db_version
@property
def db_home_location(self):
"""
**[Required]** Gets the db_home_location of this DbHomeSummary.
The location of the Oracle Database Home.
:return: The db_home_location of this DbHomeSummary.
:rtype: str
"""
return self._db_home_location
@db_home_location.setter
def db_home_location(self, db_home_location):
"""
Sets the db_home_location of this DbHomeSummary.
The location of the Oracle Database Home.
:param db_home_location: The db_home_location of this DbHomeSummary.
:type: str
"""
self._db_home_location = db_home_location
@property
def lifecycle_details(self):
"""
Gets the lifecycle_details of this DbHomeSummary.
Additional information about the current lifecycleState.
:return: The lifecycle_details of this DbHomeSummary.
:rtype: str
"""
return self._lifecycle_details
@lifecycle_details.setter
def lifecycle_details(self, lifecycle_details):
"""
Sets the lifecycle_details of this DbHomeSummary.
Additional information about the current lifecycleState.
:param lifecycle_details: The lifecycle_details of this DbHomeSummary.
:type: str
"""
self._lifecycle_details = lifecycle_details
@property
def time_created(self):
"""
Gets the time_created of this DbHomeSummary.
The date and time the Database Home was created.
:return: The time_created of this DbHomeSummary.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this DbHomeSummary.
The date and time the Database Home was created.
:param time_created: The time_created of this DbHomeSummary.
:type: datetime
"""
self._time_created = time_created
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
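if __name__ == '__main__':
    # Illustrative usage sketch (not part of the generated SDK file): the
    # @init_model_state_from_kwargs decorator lets the model be built directly
    # from the keyword arguments documented in __init__. All values below are
    # made-up placeholders.
    summary = DbHomeSummary(
        id='ocid1.dbhome.oc1..example',
        compartment_id='ocid1.compartment.oc1..example',
        display_name='example-db-home',
        lifecycle_state='AVAILABLE',
        db_version='19.0.0.0',
        db_home_location='/u01/app/oracle/product/19.0.0.0/dbhome_1')
    print(summary)  # __repr__ renders every attribute via formatted_flat_dict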
|
[
"[email protected]"
] | |
6babd3556c4d789cc9deff9a19367bcaf4eda064
|
d7551c200a4859690cc73fb60f8a5cb3c0363478
|
/XiaoZzi/RequestsLearn/Requests_authen.py
|
71b5aada064859fe7bc0e75b36c93a33af5ea7cf
|
[] |
no_license
|
OverCastCN/Python_Learn
|
8b74ce4027b6ebfdd7af739551b41606bd04ff70
|
e7bb003e262f82977e8b2b2e14f32ff16fb0bd03
|
refs/heads/master
| 2021-01-01T05:58:37.654962 | 2019-06-26T12:47:38 | 2019-06-26T12:47:38 | 97,323,526 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 637 |
py
|
# -*- coding:utf-8 -*-
import requests
base_url = 'https://api.github.com'
def construct_url(end):
return '/'.join([base_url,end])
def basic_auth():
"""
Basic authentication
:return:
"""
response = requests.get(url=construct_url('user'),auth=('imoocdemo','imoocdemo123'))
print response.headers
print response.request.headers
def basic_oauth():
"""
OAuth authentication
:return:
"""
headers = {'Authorization': 'Basic aW1vb2NkZW1vOmltb29jZGVtbzEyMw=='} # Basic credentials are sent with the 'Basic' scheme prefix
#user/emails
response = requests.get(construct_url('user/emails'), headers=headers)
print response.request.headers
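if __name__ == '__main__':
    # Demo calls (added for illustration): 'imoocdemo' is a sample account, so
    # the live GitHub API will likely answer 401, but the printed request
    # headers still show how each authentication scheme is sent.
    basic_auth()
    basic_oauth()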
|
[
"[email protected]"
] | |
046541be0496fc236dae614f273af615bdc2f130
|
4e2d9f918ece774b72848128046787b178f0ee8e
|
/NAO/MyCode/test_module.py
|
2ec0990b618d54bad50243ebe61974ac9a99361b
|
[
"Apache-2.0"
] |
permissive
|
mlzboy/python
|
de6c210ae34db12e2b3299ce98e084f05954f15f
|
0006a2f9493008400e89fcc952f9e0a182053b64
|
refs/heads/master
| 2020-03-26T01:07:29.747935 | 2018-06-14T11:43:44 | 2018-06-14T11:43:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,236 |
py
|
# -*- encoding: UTF-8 -*-
""" 测试ALSoundLocalization用法
update by Ian in 2017-9-3 14:51:11
解决方法:
1. setParameter设置灵敏度参数
2. subscribe订阅这个事件
3. getData获得订阅的数据
"""
from naoqi import ALModule
from naoqi import ALProxy
from naoqi import ALBroker
import sys
import time
import argparse
class Motion(ALModule):
"""控制机器人的动作"""
def __init__(self, name,angle=50.0):
ALModule.__init__(self, name) # the parent class initializer must be called first
self.life = ALProxy('ALAutonomousLife')
self.motion = ALProxy('ALMotion')
self.posture = ALProxy('ALRobotPosture')
self.memory = ALProxy('ALMemory')
# initialization
self.angle = angle
self.headangle = 0
self.life.setState('disabled') # disable autonomous life, turning off all reflexes
self.motion.wakeUp() # wake up the robot
self.posture.goToPosture("StandInit", 0.5) # initialize the posture
self.motion.setStiffnesses("Head", 1.0) # set stiffness; the head cannot turn without it
def SoundLocalization(self):
self.sound = ALProxy('ALSoundLocalization')
self.sound.setParameter('Sensitivity',0.5)
self.sound.subscribe('ALSoundLocalization/SoundLocated') # subscribe to this event, i.e. start the module
try:
while True:
time.sleep(1)
data = self.memory.getData('ALSoundLocalization/SoundLocated')
print data
except KeyboardInterrupt:
print
print "Interrupted by user, shutting down"
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="192.168.1.102",
help="192.168.1.101")
parser.add_argument("--port", type=int, default=9559,
help="9987")
parser.add_argument("--facesize", type=float, default=0.1,
help="0.2")
args = parser.parse_args()
# set up the broker
myBroker = ALBroker("myBroker", "0.0.0.0", 0, args.ip, args.port)
mymotion = Motion('mymotion')
mymotion.SoundLocalization()
myBroker.shutdown()
|
[
"[email protected]"
] | |
e4e76e5aff929e10786d99463ac91740f4203e29
|
4b4df51041551c9a855468ddf1d5004a988f59a2
|
/leetcode_python/Binary_Search/peak-index-in-a-mountain-array.py
|
af188fdd21026b7c3572007599786f903c62ea64
|
[] |
no_license
|
yennanliu/CS_basics
|
99b7ad3ef6817f04881d6a1993ec634f81525596
|
035ef08434fa1ca781a6fb2f9eed3538b7d20c02
|
refs/heads/master
| 2023-09-03T13:42:26.611712 | 2023-09-03T12:46:08 | 2023-09-03T12:46:08 | 66,194,791 | 64 | 40 | null | 2022-08-20T09:44:48 | 2016-08-21T11:11:35 |
Python
|
UTF-8
|
Python
| false | false | 3,776 |
py
|
"""
852. Peak Index in a Mountain Array
Easy
Let's call an array arr a mountain if the following properties hold:
arr.length >= 3
There exists some i with 0 < i < arr.length - 1 such that:
arr[0] < arr[1] < ... arr[i-1] < arr[i]
arr[i] > arr[i+1] > ... > arr[arr.length - 1]
Given an integer array arr that is guaranteed to be a mountain, return any i such that arr[0] < arr[1] < ... arr[i - 1] < arr[i] > arr[i + 1] > ... > arr[arr.length - 1].
Example 1:
Input: arr = [0,1,0]
Output: 1
Example 2:
Input: arr = [0,2,1,0]
Output: 1
Example 3:
Input: arr = [0,10,5,2]
Output: 1
Constraints:
3 <= arr.length <= 104
0 <= arr[i] <= 106
arr is guaranteed to be a mountain array.
Follow up: Finding the O(n) is straightforward, could you find an O(log(n)) solution?
"""
# V0
# IDEA : PROBLEM UNDERSTANDING
# SAME AS LC 162 Find Peak Element
class Solution(object):
def peakIndexInMountainArray(self, arr):
if len(arr) < 3:
return False
for i in range(len(arr)):
if arr[i] > arr[i+1]:
return i
return -1
# V0'
# IDEA : BINARY SEARCH
class Solution(object):
def peakIndexInMountainArray(self, arr):
if len(arr) < 3:
return False
# binary search
l = 0
r = len(arr) - 1
while r >= l:
mid = l + (r-l)//2
#print ("l = " + str(l) + " r = " + str(r) + " mid = " + str(mid))
if arr[mid] > arr[mid-1] and arr[mid] > arr[mid+1]:
return mid
elif arr[mid] < arr[mid+1]:
l = mid + 1
else:
r = mid - 1
return -1
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/80721162
# IDEA : BINARY SEARCH
class Solution(object):
def peakIndexInMountainArray(self, A):
"""
:type A: List[int]
:rtype: int
"""
left, right = 0, len(A) - 1
while left < right:
mid = (left + right) // 2
if A[mid - 1] < A[mid] and A[mid] < A[mid + 1]:
left = mid
elif A[mid - 1] > A[mid] and A[mid] > A[mid + 1]:
right = mid
else:
break
return mid
# V1'
# https://blog.csdn.net/fuxuemingzhu/article/details/80721162
# IDEA : BINARY SEARCH
class Solution:
def peakIndexInMountainArray(self, A):
"""
:type A: List[int]
:rtype: int
"""
N = len(A)
left, right = 0, N
while left < right:
mid = left + (right - left) // 2
if A[mid - 1] < A[mid] and A[mid] > A[mid + 1]:
return mid
if A[mid] < A[mid + 1]:
left = mid + 1
else:
right = mid
return -1
# V1''
# https://blog.csdn.net/fuxuemingzhu/article/details/80721162
# IDEA : MAX
class Solution:
def peakIndexInMountainArray(self, A):
"""
:type A: List[int]
:rtype: int
"""
return A.index(max(A))
# V1'''
# https://blog.csdn.net/fuxuemingzhu/article/details/80721162
# IDEA : FIRST DECREASE
class Solution:
def peakIndexInMountainArray(self, A):
"""
:type A: List[int]
:rtype: int
"""
for i in range(len(A) - 1):
if A[i + 1] < A[i]:
return i
return -1
# V2
# Time: O(logn)
# Space: O(1)
class Solution(object):
def peakIndexInMountainArray(self, A):
"""
:type A: List[int]
:rtype: int
"""
left, right = 0, len(A)
while left < right:
mid = left + (right-left)//2
if A[mid] > A[mid+1]:
right = mid
else:
left = mid+1
return left
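# V3
# Added sanity check (not from the referenced blog): exercises the
# binary-search version above on the examples from the problem statement.
if __name__ == "__main__":
    s = Solution()
    for arr, expected in [([0, 1, 0], 1), ([0, 2, 1, 0], 1), ([0, 10, 5, 2], 1)]:
        assert s.peakIndexInMountainArray(arr) == expected
    print("all examples passed")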
|
[
"[email protected]"
] | |
90bbd942185a79a402e133decd2102a506901894
|
42c48f3178a48b4a2a0aded547770027bf976350
|
/google/ads/google_ads/v3/proto/enums/offline_user_data_job_failure_reason_pb2.py
|
f880a390b39fa5c4521bfcfd0452428d2cbd3add
|
[
"Apache-2.0"
] |
permissive
|
fiboknacky/google-ads-python
|
e989464a85f28baca1f28d133994c73759e8b4d6
|
a5b6cede64f4d9912ae6ad26927a54e40448c9fe
|
refs/heads/master
| 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 |
Apache-2.0
| 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null |
UTF-8
|
Python
| false | true | 4,516 |
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/enums/offline_user_data_job_failure_reason.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/enums/offline_user_data_job_failure_reason.proto',
package='google.ads.googleads.v3.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v3.enumsB$OfflineUserDataJobFailureReasonProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v3/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V3.Enums\312\002\035Google\\Ads\\GoogleAds\\V3\\Enums\352\002!Google::Ads::GoogleAds::V3::Enums'),
serialized_pb=_b('\nNgoogle/ads/googleads_v3/proto/enums/offline_user_data_job_failure_reason.proto\x12\x1dgoogle.ads.googleads.v3.enums\x1a\x1cgoogle/api/annotations.proto\"\xad\x01\n#OfflineUserDataJobFailureReasonEnum\"\x85\x01\n\x1fOfflineUserDataJobFailureReason\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12%\n!INSUFFICIENT_MATCHED_TRANSACTIONS\x10\x02\x12\x1d\n\x19INSUFFICIENT_TRANSACTIONS\x10\x03\x42\xf9\x01\n!com.google.ads.googleads.v3.enumsB$OfflineUserDataJobFailureReasonProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v3/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V3.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V3\\Enums\xea\x02!Google::Ads::GoogleAds::V3::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_OFFLINEUSERDATAJOBFAILUREREASONENUM_OFFLINEUSERDATAJOBFAILUREREASON = _descriptor.EnumDescriptor(
name='OfflineUserDataJobFailureReason',
full_name='google.ads.googleads.v3.enums.OfflineUserDataJobFailureReasonEnum.OfflineUserDataJobFailureReason',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INSUFFICIENT_MATCHED_TRANSACTIONS', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INSUFFICIENT_TRANSACTIONS', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=184,
serialized_end=317,
)
_sym_db.RegisterEnumDescriptor(_OFFLINEUSERDATAJOBFAILUREREASONENUM_OFFLINEUSERDATAJOBFAILUREREASON)
_OFFLINEUSERDATAJOBFAILUREREASONENUM = _descriptor.Descriptor(
name='OfflineUserDataJobFailureReasonEnum',
full_name='google.ads.googleads.v3.enums.OfflineUserDataJobFailureReasonEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_OFFLINEUSERDATAJOBFAILUREREASONENUM_OFFLINEUSERDATAJOBFAILUREREASON,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=144,
serialized_end=317,
)
_OFFLINEUSERDATAJOBFAILUREREASONENUM_OFFLINEUSERDATAJOBFAILUREREASON.containing_type = _OFFLINEUSERDATAJOBFAILUREREASONENUM
DESCRIPTOR.message_types_by_name['OfflineUserDataJobFailureReasonEnum'] = _OFFLINEUSERDATAJOBFAILUREREASONENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
OfflineUserDataJobFailureReasonEnum = _reflection.GeneratedProtocolMessageType('OfflineUserDataJobFailureReasonEnum', (_message.Message,), dict(
DESCRIPTOR = _OFFLINEUSERDATAJOBFAILUREREASONENUM,
__module__ = 'google.ads.googleads_v3.proto.enums.offline_user_data_job_failure_reason_pb2'
,
__doc__ = """Container for enum describing reasons why an offline user data job
failed to be processed.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.enums.OfflineUserDataJobFailureReasonEnum)
))
_sym_db.RegisterMessage(OfflineUserDataJobFailureReasonEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"[email protected]"
] | |
4f5233f3264f0115617603c98efc2cfada27bc57
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/algorithm/expert_algo/1_4.py
|
a1211fd3edb23bc5a84495a1f63b80ed903e29a5
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,980 |
py
|
Count of subsequences of length 4 in form (x, x, x+1, x+1) | Set 2
Given a large number in the form of a string **str** of size **N**, the task is
to count the subsequences of length 4 whose digits are in the form **(x, x,
x+1, x+1)**.
**Example:**
> **Input:** str = “1515732322”
> **Output:** 3
> **Explanation:**
> For the given input string str = “1515732322”, there are 3 subsequence
> {1122}, {1122}, and {1122} which are in the given form of (x, x, x+1, x+1).
>
> **Input:** str = “224353”
> **Output:** 1
> **Explanation:**
> For the given input string str = “224353”, there is only 1 subsequence
> possible {2233} in the given form of (x, x, x+1, x+1).
**Prefix Sum Approach:** Please refer to Set 1 for the prefix sum approach.
**Dynamic Programming Approach:** This problem can be solved using Dynamic
Programming.
We will use two arrays, **count1[][10]** and **count2[][10]**, such that
**count1[i][j]** stores the count of **digit j** up to index **i** when
traversing the string from the left, and **count2[i][j]** stores the count of
**digit j** from index **i** onwards when traversing the string from the
right. Below are the steps:
* Initialize two count arrays: **count1[][10]**, filled from left to right, and **count2[][10]**, filled from right to left of the input string.
* Traverse the input string and fill both the count1 and count2 arrays.
* Recurrence Relation for **count1[][]** is given by:
> count1[i][j] += count1[i - 1][j]
> where count1[i][j] is the count of digit j in the prefix ending at index i
* Recurrence Relation for **count2[][]** is given by:
> count2[i][j] += count2[i + 1][j]
> where count2[i][j] is the count of digit j in the suffix starting at index i
* Initialize a variable **ans** to 0 that stores the resultant count of such subsequences.
* Traverse the input string; at each index i with digit x (x < 9), read **c1**, the count of digit x before i, from **count1[][]**, and **c2**, the count of digit x+1 after i, from **count2[][]**.
* Update the result **ans** with **c1 * (c2 * (c2 - 1) / 2)** at every index.
* Print the answer **ans** calculated above.
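As a quick worked check (traced by hand for this article's example): for str = "224353", only index i = 1 contributes. There str[1] = '2', c1 = count1[0]['2'] = 1 (one '2' before index 1) and c2 = count2[2]['3'] = 2 (two '3's after index 1), giving a contribution of 1 * (2 * 1 / 2) = 1. The matched subsequence is {2233}, in line with the expected output of 1.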
Below is the implementation of the above approach:
## C++
// C++ program for the above approach
#include <bits/stdc++.h>
using namespace std;
// Function to count the numbers
int countStableNum(string str, int N)
{
// Array that stores the
// digits from left to right
int count1[N][10];
// Array that stores the
// digits from right to left
int count2[N][10];
// Initially both array store zero
for (int i = 0; i < N; i++)
for (int j = 0; j < 10; j++)
count1[i][j] = count2[i][j] = 0;
// Fill the table for count1 array
for (int i = 0; i < N; i++) {
if (i != 0) {
for (int j = 0; j < 10; j++) {
count1[i][j] += count1[i - 1][j];
}
}
// Update the count of current character
count1[i][str[i] - '0']++;
}
// Fill the table for count2 array
for (int i = N - 1; i >= 0; i--) {
if (i != N - 1) {
for (int j = 0; j < 10; j++) {
count2[i][j] += count2[i + 1][j];
}
}
// Update the count of current character
count2[i][str[i] - '0']++;
}
// Variable that stores the
// count of the numbers
int ans = 0;
// Traverse the input string; at each index,
// read from the count1 and count2 arrays the
// counts of digits whose difference is 1,
// and store them in c1 and c2
for (int i = 1; i < N - 1; i++) {
if (str[i] == '9')
continue;
int c1 = count1[i - 1][str[i] - '0'];
int c2 = count2[i + 1][str[i] - '0' + 1];
if (c2 == 0)
continue;
// Update the ans
ans = (ans
+ (c1 * ((c2 * (c2 - 1) / 2))));
}
// Return the final count
return ans;
}
// Driver Code
int main()
{
// Given String
string str = "224353";
int N = str.length();
// Function Call
cout << countStableNum(str, N);
return 0;
}
## Java
// Java program for the above approach
import java.io.*;
class GFG{
// Function to count the numbers
static int countStableNum(String str, int N)
{
// Array that stores the
// digits from left to right
int count1[][] = new int[N][10];
// Array that stores the
// digits from right to left
int count2[][] = new int[N][10];
// Initially both array store zero
for(int i = 0; i < N; i++)
for(int j = 0; j < 10; j++)
count1[i][j] = count2[i][j] = 0;
// Fill the table for count1 array
for(int i = 0; i < N; i++)
{
if (i != 0)
{
for(int j = 0; j < 10; j++)
{
count1[i][j] += count1[i - 1][j];
}
}
// Update the count of current character
count1[i][str.charAt(i) - '0']++;
}
// Fill the table for count2 array
for(int i = N - 1; i >= 0; i--)
{
if (i != N - 1)
{
for(int j = 0; j < 10; j++)
{
count2[i][j] += count2[i + 1][j];
}
}
// Update the count of cuuent character
count2[i][str.charAt(i) - '0']++;
}
// Variable that stores the
// count of the numbers
int ans = 0;
// Traverse the input string; at each index,
// read from the count1 and count2 arrays the
// counts of digits whose difference is 1,
// and store them in c1 and c2
for(int i = 1; i < N - 1; i++)
{
if (str.charAt(i) == '9')
continue;
int c1 = count1[i - 1][str.charAt(i) - '0'];
int c2 = count2[i + 1][str.charAt(i) - '0' + 1];
if (c2 == 0)
continue;
// Update the ans
ans = (ans + (c1 * ((c2 * (c2 - 1) / 2))));
}
// Return the final count
return ans;
}
// Driver code
public static void main(String[] args)
{
// Given String
String str = "224353";
int N = str.length();
// Function call
System.out.println(countStableNum(str, N));
}
}
// This code is contributed by Pratima Pandey
## Python3
# Python3 program for the above approach
# Function to count the numbers
def countStableNum(Str, N):
# Array that stores the
# digits from left to right
count1 = [[0 for j in range(10)]
for i in range(N)]
# Array that stores the
# digits from right to left
count2 = [[0 for j in range(10)]
for i in range(N)]
# Initially both array store zero
for i in range(N):
for j in range(10):
count1[i][j], count2[i][j] = 0, 0
# Fill the table for count1 array
for i in range(N):
if (i != 0):
for j in range(10):
count1[i][j] = (count1[i][j] +
count1[i - 1][j])
# Update the count of current character
count1[i][ord(Str[i]) - ord('0')] += 1
# Fill the table for count2 array
for i in range(N - 1, -1, -1):
if (i != N - 1):
for j in range(10):
count2[i][j] += count2[i + 1][j]
# Update the count of current character
count2[i][ord(Str[i]) - ord('0')] += 1
# Variable that stores the
# count of the numbers
ans = 0
# Traverse the input string; at each index,
# read from the count1 and count2 arrays the
# counts of digits whose difference is 1,
# and store them in c1 and c2
for i in range(1, N - 1):
if (Str[i] == '9'):
continue
c1 = count1[i - 1][ord(Str[i]) - ord('0')]
c2 = count2[i + 1][ord(Str[i]) - ord('0') + 1]
if (c2 == 0):
continue
# Update the ans
ans = (ans + (c1 * ((c2 * (c2 - 1) // 2))))
# Return the final count
return ans
# Driver code
# Given String
Str = "224353"
N = len(Str)
# Function call
print(countStableNum(Str, N))
# This code is contributed by divyeshrabadiya07
## C#
// C# program for the above approach
using System;
class GFG{
// Function to count the numbers
static int countStableNum(String str, int N)
{
// Array that stores the
// digits from left to right
int [,]count1 = new int[N, 10];
// Array that stores the
// digits from right to left
int [,]count2 = new int[N, 10];
// Initially both array store zero
for(int i = 0; i < N; i++)
for(int j = 0; j < 10; j++)
count1[i, j] = count2[i, j] = 0;
// Fill the table for count1 array
for(int i = 0; i < N; i++)
{
if (i != 0)
{
for(int j = 0; j < 10; j++)
{
count1[i, j] += count1[i - 1, j];
}
}
// Update the count of current character
count1[i, str[i] - '0']++;
}
// Fill the table for count2 array
for(int i = N - 1; i >= 0; i--)
{
if (i != N - 1)
{
for(int j = 0; j < 10; j++)
{
count2[i, j] += count2[i + 1, j];
}
}
// Update the count of current character
count2[i, str[i] - '0']++;
}
// Variable that stores the
// count of the numbers
int ans = 0;
// Traverse the input string; at each index,
// read from the count1 and count2 arrays the
// counts of digits whose difference is 1,
// and store them in c1 and c2
for(int i = 1; i < N - 1; i++)
{
if (str[i] == '9')
continue;
int c1 = count1[i - 1, str[i] - '0'];
int c2 = count2[i + 1, str[i] - '0' + 1];
if (c2 == 0)
continue;
// Update the ans
ans = (ans + (c1 * ((c2 * (c2 - 1) / 2))));
}
// Return the final count
return ans;
}
// Driver code
public static void Main(String[] args)
{
// Given String
String str = "224353";
int N = str.Length;
// Function call
Console.WriteLine(countStableNum(str, N));
}
}
// This code is contributed by Amit Katiyar
**Output:**
1
**Time Complexity:** _O(N)_
**Auxiliary Space Complexity:** _O(N)_
|
[
"[email protected]"
] | |
a12dfe975bc42d0d73e84e9188862ac2be40e096
|
5593b35f326748f18053e7ea042c98fe6b70a850
|
/tqt/__version__.py
|
9e07edd14c5281467200b0a4457633f34448cb95
|
[
"BSD-3-Clause"
] |
permissive
|
sicdl/TQT
|
7dfe3bce2bb5dace9a467945512e65525a0c3be9
|
27b73fcf27ddfb67cd28f6ed27e49341f27c9f16
|
refs/heads/main
| 2023-04-14T18:28:23.224689 | 2021-04-22T14:46:46 | 2021-04-22T14:46:46 | 362,503,682 | 0 | 0 |
BSD-3-Clause
| 2021-04-28T14:45:14 | 2021-04-28T14:45:13 | null |
UTF-8
|
Python
| false | false | 238 |
py
|
__title__ = 'tqt'
__description__ = 'torch implement of TQT'
__version__ = '1.0.1'
__author__ = 'Pannenets F'
__author_email__ = '[email protected]'
__license__ = 'BSD-3-Clause License'
__url__ = 'https://github.com/PannenetsF/TQT'
|
[
"[email protected]"
] | |
c1c728eb324e6d3190e3c0541dabc42773348998
|
f58936d3d01b014131b9038616d6f3573dd94f44
|
/preprocessing_tools/extended_ner/food/api/foodpanda_city.py
|
14db425c4522951eefdf3a40d8de32d04fbc493b
|
[] |
no_license
|
niteshsurtani/Personal_Assistant
|
69f591d03200ad9f8e66533f6968c7fb2a1d8667
|
363a65b3e3438b5824d8abb6caee53a70eefa024
|
refs/heads/master
| 2022-11-20T00:13:09.289399 | 2017-10-13T17:32:00 | 2017-10-13T17:32:00 | 106,283,072 | 0 | 1 | null | 2020-07-23T18:18:29 | 2017-10-09T12:50:48 |
Python
|
UTF-8
|
Python
| false | false | 2,007 |
py
|
from mysql.connector import MySQLConnection, Error
from dbconfig import read_db_config
def insertOneCity(city_id, name, data_resource):
query = "INSERT INTO city(city_id, name, data_resource) " \
"VALUES(%s, %s, %s)"
args = (city_id, name, data_resource)
try:
db_config = read_db_config()
conn = MySQLConnection(**db_config)
cursor = conn.cursor()
cursor.execute(query, args)
conn.commit()
except Error as error:
print error
finally:
cursor.close()
conn.close()
print "CITY DATA INSERTED!!!"
def insertManyCities(city_info):
query = "INSERT INTO city(city_id, name, data_resource) " \
"VALUES(%s, %s, %s)"
try:
db_config = read_db_config()
conn = MySQLConnection(**db_config)
cursor = conn.cursor()
cursor.executemany(query, city_info)
conn.commit()
except Error as error:
print error
finally:
cursor.close()
conn.close()
print "CITY DATA INSERTED!!!"
def findCityById(id):
try:
db_config = read_db_config()
conn = MySQLConnection(**db_config)
cursor = conn.cursor()
cursor.execute("SELECT * FROM city WHERE city_id = " + id)
row = cursor.fetchone()
print row
except Error as error:
print error
finally:
cursor.close()
conn.close()
def findAllCities():
rows = []
try:
db_config = read_db_config()
conn = MySQLConnection(**db_config)
cursor = conn.cursor()
cursor.execute("SELECT name FROM city")
rows = cursor.fetchall()
# for row in rows:
# print row
except Error as error:
print error
finally:
cursor.close()
conn.close()
return rows
def findCityByName(query):
rows = []
try:
db_config = read_db_config()
conn = MySQLConnection(**db_config)
cursor = conn.cursor()
if query:
query = query + "%"
# NOTE: concatenating user input into SQL is vulnerable to SQL injection;
# a parameterized variant is sketched below
command = "SELECT * FROM city WHERE name LIKE '" + query + "'"
#print command
cursor.execute(command)
rows = cursor.fetchall()
#for row in rows:
# print row
except Error as error:
print error
finally:
cursor.close()
conn.close()
return rows
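def findCityByNameParam(query):
    # Illustrative sketch (the function name is made up for this example): the
    # same lookup written with a parameterized query, which avoids SQL
    # injection through the `query` argument.
    rows = []
    try:
        db_config = read_db_config()
        conn = MySQLConnection(**db_config)
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM city WHERE name LIKE %s", (query + "%",))
        rows = cursor.fetchall()
    except Error as error:
        print error
    finally:
        cursor.close()
        conn.close()
    return rows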
|
[
"[email protected]"
] | |
ce0fbe43952534a1980d29bcfbbed01aa5c538c6
|
6c137e70bb6b1b618fbbceddaeb74416d387520f
|
/lantz/lantz/drivers/examples/foreign_example.py
|
682536c5bd86a50a87407085d922a159115462a4
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
zhong-lab/code
|
fe497c75662f8c3b7ab3c01e7e351bff6d5e8d15
|
b810362e06b44387f0768353c602ec5d29b551a2
|
refs/heads/master
| 2023-01-28T09:46:01.448833 | 2022-06-12T22:53:47 | 2022-06-12T22:53:47 | 184,670,765 | 2 | 7 |
BSD-2-Clause
| 2022-12-08T21:46:15 | 2019-05-02T23:37:39 |
Python
|
UTF-8
|
Python
| false | false | 2,046 |
py
|
# -*- coding: utf-8 -*-
"""
lantz.drivers.example.foreign_example
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Foreign library example.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import ctypes as ct
from lantz import Feat, Action, DictFeat
from lantz.foreign import LibraryDriver
from lantz.errors import InstrumentError
class ForeignTemplate(LibraryDriver):
"""Template for Drivers using a library.
"""
LIBRARY_NAME = 'mylibrary.dll'
def _return_handler(self, func_name, ret_value):
if ret_value != 0:
raise InstrumentError('{} ({})'.format(ret_value, _ERRORS[ret_value]))
return ret_value
@Feat()
def idn(self):
return self.query('*IDN?')
@Feat(units='V', limits=(10,))
def amplitude(self):
"""Amplitude.
"""
return float(self.query('?AMP'))
@amplitude.setter
def amplitude(self, value):
self.query('!AMP {:.1f}'.format(value))
@DictFeat(values={True: '1', False: '0'}, keys=list(range(1,9)))
def dout(self, key):
"""Digital output state.
"""
return self.query('?DOU {}'.format(key))
@dout.setter
def dout(self, key, value):
self.query('!DOU {} {}'.format(key, value))
@Action()
def do_something(self):
"""Help for do_something
"""
return self.lib.something()
if __name__ == '__main__':
import argparse
import lantz.log
parser = argparse.ArgumentParser(description='Test Kentech HRI')
parser.add_argument('-i', '--interactive', action='store_true',
default=False, help='Show interactive GUI')
args = parser.parse_args()
lantz.log.log_to_socket(lantz.log.DEBUG)
with ForeignTemplate() as inst:
if args.interactive:
from lantz.ui.app import start_test_app
start_test_app(inst)
else:
# Add your test code here
print('Non interactive mode')
|
[
"none"
] |
none
|
868777082c196ad7aceaa2c788c04575c894c324
|
7af848e1aab6f1c4362fd7588c80efec566ef9f3
|
/mlinsights/mlmodel/classification_kmeans.py
|
0417e2bffb55232f10b10ab4e229a5d36b8595fe
|
[
"MIT"
] |
permissive
|
alexisjihyeross/mlinsights
|
2e8873645c3e4883aa4ff422b0543fba36712109
|
74a834714a96e2e78b8dfc3b750a9d605df14834
|
refs/heads/master
| 2021-04-14T23:05:22.115689 | 2020-03-11T11:43:35 | 2020-03-11T11:43:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,360 |
py
|
"""
@file
@brief Combines a *k-means* followed by a predictor.
"""
import inspect
import numpy
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
from sklearn.base import BaseEstimator, ClassifierMixin, clone
class ClassifierAfterKMeans(BaseEstimator, ClassifierMixin):
"""
Applies a *k-means* (see :epkg:`sklearn:cluster:KMeans`)
for each class, then adds the distance to each cluster
as a feature for a classifier.
See notebook :ref:`logisticregressionclusteringrst`.
"""
def __init__(self, estimator=None, clus=None, **kwargs):
"""
@param estimator :epkg:`sklearn:linear_model:LogisticRegression`
by default
@param clus clustering applied on each class,
by default k-means with two classes
@param kwargs sent to :meth:`set_params
<mlinsights.mlmodel.classification_kmeans.ClassifierAfterKMeans.set_params>`,
see its documentation to understand how to specify parameters
"""
ClassifierMixin.__init__(self)
BaseEstimator.__init__(self)
if estimator is None:
estimator = LogisticRegression()
if clus is None:
clus = KMeans(n_clusters=2)
self.estimator = estimator
self.clus = clus
if not hasattr(clus, "transform"):
raise AttributeError("clus does not have a transform method.")
if kwargs:
self.set_params(**kwargs)
def fit(self, X, y, sample_weight=None):
"""
Runs a *k-means* on each class
then trains a classifier on the
extended set of features.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, n_targets]
Target values. Will be cast to X's dtype if necessary
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
Attributes
----------
labels_: dictionary of clustering models
clus_: array of clustering models
estimator_: trained classifier
"""
classes = set(y)
self.labels_ = list(sorted(classes))
self.clus_ = {}
sig = inspect.signature(self.clus.fit)
for cl in classes:
m = clone(self.clus)
Xcl = X[y == cl]
if sample_weight is None or 'sample_weight' not in sig.parameters:
w = None
m.fit(Xcl)
else:
w = sample_weight[y == cl]
m.fit(Xcl, sample_weight=w)
self.clus_[cl] = m
extX = self.transform_features(X)
self.estimator_ = self.estimator.fit(
extX, y, sample_weight=sample_weight)
return self
def transform_features(self, X):
"""
Applies all the clustering objects
on every observations and extends the list of
features.
@param X features
@return extended features
"""
preds = []
for _, v in sorted(self.clus_.items()):
p = v.transform(X)
preds.append(p)
return numpy.hstack(preds)
def predict(self, X):
"""
Runs the predictions.
"""
extX = self.transform_features(X)
return self.estimator.predict(extX)
def predict_proba(self, X):
"""
Converts predictions into probabilities.
"""
extX = self.transform_features(X)
return self.estimator.predict_proba(extX)
def decision_function(self, X):
"""
Calls *decision_function*.
"""
extX = self.transform_features(X)
return self.estimator.decision_function(extX)
def get_params(self, deep=True):
"""
Returns the parameters for both
the clustering and the classifier.
@param deep unused here
@return dict
:meth:`set_params <mlinsights.mlmodel.classification_kmeans.ClassifierAfterKMeans.set_params>`
describes the pattern parameters names follow.
"""
res = {}
for k, v in self.clus.get_params().items():
res["c_" + k] = v
for k, v in self.estimator.get_params().items():
res["e_" + k] = v
return res
def set_params(self, **values):
"""
Sets the parameters before training.
Every parameter prefixed by ``'e_'`` is an estimator
parameter, every parameter prefixed by ``'c_'`` is for
the :epkg:`sklearn:cluster:KMeans`.
@param values parameter values
"""
pc, pe = {}, {}
for k, v in values.items():
if k.startswith('e_'):
pe[k[2:]] = v
elif k.startswith('c_'):
pc[k[2:]] = v
else:
raise ValueError("Unexpected parameter name '{0}'".format(k))
self.clus.set_params(**pc)
self.estimator.set_params(**pe)
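if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; assumes scikit-learn's
    # iris dataset is available). Following the set_params convention above,
    # 'e_'-prefixed arguments go to the LogisticRegression and 'c_'-prefixed
    # ones to the per-class KMeans.
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    model = ClassifierAfterKMeans(e_max_iter=500, c_n_clusters=2)
    model.fit(X, y)
    # Each class contributes c_n_clusters distance features to the classifier.
    print(model.predict_proba(X[:3]))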
|
[
"[email protected]"
] | |
de847436be859ecbfdcdf2f2eda99f6677a367d1
|
8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a
|
/XML/XML_to_python_objects__lxml.objectify__examples/from_text.py
|
6b1cf58b002108f1d034790570a56300f1bfc679
|
[
"CC-BY-4.0"
] |
permissive
|
stepik/SimplePyScripts
|
01092eb1b2c1c33756427abb2debbd0c0abf533f
|
3259d88cb58b650549080d6f63b15910ae7e4779
|
refs/heads/master
| 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,372 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://lxml.de/objectify.html
from datetime import datetime
# pip install lxml
from lxml import objectify
text = """\
<Response>
<Data>
<Report>
<LeaderList>
<Leader ActualDate="2009-12-01" FIO="Шxxxxxxx Аxxxxx Шxxxxxx" INN="5xxxxxxxxx" Position="генеральный директор"/>
<Leader ActualDate="2008-10-07" FIO="Вxxxxxx Аxxxxxx Аxxxxxxx" Position="генеральный директор"/>
<Leader ActualDate="2007-04-17" FIO="Оxxxxxxxx Сxxxxx Вxxxxxxx" Position="генеральный директор"/>
<Leader ActualDate="2004-12-06" FIO="Кxxxxxxx Аxxxxxxx Нxxxxxx" Position="генеральный директор"/>
</LeaderList>
</Report>
</Data>
<ResultInfo ExecutionTime="140" ResultType="True"/>
</Response>
"""
def to_date(date_str):
return datetime.strptime(date_str, '%Y-%m-%d')
root = objectify.fromstring(text)
items = root.Data.Report.LeaderList.Leader
leader = max(items, key=lambda x: to_date(x.attrib['ActualDate']))
print(leader.attrib['FIO']) # Шxxxxxxx Аxxxxx Шxxxxxx
print(leader.attrib['ActualDate']) # 2009-12-01
print(leader.attrib['Position']) # генеральный директор
|
[
"[email protected]"
] | |
f2fdd4d9d42fb5073ee6fa1301c3897d15d7f1b5
|
d833487ba7a78e900ce535d60c123986ab5ebfee
|
/Linked Lists/6. Shift Linked List/Solution.py
|
23371037e6dda9955911381ecedcd6a0e8bca809
|
[] |
no_license
|
ceteongvanness/Algorithm-Python
|
b71af3bca4d2573f4a0d18dc991012b996746d6a
|
0151e7bac3f4032bbc76caa209bb63cdfa8a581e
|
refs/heads/master
| 2023-01-31T04:17:22.425719 | 2020-12-14T05:21:15 | 2020-12-14T05:21:15 | 263,880,376 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 840 |
py
|
class LinkedList:
def __init__(self, value):
self.value = value
self.next = None
def shiftLinkedList(head, k):
# O(n) time | O(1) space - where n is the number of nodes in the Linked List
listLength = 1
listTail = head
while listTail.next is not None:
listTail = listTail.next
listLength += 1
offset = abs(k) % listLength
if offset == 0:
return head
newTailPosition = listLength - offset if k > 0 else offset
newTail = head
for i in range(1, newTailPosition):
newTail = newTail.next
newHead = newTail.next
newTail.next = None
listTail.next = head
return newHead
# This is the class of the input linked list.
class LinkedList:
def __init__(self, value):
self.value = value
self.next = None
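if __name__ == "__main__":
    # Quick demonstration (added for illustration): builds 0 -> 1 -> ... -> 5,
    # shifts by k = 2, and prints the rotated order 4 -> 5 -> 0 -> 1 -> 2 -> 3.
    head = LinkedList(0)
    tail = head
    for value in range(1, 6):
        tail.next = LinkedList(value)
        tail = tail.next
    node = shiftLinkedList(head, 2)
    values = []
    while node is not None:
        values.append(str(node.value))
        node = node.next
    print(" -> ".join(values))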
|
[
"[email protected]"
] | |
ad6c6f5bcb9bf132c2d476669c31b7aa91444dc5
|
e00d41c9f4045b6c6f36c0494f92cad2bec771e2
|
/server/database/mariadb/comar/service.py
|
9552b147c9c78b884a728527fbac16017ea92eb4
|
[] |
no_license
|
pisilinux/main
|
c40093a5ec9275c771eb5fb47a323e308440efef
|
bfe45a2e84ea43608e77fb9ffad1bf9850048f02
|
refs/heads/master
| 2023-08-19T00:17:14.685830 | 2023-08-18T20:06:02 | 2023-08-18T20:06:02 | 37,426,721 | 94 | 295 | null | 2023-09-14T08:22:22 | 2015-06-14T19:38:36 |
Python
|
UTF-8
|
Python
| false | false | 698 |
py
|
# -*- coding: utf-8 -*-
from comar.service import *
import os
serviceType="server"
serviceDesc=_({"en": "MariaDB Database Server",
"tr": "MariaDB Veritabanı Sunucusu"})
PIDFILE="/run/mysqld/mysqld.pid"
DAEMON="/usr/bin/mysqld"
@synchronized
def start():
startService(command=DAEMON,
pidfile=PIDFILE,
detach=True,
donotify=True)
#os.system("pidof mariadb_server + /usr/bin/mysqld > /run/mysqld/mysqld.pid")
@synchronized
def stop():
stopService(pidfile=PIDFILE,
donotify=True)
try:
os.unlink(PIDFILE)
except OSError:
pass
def status():
return isServiceRunning(PIDFILE)
|
[
"[email protected]"
] |