hexsha (string, 40) | size (int64, 5 to 2.06M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 3 to 248) | max_stars_repo_name (string, 5 to 125) | max_stars_repo_head_hexsha (string, 40 to 78) | max_stars_repo_licenses (list, 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24, nullable) | max_issues_repo_path (string, 3 to 248) | max_issues_repo_name (string, 5 to 125) | max_issues_repo_head_hexsha (string, 40 to 78) | max_issues_repo_licenses (list, 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24, nullable) | max_forks_repo_path (string, 3 to 248) | max_forks_repo_name (string, 5 to 125) | max_forks_repo_head_hexsha (string, 40 to 78) | max_forks_repo_licenses (list, 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24, nullable) | content (string, 5 to 2.06M) | avg_line_length (float64, 1 to 1.02M) | max_line_length (int64, 3 to 1.03M) | alphanum_fraction (float64, 0 to 1) | count_classes (int64, 0 to 1.6M) | score_classes (float64, 0 to 1) | count_generators (int64, 0 to 651k) | score_generators (float64, 0 to 1) | count_decorators (int64, 0 to 990k) | score_decorators (float64, 0 to 1) | count_async_functions (int64, 0 to 235k) | score_async_functions (float64, 0 to 1) | count_documentation (int64, 0 to 1.04M) | score_documentation (float64, 0 to 1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
273f5601e7028883ce023d07d960ba6fa3bb289d | 1,716 | py | Python | M-SPRING/heuristic/shortest_paths.py | CN-UPB/SPRING | 1cb74919689e832987cb2c9b490eec7f09a64f52 | [
"Apache-2.0"
]
| 3 | 2019-09-27T08:07:11.000Z | 2021-11-19T11:27:39.000Z | M-SPRING/heuristic/shortest_paths.py | CN-UPB/SPRING | 1cb74919689e832987cb2c9b490eec7f09a64f52 | [
"Apache-2.0"
]
| null | null | null | M-SPRING/heuristic/shortest_paths.py | CN-UPB/SPRING | 1cb74919689e832987cb2c9b490eec7f09a64f52 | [
"Apache-2.0"
]
| null | null | null | import math
# return the delay of the specified path (= list of nodes)
def path_delay(links, path):
delay = 0
# go along nodes of the path and increment delay for each traversed link
for i in range(len(path) - 1):
        # skip connections on the same node without a link (both instances at the same node)
if path[i] != path[i + 1]:
delay += links.delay[(path[i], path[i+1])]
return delay
# floyd-warshall algorithm
def all_pairs_shortest_paths(nodes, links):
shortest_paths = {} # key: (src, dest), value: (path, weight, delay)
# initialize shortest paths
for v1 in nodes.ids:
for v2 in nodes.ids:
# path from node to itself has weight 0
if v1 == v2:
shortest_paths[(v1, v2)] = ([v1, v2], 0)
# path via direct link
elif (v1, v2) in links.ids:
shortest_paths[(v1, v2)] = ([v1, v2], links.weight((v1, v2)))
# other paths are initialized with infinite weight
else:
shortest_paths[(v1, v2)] = ([v1, v2], math.inf)
# indirect paths via intermediate node k
for k in nodes.ids:
for v1 in nodes.ids:
for v2 in nodes.ids:
# use k if it reduces the path weight
if shortest_paths[(v1, v2)][1] > shortest_paths[(v1, k)][1] + shortest_paths[(k, v2)][1]:
# new path via intermediate node k (when adding the two paths, k is excluded from the second path)
new_path = shortest_paths[(v1, k)][0] + shortest_paths[(k, v2)][0][1:]
new_weight = shortest_paths[(v1, k)][1] + shortest_paths[(k, v2)][1]
shortest_paths[(v1, v2)] = (new_path, new_weight)
# update dictionary to include delay for each path
shortest_paths = {k:(v[0], v[1], path_delay(links, v[0])) for k,v in shortest_paths.items()}
return shortest_paths
| 36.510638 | 104 | 0.648601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 651 | 0.379371 |
273f6a820dd8f3aeae5864ba60eb32fd5d0541ad | 2,398 | py | Python | testing/statistic.py | methk/RadixDLT-IoTSimulation | 886ba589c8e7be08c95cf3636438c10e97e16752 | [
"MIT"
]
| null | null | null | testing/statistic.py | methk/RadixDLT-IoTSimulation | 886ba589c8e7be08c95cf3636438c10e97e16752 | [
"MIT"
]
| 3 | 2021-03-09T21:18:22.000Z | 2021-09-02T01:05:44.000Z | testing/statistic.py | methk/RadixDLT-IoTSimulation | 886ba589c8e7be08c95cf3636438c10e97e16752 | [
"MIT"
]
| 1 | 2022-02-18T14:51:46.000Z | 2022-02-18T14:51:46.000Z | import os
import csv
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
def mean_confidence_interval(data, confidence=0.95):
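    # mean plus a two-sided Student-t confidence interval (default 95%) around it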
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, m-h, m+h
startingDir = 'data/output'
plotTestNumber = 12
totalRequests = 447
singleTestData = []
allLatencies = []
allErrors = 0
errorsData = []
tipsData = []
tipsDataSTD = []
powsData = []
powsDataSTD = []
plotData = {}
path = os.walk(startingDir)
next(path)
for directory in path:
tempTestData = {
'name': directory[0].split('/')[-1],
'tipsValue': [],
'powValue': [],
'errors': 0
}
for csvFilename in directory[2]:
with open(directory[0]+'/'+csvFilename, 'r') as csvFile:
reader = csv.reader(csvFile)
for row in reader:
srt = int(row[1])
tips = int(row[2])
fin = int(row[3])
                if fin == -1:
tempTestData['errors'] += 1
allErrors += 1
else:
tipsValue = tips - srt
powValue = fin - tips
tempTestData['powValue'].append(powValue)
tempTestData['tipsValue'].append(tipsValue)
allLatencies.append(tipsValue+powValue)
latence = fin - srt
if latence in plotData.keys():
plotData[latence] += 1
else:
plotData[latence] = 1
csvFile.close()
errorsNotWritten = totalRequests - \
len(tempTestData['powValue']) - tempTestData['errors']
tempTestData['errors'] += errorsNotWritten
allErrors += errorsNotWritten
singleTestData.append(tempTestData)
print('Avg= ' + str(round(np.mean(allLatencies), 4)))
print('Err= ' + str(100 * round((allErrors / (totalRequests * len(singleTestData))), 4)) + '%')
print(mean_confidence_interval(allLatencies))
fig = plt.figure()
ax = plt.axes()
sortedData = {}
for k, v in sorted(plotData.items()):
sortedData[k] = v
mean = int(round(np.mean(allLatencies), 4))
plt.plot(range(len(sortedData)), list(sortedData.values()), color='#b42526')
plt.axvline(x = mean, color='#252525')
plt.show()
| 27.25 | 95 | 0.565888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.069224 |
273feec7e2e1888ae2ab4fb11f3421c980ee0353 | 4,844 | py | Python | skutil/preprocessing/tests/test_balance.py | tgsmith61591/pynorm | 672e353a721036791e1e32250879c3276961e05a | [
"BSD-3-Clause"
]
| 38 | 2016-08-31T19:24:13.000Z | 2021-06-28T17:10:20.000Z | skutil/preprocessing/tests/test_balance.py | tgsmith61591/pynorm | 672e353a721036791e1e32250879c3276961e05a | [
"BSD-3-Clause"
]
| 42 | 2016-06-20T19:07:21.000Z | 2017-10-29T20:53:11.000Z | skutil/preprocessing/tests/test_balance.py | tgsmith61591/pynorm | 672e353a721036791e1e32250879c3276961e05a | [
"BSD-3-Clause"
]
| 17 | 2016-06-27T18:07:53.000Z | 2019-04-09T12:33:59.000Z | from __future__ import print_function
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from skutil.preprocessing import *
from skutil.preprocessing.balance import _BaseBalancer
from numpy.testing import assert_array_equal
from skutil.testing import assert_fails
import warnings
# Def data for testing
iris = load_iris()
X = pd.DataFrame(data=iris.data, columns=iris.feature_names)
X['target'] = iris.target
def _get_three_results(sampler):
x = X.iloc[:60] # 50 zeros, 10 ones
y = pd.concat([x, X.iloc[140:150]])
a, b = sampler.balance(x), sampler.balance(y)
sampler.ratio = 0.2
return a, b, sampler.balance(y)
def test_oversample():
a, b, c = _get_three_results(OversamplingClassBalancer(y='target', ratio=0.5))
expected_1_ct = 25
cts = a.target.value_counts()
assert cts[1] == expected_1_ct
cts = b.target.value_counts()
assert cts[1] == expected_1_ct
assert cts[2] == expected_1_ct
expected_2_ct = 10
cts = c.target.value_counts()
assert cts[1] == expected_2_ct
assert cts[2] == expected_2_ct
# test what happens when non-string passed as col name
failed = False
try:
OversamplingClassBalancer(y=1).balance(X)
except ValueError:
failed = True
assert failed
# test with too many classes
Y = X.copy()
Y['class'] = np.arange(Y.shape[0])
failed = False
try:
OversamplingClassBalancer(y='class').balance(Y)
except ValueError:
failed = True
assert failed
# test with one class
Y['class'] = np.zeros(Y.shape[0])
failed = False
try:
OversamplingClassBalancer(y='class').balance(Y)
except ValueError:
failed = True
assert failed
# test with bad ratio
for r in [0.0, 1.1, 'string']:
failed = False
try:
OversamplingClassBalancer(y='target', ratio=r).balance(X)
except ValueError:
failed = True
assert failed
# test where two classes are equally represented, and one has only a few
Y = X.iloc[:105]
d = OversamplingClassBalancer(y='target', ratio=1.0).balance(Y)
assert d.shape[0] == 150
cts = d.target.value_counts()
assert cts[0] == 50
assert cts[1] == 50
assert cts[2] == 50
def test_oversample_warning():
x = np.array([
[1, 2, 3],
[1, 2, 3],
[1, 2, 4]
])
df = pd.DataFrame.from_records(data=x, columns=['a', 'b', 'c'])
# catch the warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
OversamplingClassBalancer(y='c', ratio=1.0).balance(df)
assert len(w) == 1
def test_smote_error():
x = np.array([
[1, 2, 3],
[1, 2, 3],
[1, 2, 4]
])
df = pd.DataFrame.from_records(data=x, columns=['a', 'b', 'c'])
# this fails because we can't perform smote on single observation (obs='4', in this case)
assert_fails(SMOTEClassBalancer(y='c', ratio=1.0).balance, ValueError, df)
def test_smote():
a, b, c = _get_three_results(SMOTEClassBalancer(y='target', ratio=0.5))
expected_1_ct = 25
cts = a.target.value_counts()
assert cts[1] == expected_1_ct
cts = b.target.value_counts()
assert cts[1] == expected_1_ct
assert cts[2] == expected_1_ct
expected_2_ct = 10
cts = c.target.value_counts()
assert cts[1] == expected_2_ct
assert cts[2] == expected_2_ct
def test_undersample():
# since all classes are equal, should be no change here
b = UndersamplingClassBalancer(y='target').balance(X)
assert b.shape[0] == X.shape[0]
x = X.iloc[:60] # 50 zeros, 10 ones
b = UndersamplingClassBalancer(y='target', ratio=0.5).balance(x)
assert b.shape[0] == 30
cts = b.target.value_counts()
assert cts[0] == 20
assert cts[1] == 10
b = UndersamplingClassBalancer(y='target', ratio=0.25).balance(x)
assert b.shape[0] == 50
cts = b.target.value_counts()
assert cts[0] == 40
assert cts[1] == 10
def test_unneeded():
for sample_class in (UndersamplingClassBalancer,
SMOTEClassBalancer,
OversamplingClassBalancer):
sampler = sample_class(y='target', ratio=0.2, shuffle=False)
sampled = sampler.balance(X)
# assert array the same
assert_array_equal(X.index.values, sampled.index.values)
assert sampled.shape[0] == X.shape[0]
def test_superclass_not_implemented():
# anon balancer
class AnonBalancer(_BaseBalancer):
def __init__(self, y=None, ratio=0.2, as_df=True):
super(AnonBalancer, self).__init__(ratio, y, as_df)
def balance(self, X):
return super(AnonBalancer, self).balance(X)
assert_fails(AnonBalancer().balance, NotImplementedError, X)
| 27.367232 | 93 | 0.635632 | 244 | 0.050372 | 0 | 0 | 0 | 0 | 0 | 0 | 597 | 0.123245 |
2740e363aa1f94977ef475388c48f61555bf5a1a | 754 | py | Python | hard-gists/5017218/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
]
| 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/5017218/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
]
| 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/5017218/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
]
| 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | #!/usr/bin/env python
from Crypto.Cipher import AES
from Crypto.Util.strxor import strxor
from binascii import hexlify
K = b'0123456789abcdef'
cipher = AES.new(K, AES.MODE_ECB)
# Original Message
M1 = K
M2 = K
Cm0 = cipher.encrypt(b'\0' * AES.block_size)
Cm1 = cipher.encrypt(strxor(Cm0,M1))
Tm = Cm2 = cipher.encrypt(strxor(Cm1,M2))
N1 = b'iheiowehfiowehfw'
# Inject second message after the first message
Cx0 = cipher.encrypt(b'\0' * AES.block_size)
Cx1 = cipher.encrypt(strxor(Cx0,M1))
Cx2 = cipher.encrypt(strxor(Cx1,N1))
# X needs to *encrypt* to the same value as Cm1
X = strxor(cipher.decrypt(Cx1),Cx2)
Cx3 = cipher.encrypt(strxor(Cx2,X))
Tx = Cx4 = cipher.encrypt(strxor(Cx3,M2))
print("Tm = '%s'" % hexlify(Tm).decode())
print("Tx = '%s'" % hexlify(Tx).decode())
| 25.133333 | 47 | 0.709549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.263926 |
274247c350250c602de94ec641a4010f931234e4 | 3,540 | py | Python | options/running_options.py | kwshh/ImageDeconvlution | 561468463372a5727b553efa0330fc75901e29fc | [
"MIT"
]
| 25 | 2019-05-10T13:51:25.000Z | 2021-10-13T01:35:43.000Z | options/running_options.py | kwshh/ImageDeconvlution | 561468463372a5727b553efa0330fc75901e29fc | [
"MIT"
]
| 8 | 2019-05-10T13:51:07.000Z | 2021-06-03T07:13:28.000Z | options/running_options.py | kwshh/ImageDeconvlution | 561468463372a5727b553efa0330fc75901e29fc | [
"MIT"
]
| 7 | 2020-08-15T09:16:11.000Z | 2021-07-06T21:54:20.000Z | import argparse
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
class Options():
    def __init__(self):
        """Reset the class; indicates the class hasn't been initialized"""
self.initialized = False
def initialize(self):
parser = argparse.ArgumentParser()
parser.add_argument('--ModelName', help='Model Name', default='RGDNbasic')
parser.add_argument('--UseCUDA', help='Use CUDA?', type=str2bool, nargs='?', const=True, default=True)
parser.add_argument('--UseGradAdj',
help='Use grad adj module?',
type=str2bool,
nargs='?',
const=True,
default=True)
parser.add_argument('--UseReg',
help='Use Reg?',
type=str2bool,
nargs='?',
const=True,
default=True)
parser.add_argument('--UseGradScaler',
help='Add the grad scaler?',
type=str2bool,
nargs='?',
const=True,
default=True)
parser.add_argument('--StepNum',
help='maximum number of steps',
type=int,
nargs='?',
const=True,
default=40)
parser.add_argument('--StopEpsilon',
help='stopping condition',
type=float,
# default=1e-7)
default=float("inf"))
# CropSize =0 when no padding applied on y in advance; -1 for padding with kernel size in advance.
parser.add_argument('--CropSize', help='crop boundaies of results', type=int, default=-1)
parser.add_argument('--ImgPad', help='pad image before processing', type=str2bool, default=False)
parser.add_argument('--DataPath', help='DataPath', type=str, default='../rgdn_dataset/')
parser.add_argument('--OutPath', help='Path for output', type=str, default='../rgdn_results/')
parser.add_argument('--TrainedModelPath', help='path of trained model', type=str, default='./rgdn.tr')
parser.add_argument('--Suffix', type=str, help='Manually set suffix', default='Debug')
self.initialized = True
self.parser = parser
return parser
def print_options(self, opt):
# This function is adapted from 'cycleGAN' project.
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
self.message = message
def parse(self, is_print):
parser = self.initialize()
opt = parser.parse_args()
if(is_print):
self.print_options(opt)
self.opt = opt
return self.opt
| 41.647059 | 110 | 0.487288 | 3,277 | 0.925706 | 0 | 0 | 0 | 0 | 0 | 0 | 924 | 0.261017 |
2743e4857e6a05d46ea22a4c0d0bcac87b1eae86 | 982 | py | Python | python/scanr_doiresolver-0.10/scanr_doiresolver/main.py | dataesr/scanr-backend | 39681be69b9a96b4a07b9410754c897cd5b65c24 | [
"MIT"
]
| null | null | null | python/scanr_doiresolver-0.10/scanr_doiresolver/main.py | dataesr/scanr-backend | 39681be69b9a96b4a07b9410754c897cd5b65c24 | [
"MIT"
]
| null | null | null | python/scanr_doiresolver-0.10/scanr_doiresolver/main.py | dataesr/scanr-backend | 39681be69b9a96b4a07b9410754c897cd5b65c24 | [
"MIT"
]
| null | null | null | import json
from companies_plugin import extractor
from companies_plugin.utils import add_logger
from scanr_doiresolver import LIB_PATH
from scanr_doiresolver.resolver import resolve_publications
@add_logger
class Extractor(extractor.Extractor):
def extract(self, headers, properties, message):
"""
The message is only {"url": ""} as input
Output is {"url": "", "publications": ["", ""]}
"""
reply_to = properties["reply_to"]
msg = json.loads(message)
return json.dumps({
"id": msg.get("id"),
"url": msg.get("url"),
"publications": resolve_publications(msg.get("dois", []), msg.get("references", []))
}), reply_to
if __name__ == "__main__":
m = extractor.Main(batch_name="PUBLICATION_RESOLVER",
queue_name="PUBLICATION_RESOLVER",
extractor_class=Extractor,
mod_path=LIB_PATH)
m.launch()
| 30.6875 | 96 | 0.602851 | 518 | 0.527495 | 0 | 0 | 530 | 0.539715 | 0 | 0 | 242 | 0.246436 |
27453b9db08872e055996422f63acffb64350e41 | 361 | py | Python | examples/03additionals/legacy_score.py | dotness/swagger-marshmallow-codegen | 62938d780672d754431d50bde3eae04abefb64f1 | [
"MIT"
]
| null | null | null | examples/03additionals/legacy_score.py | dotness/swagger-marshmallow-codegen | 62938d780672d754431d50bde3eae04abefb64f1 | [
"MIT"
]
| null | null | null | examples/03additionals/legacy_score.py | dotness/swagger-marshmallow-codegen | 62938d780672d754431d50bde3eae04abefb64f1 | [
"MIT"
]
| null | null | null | # -*- coding:utf-8 -*-
# this is auto-generated by swagger-marshmallow-codegen
from swagger_marshmallow_codegen.schema.legacy import (
AdditionalPropertiesSchema,
LegacySchema
)
from marshmallow import fields
class Score(AdditionalPropertiesSchema):
name = fields.String(required=True)
class Meta:
additional_field = fields.Integer()
| 24.066667 | 55 | 0.753463 | 141 | 0.390582 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.213296 |
2746d32e08d6ab65d2525605b064c6bb89db2db5 | 1,280 | py | Python | tests/bigwig/test_create_bigwigs.py | BioinformaticsMaterials/epic | d601f2384784ea38f5de8c25156be4c8d9ef2fff | [
"MIT"
]
| null | null | null | tests/bigwig/test_create_bigwigs.py | BioinformaticsMaterials/epic | d601f2384784ea38f5de8c25156be4c8d9ef2fff | [
"MIT"
]
| null | null | null | tests/bigwig/test_create_bigwigs.py | BioinformaticsMaterials/epic | d601f2384784ea38f5de8c25156be4c8d9ef2fff | [
"MIT"
]
| null | null | null | import pytest
import pandas as pd
import numpy as np
from os import stat
from io import StringIO
from epic.bigwig.create_bigwigs import _create_bigwig
from epic.config.genomes import create_genome_size_dict
@pytest.fixture
def input_data():
contents = u"""Chromosome Bin End examples/test.bed
chr1 887600 887799 0
chr1 994600 994799 0
chr1 1041000 1041199 0
chr1 1325200 1325399 1
chr1 1541600 1541799 1
chr1 1599000 1599199 1
chr1 1770200 1770399 0
chr1 1820200 1820399 1
chr1 1995000 1995199 0
chr1 2063800 2063999 0
chr1 2129400 2129599 0
chr1 2239000 2239199 0
chr1 2318800 2318999 0
chr1 2448200 2448399 1
chr1 3006000 3006199 0
chr1 3046000 3046199 1
chr1 3089200 3089399 0
chr1 3093800 3093999 0
chr1 3096400 3096599 0"""
return pd.read_table(StringIO(contents), sep="\s+", index_col=[0, 1, 2])
@pytest.fixture
def output_bigwig(tmpdir):
p = tmpdir.mkdir("sub").join("outfile.bw")
return str(p)
@pytest.mark.unit
def test_create_bigwigs(input_data, output_bigwig, args_200_fast):
print(input_data)
print(output_bigwig)
d = create_genome_size_dict(args_200_fast.genome)
print(d)
_create_bigwig(input_data, output_bigwig, d)
filesize = stat(output_bigwig).st_size
print(filesize, "filesize")
assert filesize > 0
| 21.333333 | 76 | 0.767969 | 0 | 0 | 0 | 0 | 1,062 | 0.829688 | 0 | 0 | 508 | 0.396875 |
27484ca2bc8228c9f67c0d19e4ae6ba4c2b57e35 | 555 | py | Python | tests/solvers/__init__.py | neuyhwu/MIPLearn | c6b31a827d6c6e682d45171f21478162c0bc46d6 | [
"BSD-3-Clause"
]
| 64 | 2020-02-27T01:24:40.000Z | 2022-03-31T12:38:56.000Z | tests/solvers/__init__.py | neuyhwu/MIPLearn | c6b31a827d6c6e682d45171f21478162c0bc46d6 | [
"BSD-3-Clause"
]
| 3 | 2020-04-07T14:43:31.000Z | 2021-01-15T14:02:01.000Z | tests/solvers/__init__.py | zclab/MIPLearn | 9bd64c885aa645d41c30fa0ec4e0eedfaf703dac | [
"BSD-3-Clause"
]
| 13 | 2020-03-30T16:41:38.000Z | 2022-02-17T15:38:01.000Z | # MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from io import StringIO
from miplearn.solvers import _RedirectOutput
def test_redirect_output() -> None:
import sys
original_stdout = sys.stdout
io = StringIO()
with _RedirectOutput([io]):
print("Hello world")
assert sys.stdout == original_stdout
assert io.getvalue() == "Hello world\n"
| 30.833333 | 82 | 0.731532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 257 | 0.463063 |
27486b3592aa44c1f3081be500edb4d9d40e6414 | 1,717 | py | Python | QuantumBlack Machine Learning Software Engineer 2019/Correlation.py | sivolko/codeforce | 4b00c4c012780036e56d2f0e79adb2f5db7559df | [
"MIT"
]
| 3 | 2021-04-21T07:11:33.000Z | 2022-01-09T00:05:55.000Z | InterviewChallenges/QuantumBlack Machine Learning Software Engineer 2019/Correlation.py | sweetpand/Algorithms | 2e4dcf2d42de25531fae5b4ec0d96ce100043117 | [
"MIT"
]
| null | null | null | InterviewChallenges/QuantumBlack Machine Learning Software Engineer 2019/Correlation.py | sweetpand/Algorithms | 2e4dcf2d42de25531fae5b4ec0d96ce100043117 | [
"MIT"
]
| null | null | null | import math
def Correlation(scores):
physics = []
maths = []
chemistry = []
for each_scores in scores:
values = each_scores.split("\t")
maths.append(int(values[0]))
physics.append(int(values[1]))
chemistry.append(int(values[2]))
length = len(physics)
value1 = calculate_correlation(maths, physics, length)
value2 = calculate_correlation(physics, chemistry, length)
value3 = calculate_correlation(chemistry, maths, length)
# print(value1)
# print(value2)
# print(value3)
return [str(value1), str(value2), str(value3)]
# return '{}\{}{}'.format(value1, value2, value3)
def calculate_correlation(list1, list2, length):
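    # Pearson correlation: r = (n*sum(xy) - sum(x)*sum(y)) / sqrt((n*sum(x^2) - sum(x)^2) * (n*sum(y^2) - sum(y)^2))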
# print("into calculate_correlation", list2, list1, length)
multiply_list = [each[0] * each[1] for each in zip(list1, list2)]
num_termA = sum(multiply_list) * length
num_termB = sum(list1) * sum(list2)
numerator = num_termA - num_termB
# print("tA: {}, tB: {}, n: {}".format(num_termA, num_termB, numerator))
denom_calculator = lambda lis, l: math.sqrt((sum(list([pow(each, 2) for each in lis])) * l) - pow(sum(lis), 2))
denominator = denom_calculator(list1, length) * denom_calculator(list2, length)
value = round(numerator / denominator, 2)
# print("deno: {}, value: {}".format(denominator, value))
return value
class_scores = ['73\t72\t76', '48\t67\t76', '95\t92\t95', '95\t95\t96', '33\t59\t79', '47\t58\t74', '98\t95\t97',
'91\t94\t97', '95\t84\t90', '93\t83\t90', '70\t70\t78', '85\t79\t91', '33\t67\t76', '47\t73\t90',
'95\t87\t95', '84\t86\t95', '43\t63\t75', '95\t92\t100', '54\t80\t87', '72\t76\t90']
res = Correlation(class_scores)
print(res)
| 33.019231 | 115 | 0.638905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 527 | 0.306931 |
27487403a3d7b9e65ea004d6b9bb66d14163ac93 | 2,617 | py | Python | utest/test_iinekoko_db.py | MizunagiKB/IIneKoKo | 495ae6dc2887bb8b41331fab1f21812368400cf2 | [
"MIT"
]
| null | null | null | utest/test_iinekoko_db.py | MizunagiKB/IIneKoKo | 495ae6dc2887bb8b41331fab1f21812368400cf2 | [
"MIT"
]
| null | null | null | utest/test_iinekoko_db.py | MizunagiKB/IIneKoKo | 495ae6dc2887bb8b41331fab1f21812368400cf2 | [
"MIT"
]
| null | null | null | import sys
import unittest
import configparser
sys.path.append("./svc")
class CIIneKoKo_DB(unittest.TestCase):
def setUp(self):
import iinekoko_db
self.o_conf = configparser.ConfigParser()
self.o_conf.read("./svc/config.ini")
self.o_conn = iinekoko_db.CDatabase(self.o_conf)
def test_database(self):
pass
"""
def test_image_encdec(self):
import iinekoko_db
with open("utest/image/test.jpg", "rb") as f:
raw_data = f.read()
enc_data1 = iinekoko_db.dict_image_b64enc("image/jpeg", raw_data)
mime, dec_data = iinekoko_db.dict_image_b64dec(enc_data1)
iinekoko_db.dict_image_b64enc(mime, dec_data)
self.assertTrue(True)
def test_doc_session(self):
o_doc1 = self.o_conn.new_session("1", "username_1")
self.assertEqual(o_doc1.tw_id, "1")
self.assertEqual(o_doc1.tw_username, "username_1")
o_doc2 = self.o_conn.get_session(o_doc1.document_id)
self.assertEqual(o_doc1.document_id, o_doc2.document_id)
self.o_conn.del_session(o_doc1.document_id)
o_doc = self.o_conn.get_session(o_doc1.document_id)
self.assertEqual(o_doc, None)
o_doc = self.o_conn.get_session("X")
self.assertEqual(o_doc, None)
self.assertFalse(self.o_conn.del_session("X"))
def test_doc_image_ref(self):
import iinekoko_db
with open("utest/image/test.jpg", "rb") as f:
raw_data = f.read()
enc_data = iinekoko_db.dict_image_b64enc("image/jpeg", raw_data)
TW_ID = "1"
TW_USERNAME = "username_1"
o_doc = self.o_conn.new_image_ref(enc_data, TW_ID, TW_USERNAME, [])
self.assertEqual(o_doc.tw_id, TW_ID)
o_doc1 = self.o_conn.get_image_ref(o_doc.get_document_id())
self.assertEqual(o_doc.get_document_id(), o_doc1.get_document_id())
self.o_conn.del_image_ref(o_doc.get_document_id())
o_doc = self.o_conn.get_image_ref(o_doc.get_document_id())
self.assertEqual(o_doc, None)
def test_doc_image_mrk(self):
import iinekoko_db
ID_IMAGE_REF = "1"
TW_ID = "1"
TW_USERNAME = "username_1"
o_doc = self.o_conn.append_image_mrk(ID_IMAGE_REF, TW_ID, TW_USERNAME,
[])
self.o_conn.remove_image_mrk(o_doc.get_document_id())
def test_doc_image_ref_list(self):
import iinekoko_db
TW_ID = "431236837"
self.o_conn.get_image_ref_list(TW_ID)
"""
# [EOF]
| 29.077778 | 79 | 0.632404 | 2,531 | 0.967138 | 0 | 0 | 0 | 0 | 0 | 0 | 2,276 | 0.869698 |
2748a53e6b912bda05b5145d677d95d85ebd3529 | 987 | py | Python | Champion.py | KhangOP/PaladinsAssistantBot | 9b705dc688610ba52909f0b0e152d8684006c6a6 | [
"MIT"
]
| null | null | null | Champion.py | KhangOP/PaladinsAssistantBot | 9b705dc688610ba52909f0b0e152d8684006c6a6 | [
"MIT"
]
| null | null | null | Champion.py | KhangOP/PaladinsAssistantBot | 9b705dc688610ba52909f0b0e152d8684006c6a6 | [
"MIT"
]
| null | null | null | # Class to store champion names in one location
class Champion:
"""Champions in Paladins"""
DAMAGES = ["Cassie", "Kinessa", "Drogoz", "Bomb King", "Viktor", "Sha Lin", "Tyra", "Willo", "Lian", "Strix",
"Vivian", "Dredge", "Imani", "Tiberius"]
FLANKS = ["Skye", "Buck", "Evie", "Androxus", "Maeve", "Lex", "Zhin", "Talus", "Moji", "Koga"]
TANKS = ["Barik", "Fernando", "Ruckus", "Makoa", "Torvald", "Inara", "Ash", "Terminus", "Khan", "Atlas", "Raum"]
SUPPORTS = ["Grohk", "Grover", "Ying", "Mal Damba", "Seris", "Jenos", "Furia", "Pip"]
# Returns a number for indexing in a list
def get_champ_class(self, champ_name: str):
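        # 0 = damage, 1 = flank, 2 = tank, 3 = support, -1 = unknown champion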
champ_name = champ_name.title()
if champ_name in self.DAMAGES:
return 0
elif champ_name in self.FLANKS:
return 1
elif champ_name in self.TANKS:
return 2
elif champ_name in self.SUPPORTS:
return 3
else:
return -1
| 42.913043 | 116 | 0.566363 | 938 | 0.950355 | 0 | 0 | 0 | 0 | 0 | 0 | 432 | 0.43769 |
27491dfc485679f55f2ea50c1629c408b70c365b | 2,292 | py | Python | src/gpuz/data_helper.py | scalasm/gpuz-log-analysis | c9a6dd46bf8a12120c9d284411c5b1562b97fdfd | [
"Apache-2.0"
]
| null | null | null | src/gpuz/data_helper.py | scalasm/gpuz-log-analysis | c9a6dd46bf8a12120c9d284411c5b1562b97fdfd | [
"Apache-2.0"
]
| null | null | null | src/gpuz/data_helper.py | scalasm/gpuz-log-analysis | c9a6dd46bf8a12120c9d284411c5b1562b97fdfd | [
"Apache-2.0"
]
| null | null | null | import logging
from typing import Optional, List, Any
import gpuz.utility as utility
from pathlib import Path
import os
import pandas as pd
from pandas.core.frame import DataFrame
import numpy as np
import matplotlib as mp
logger: logging.Logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class DataHelper:
def __init__(self, root_dir: str) -> None:
self.root_dir = root_dir
logger.info( f"DataHelper working on root dir \"{root_dir}\" with"
f"\n\t - Dataset dir = {self.data_dir}"
f"\n\t - Work dataset dir= {self.work_dir}"
)
if not self.work_dir.exists():
logger.debug( f"Work dir {self.work_dir} does not exist: creating it ..." )
self.work_dir.mkdir()
@property
def data_dir(self) -> Path:
return Path(self.root_dir) / "datasets"
@property
def work_dir(self) -> Path:
return Path(self.root_dir) / "datasets_work"
def get_dataset_path(self, dataset_name: str) -> str:
dataset_path = self.data_dir / dataset_name
return str(dataset_path)
def get_work_dataset_path(self, dataset_name: str) -> str:
dataset_path = self.work_dir / dataset_name
return str(dataset_path)
def create_clean_csv_dataset(self, dataset_name: str, clean_dataset_name: str) -> None:
original_dataset_path = self.get_dataset_path(dataset_name)
clean_dataset_path = self.get_work_dataset_path(clean_dataset_name)
utility.preprocess_gpuz_log_file(original_dataset_path, clean_dataset_path)
def load_gpuz_dataset(self, dataset_name: str) -> DataFrame:
dataset_path = self.get_work_dataset_path(dataset_name)
df: DataFrame = pd.read_csv( dataset_path )
# Force the right column data types
for column in df.columns:
print( column )
if str(column) == "date":
df[column] = pd.to_datetime( df[column], errors="coerce" )
else:
df[column] = pd.to_numeric( df[column], errors="coerce" )
return df
if __name__ == "__main__":
root_dir = os.getcwd()
data_helper = DataHelper( root_dir )
data_helper.create_clean_csv_dataset( "gpuz_sensor_log.txt", "clean_gpuz_sensor_log.csv" )
| 32.742857 | 94 | 0.665794 | 1,783 | 0.777923 | 0 | 0 | 184 | 0.080279 | 0 | 0 | 338 | 0.147469 |
274a678ce7ef66ccf7cfb21453ee41a8617d1632 | 4,173 | py | Python | m5-101/content/solutions/web-crawler/section1&2&3.py | PaulCCCCCCH/m5-101 | 81201b00cd81c1747ea0cd5f042a09eda02d6d1c | [
"MIT"
]
| 4 | 2021-03-25T13:15:38.000Z | 2021-11-10T12:29:19.000Z | m5-101/content/solutions/web-crawler/section1&2&3.py | PaulCCCCCCH/m5-101 | 81201b00cd81c1747ea0cd5f042a09eda02d6d1c | [
"MIT"
]
| null | null | null | m5-101/content/solutions/web-crawler/section1&2&3.py | PaulCCCCCCH/m5-101 | 81201b00cd81c1747ea0cd5f042a09eda02d6d1c | [
"MIT"
]
| 4 | 2021-03-25T13:18:10.000Z | 2021-04-08T13:44:48.000Z | from posix import listdir
import requests
from bs4 import BeautifulSoup as bs
import math
import sys, getopt
import re
import os
def re_cleaner(target: str, rep: str) -> str:
return re.sub("[^0-9a-zA-Z]+", rep, target)
# For Oxford ==============================================================================
# base_url = "https://www.ox.ac.uk/"
# base_dir = "pages/oxford"
# if not os.path.exists(base_dir):
# os.makedirs(base_dir)
# # Get the root page and extract target urls
# url_pages = requests.get('https://www.ox.ac.uk/admissions/graduate/courses/courses-a-z-listing')
# root_soup = bs(url_pages.text, 'html.parser')
# # print(root_soup.prettify())
# # find by class attr
# course_divs = root_soup.find_all(attrs={"class": "course-title"})
# for div in course_divs:
# # take the <a> tag out of the div, then parse its url
# # using re to find_all the urls matching ** graduate/courses/ ** directly would be easier to follow
# link, degree = div.children
# degree = degree.strip()
# if re.search("D", degree) is None and re.match("PG", degree) is None:
# r = requests.get(base_url + link.get('href'))
# course_name = link.text
# with open(os.path.join(base_dir, re_cleaner(course_name+' '+degree, '-')+'.html'), mode='wb') as f:
# f.write(r.content)
#UIUC ==============================================================================
# base_url = "http://catalog.illinois.edu/"
# base_dir = "pages/uiuc"
# if not os.path.exists(base_dir):
# os.makedirs(base_dir)
# # Get the root page and extract target urls
# url_pages = requests.get('http://catalog.illinois.edu/graduate/')
# root_soup = bs(url_pages.text, 'html.parser')
# # print(root_soup.prettify())
# course_heads = root_soup.find_all("h4")
# for h in course_heads:
# # take the <a> tag out of the heading, then parse its url; skip headings that have a margin-left style
# if 'style' not in h.attrs:
# # split into at most two parts; degree carries extra text here, but when the file is written the correct degree sits after the last dash, which is neat
# major, degree = h.text.split(',' ,1)
# degree = degree.strip()
# if re.search("D", degree) is None and re.match("PG", degree) is None:
# r = requests.get(base_url + h.a['href'])
# with open(os.path.join(base_dir, re_cleaner(major + ' ' + degree, ' ')+'.html'), mode='wb') as f:
# f.write(r.content)
# IC ==============================================================================
#
# base_url = "https://www.imperial.ac.uk/"
# base_dir = "pages/ic"
# if not os.path.exists(base_dir):
# os.makedirs(base_dir)
# # Get the root page and extract target urls
# url_pages = requests.get('https://www.imperial.ac.uk/study/pg/courses/')
# root_soup = bs(url_pages.text, 'html.parser')
# # find by class attr
# course_lis = root_soup.find_all(attrs={"class": "course"})
# for li in course_lis:
# degree = li.a.contents[5].contents[1].strip()
# if re.match("D", degree) is None and re.match("PG", degree) is None:
# url = base_url + li.a['href']
# major = li.a['title']
# r = requests.get(url)
# with open(os.path.join(base_dir, re_cleaner(major + ' ' + degree, '-')+'.html'), mode='wb') as f:
# f.write(r.content)
# Make Index ==============================================================================
import json
import pickle
def clean_html(soup: bs):
ss = soup.find_all('script')
for s in ss:
s.decompose()
return re_cleaner(soup.get_text(), ' ')
data = {}
pages_path = os.path.join(os.getcwd(), 'pages')
idx = 1
for school in os.listdir(pages_path):
school_path = os.path.join(pages_path, school)
for filename in os.listdir(school_path):
filepath = os.path.join(school_path, filename)
program, degree_html = filename.rsplit('-', 1)
degree,_ = degree_html.split('.', 1)
print(filename)
with open(filepath) as f:
soup = bs(f, 'html.parser')
desc = clean_html(soup)
jsobj = json.dumps({"document_id": idx, "school_name": school, "program_name": program, "degree": degree, "file_path": filepath, "program_desc": desc})
data[idx] = jsobj
idx += 1
pkfile = 'programs.pkl'
with open(pkfile, 'wb') as f:
pickle.dump(data, f)
| 32.601563 | 163 | 0.578481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,176 | 0.737404 |
274f3dedfd8af9c5162a6375c14921ce4ca86095 | 6,319 | py | Python | sandbox/andrew/run_trpo_strike.py | leopauly/Observation-Learning-Simulations | 462c04a87c45aae51537b8ea5b44646afa31d3a5 | [
"MIT"
]
| 49 | 2017-12-11T11:00:02.000Z | 2022-03-30T05:19:31.000Z | sandbox/andrew/run_trpo_strike.py | leopauly/Observation-Learning-Simulations | 462c04a87c45aae51537b8ea5b44646afa31d3a5 | [
"MIT"
]
| 2 | 2018-01-01T17:39:56.000Z | 2019-07-24T04:49:08.000Z | sandbox/andrew/run_trpo_strike.py | leopauly/Observation-Learning-Simulations | 462c04a87c45aae51537b8ea5b44646afa31d3a5 | [
"MIT"
]
| 12 | 2017-12-13T11:52:17.000Z | 2020-12-03T00:53:29.000Z | import os
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
# from rllab.envs.mujoco.gather.swimmer_gather_env import SwimmerGatherEnv
os.environ["THEANO_FLAGS"] = "device=cpu"
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.envs.normalized_env import NormalizedEnv
from rllab.algos.trpo import TRPO
from rllab.misc.instrument import stub, run_experiment_lite
import itertools
from rllab import config
from sandbox.bradly.third_person.launchers.cyberpunk_aws import CyberpunkAWS
from sandbox.bradly.third_person.launchers.cyberpunk_aws_gail import CyberpunkAWSGAIL
stub(globals())
from distutils.dir_util import copy_tree
import numpy as np
import os, shutil
srcmodeldirs = ['../models/strikeinc/']
modeldir = 'model/'
if os.path.exists(modeldir):
shutil.rmtree(modeldir)
for srcdir in srcmodeldirs:
copy_tree(srcdir, modeldir)
# config.AWS_IMAGE_ID = "ami-7d23496b"#"ami-1263eb04"
# config.AWS_INSTANCE_TYPE = "g2.8xlarge"
# config.AWS_SPOT_PRICE = "2.6001"
# subnet = 'us-east-1d'
config.AWS_IMAGE_ID = "ami-20c1e740"
config.AWS_INSTANCE_TYPE = "g2.2xlarge"
config.AWS_SPOT_PRICE = "0.903"
subnet = 'us-west-1c'
# config.AWS_IMAGE_ID = "ami-ecdd408c"
# config.AWS_INSTANCE_TYPE = "g2.8xlarge"
# config.AWS_SPOT_PRICE = "2.601"
# subnet = 'us-west-2b'
# config.AWS_IMAGE_ID = "ami-b8f069d8"
# config.AWS_INSTANCE_TYPE = "g2.2xlarge"
# config.AWS_SPOT_PRICE = "0.601"
# subnet = 'us-west-2b'
config.AWS_NETWORK_INTERFACES = [
dict(
SubnetId=config.ALL_SUBNET_INFO[subnet]["SubnetID"],
Groups=[config.ALL_SUBNET_INFO[subnet]["Groups"]],
DeviceIndex=0,
AssociatePublicIpAddress=True,
)
]
def rand_strike():
vp = np.random.uniform(low=0, high=360, size=10).tolist()
angle = [45]#np.random.uniform(low=0, high=90, size=10).tolist()
ball = np.array([0.5, -0.175])
while True:
goal = np.concatenate([
np.random.uniform(low=0.15, high=0.7, size=1),
np.random.uniform(low=0.1, high=1.0, size=1)])
if np.linalg.norm(ball - goal) > 0.17:
break
return dict(vp=vp, goal=goal.tolist(), angle=angle,
imsize=(64, 64), name="strike", nvp=1,
modelname='model/model_90000_1408.57_1291.54_110.72',
modeldata='model/vdata_train.npy')
strike_params = {
"env" : "Striker-v0",
"rand" : rand_strike,
}
oracle_mode = dict(mode='oracle', mode2='oracle')
# inception_mode = dict(mode='inception', imsize=(299, 299))
oursinception_mode = dict(mode='oursinception', mode2='oursinception', scale=0.1, imsize=(299, 299),
modelname='model/model_70000_225002.77_128751.15_96043.16_0')
ours_mode = dict(mode='ours', mode2='ours', scale=0.1)
ours_recon = dict(mode='ours', mode2='oursrecon', scale=1.0, ablation_type='recon')
tpil_mode = dict(mode='tpil', mode2='tpil', imsize=(48, 48))
gail_mode = dict(mode='tpil', mode2='gail')
ours_nofeat = dict(mode='ours', mode2='ours_nofeat', scale=1.0, ablation_type='nofeat')
ours_noimage = dict(mode='ours', mode2='ours_noimage', scale=1.0, ablation_type='noimage')
seeds = [123]
sanity = 'changing'
for params in [strike_params]:
for nvar in range(5):
randparams = params['rand']()
for modeparams in [oursinception_mode]:
for scale in [0.0, 0.1, 100.0]:#[1.0, 10.0, 100.0, 0.1]:
copyparams = randparams.copy()
copyparams.update(modeparams)
copyparams['scale'] = scale
mdp = normalize(GymEnv(params['env'], **copyparams))
if copyparams['mode'] == 'tpil':
if sanity == 'change1':
copyparams = params['rand']()
copyparams.update(modeparams)
mdp2 = normalize(GymEnv(params['env'], **copyparams))
elif sanity == 'same':
mdp2 = mdp
elif sanity == 'changing':
mdp2 = normalize(GymEnv(params['env'], mode='tpil'))
if 'imsize' in copyparams:
imsize = copyparams['imsize']
for seed in seeds:
if copyparams['mode'] == 'tpil':
del copyparams['imsize']
awsalgo = CyberpunkAWS
if modeparams == gail_mode:
awsalgo = CyberpunkAWSGAIL
algo = awsalgo(
expert_env=mdp2,#normalize(GymEnv(params['env'], mode='tpil')),
novice_env=mdp,
horizon=50,
itrs=200,
trajs=250,
imsize=imsize,
expert_pkl='expert_striker.pkl',
sanity=sanity,
**copyparams,
)
else:
policy = GaussianMLPPolicy(
env_spec=mdp.spec,
hidden_sizes=(32, 32),
init_std=1.0
)
baseline = LinearFeatureBaseline(
mdp.spec,
)
batch_size = 50*250
algo = TRPO(
env=mdp,
policy=policy,
baseline=baseline,
batch_size=batch_size,
whole_paths=True,
max_path_length=50,
n_itr=200,
step_size=0.01,
subsample_factor=1.0,
**copyparams
)
run_experiment_lite(
algo.train(),
exp_prefix="r-strike-ours-inception-7c-quad2",
n_parallel=4,
# dry=True,
snapshot_mode="all",
seed=seed,
mode="ec2_mujoco",
# terminate_machine=False
)
| 37.613095 | 100 | 0.547238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,336 | 0.211426 |
274fe43fa546672495660673160348de8f0a6c2e | 17,257 | py | Python | bpmfwfft/fft_sampling.py | jimtufts/bpmfwfft | 091d2269b122f00b9dd8a01e34303e3e946f8ea0 | [
"MIT"
]
| null | null | null | bpmfwfft/fft_sampling.py | jimtufts/bpmfwfft | 091d2269b122f00b9dd8a01e34303e3e946f8ea0 | [
"MIT"
]
| null | null | null | bpmfwfft/fft_sampling.py | jimtufts/bpmfwfft | 091d2269b122f00b9dd8a01e34303e3e946f8ea0 | [
"MIT"
]
| null | null | null | """
This is to generate interaction energies and corresponding translational vectors,
given a fixed receptor and an ensemble of ligand coordinates (including rotations and/or configurations)
"""
from __future__ import print_function
import numpy as np
import netCDF4
try:
from bpmfwfft.grids import RecGrid
from bpmfwfft.grids import LigGrid
except:
from grids import RecGrid
from grids import LigGrid
KB = 0.001987204134799235
class Sampling(object):
def __init__(self, rec_prmtop, lj_sigma_scal_fact, rec_inpcrd,
bsite_file, grid_nc_file,
lig_prmtop, lig_inpcrd,
lig_coord_ensemble,
energy_sample_size_per_ligand,
output_nc,
temperature=300.):
"""
:param rec_prmtop: str, name of receptor prmtop file
:param lj_sigma_scal_fact: float, used to check consitency when loading receptor and ligand grids
:param rec_inpcrd: str, name of receptor inpcrd file
:param bsite_file: None or str, name of file defining the box, the same as
from AlGDock pipeline. "measured_binding_site.py"
:param grid_nc_file: str, name of receptor precomputed grid netCDF file
:param lig_prmtop: str, name of ligand prmtop file
:param lig_inpcrd: str, name of ligand inpcrd file
:param lig_coord_ensemble: list of 2d array, each array is an ligand coordinate
:param energy_sample_size_per_ligand: int, number of energies and translational vectors to store for each ligand crd
:param output_nc: str, name of nc file
:param temperature: float
"""
self._energy_sample_size_per_ligand = energy_sample_size_per_ligand
self._beta = 1./ temperature / KB
rec_grid = self._create_rec_grid(rec_prmtop, lj_sigma_scal_fact, rec_inpcrd,
bsite_file, grid_nc_file)
self._rec_crd = rec_grid.get_crd()
self._lig_grid = self._create_lig_grid(lig_prmtop, lj_sigma_scal_fact, lig_inpcrd, rec_grid)
self._lig_coord_ensemble = self._load_ligand_coor_ensemble(lig_coord_ensemble)
self._nc_handle = self._initialize_nc(output_nc)
def _create_rec_grid(self, rec_prmtop, lj_sigma_scal_fact, rec_inpcrd, bsite_file, grid_nc_file):
rec_grid = RecGrid(rec_prmtop, lj_sigma_scal_fact, rec_inpcrd, bsite_file,
grid_nc_file, new_calculation=False)
return rec_grid
def _create_lig_grid(self, lig_prmtop, lj_sigma_scal_fact, lig_inpcrd, rec_grid):
lig_grid = LigGrid(lig_prmtop, lj_sigma_scal_fact, lig_inpcrd, rec_grid)
return lig_grid
def _load_ligand_coor_ensemble(self, lig_coord_ensemble):
assert len(lig_coord_ensemble.shape) == 3, "lig_coord_ensemble must be 3-D array."
ensemble = lig_coord_ensemble
natoms = self._lig_grid.get_natoms()
for i in range(len(ensemble)):
if (ensemble[i].shape[0] != natoms) or (ensemble[i].shape[1] != 3):
raise RuntimeError("Ligand crd %d does not have correct shape"%i)
return ensemble
def _initialize_nc(self, output_nc):
nc_handle = netCDF4.Dataset(output_nc, mode="w", format="NETCDF4")
nc_handle.createDimension("three", 3)
rec_natoms = self._rec_crd.shape[0]
nc_handle.createDimension("rec_natoms", rec_natoms)
lig_natoms = self._lig_grid.get_natoms()
nc_handle.createDimension("lig_natoms", lig_natoms)
nc_handle.createDimension("lig_sample_size", self._lig_coord_ensemble.shape[0])
nc_handle.createDimension("energy_sample_size_per_ligand", self._energy_sample_size_per_ligand)
nc_handle.createVariable("rec_positions", "f8", ("rec_natoms", "three"))
nc_handle.variables["rec_positions"][:,:] = self._rec_crd
nc_handle.createVariable("lig_positions", "f8", ("lig_sample_size", "lig_natoms", "three"))
nc_handle.createVariable("lig_com", "f8", ("lig_sample_size", "three"))
nc_handle.createVariable("volume", "f8", ("lig_sample_size"))
nc_handle.createVariable("nr_grid_points", "i8", ("lig_sample_size"))
nc_handle.createVariable("exponential_sums", "f8", ("lig_sample_size"))
nc_handle.createVariable("log_of_divisors", "f8", ("lig_sample_size"))
nc_handle.createVariable("mean_energy", "f8", ("lig_sample_size"))
nc_handle.createVariable("min_energy", "f8", ("lig_sample_size"))
nc_handle.createVariable("energy_std", "f8", ("lig_sample_size"))
nc_handle.createVariable("resampled_energies", "f8", ("lig_sample_size", "energy_sample_size_per_ligand"))
nc_handle.createVariable("resampled_trans_vectors", "i8", ("lig_sample_size", "energy_sample_size_per_ligand", "three"))
nc_handle = self._write_grid_info(nc_handle)
return nc_handle
def _write_grid_info(self, nc_handle):
"""
write grid info, "x", "y", "z" ...
"""
data = self._lig_grid.get_grids()
grid_func_names = self._lig_grid.get_grid_func_names()
keys = [key for key in data.keys() if key not in grid_func_names]
for key in keys:
for dim in data[key].shape:
dim_name = "%d"%dim
if dim_name not in nc_handle.dimensions.keys():
nc_handle.createDimension(dim_name, dim)
for key in keys:
if data[key].dtype == int:
store_format = "i8"
elif data[key].dtype == float:
store_format = "f8"
else:
raise RuntimeError( "Unsupported dtype %s"%data[key].dtype )
dimensions = tuple([ "%d"%dim for dim in data[key].shape ])
nc_handle.createVariable(key, store_format, dimensions)
for key in keys:
nc_handle.variables[key][:] = data[key]
return nc_handle
def _save_data_to_nc(self, step):
self._nc_handle.variables["lig_positions"][step, :, :] = self._lig_grid.get_crd()
self._nc_handle.variables["lig_com"][step, :] = self._lig_grid.get_initial_com()
self._nc_handle.variables["volume"][step] = self._lig_grid.get_box_volume()
self._nc_handle.variables["nr_grid_points"][step] = self._lig_grid.get_number_translations()
self._nc_handle.variables["exponential_sums"][step] = self._exponential_sum
self._nc_handle.variables["log_of_divisors"][step] = self._log_of_divisor
self._nc_handle.variables["mean_energy"][step] = self._mean_energy
self._nc_handle.variables["min_energy"][step] = self._min_energy
self._nc_handle.variables["energy_std"][step] = self._energy_std
self._nc_handle.variables["resampled_energies"][step,:] = self._resampled_energies
self._nc_handle.variables["resampled_trans_vectors"][step,:,:] = self._resampled_trans_vectors
return None
def _do_fft(self, step):
print("Doing FFT for step %d"%step, "test")
lig_conf = self._lig_coord_ensemble[step]
self._lig_grid.cal_grids(molecular_coord = lig_conf)
energies = self._lig_grid.get_meaningful_energies()
print("Energies shape:", energies.shape)
self._mean_energy = energies.mean()
self._min_energy = energies.min()
self._energy_std = energies.std()
print("Number of finite energy samples", energies.shape[0])
exp_energies = -self._beta * energies
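        # for numerical stability: record the largest exponent (log_of_divisor), floor negative exponents at 0, then exponentiate the shifted values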
print(f"Max exp energy {exp_energies.max()}, Min exp energy {exp_energies.min()}")
self._log_of_divisor = exp_energies.max()
exp_energies[exp_energies < 0] = 0
exp_energies = np.exp(exp_energies - self._log_of_divisor)
self._exponential_sum = exp_energies.sum()
exp_energies /= self._exponential_sum
print("Number of exponential energy samples", exp_energies.sum())
# sel_ind = np.random.choice(exp_energies.shape[0], size=self._energy_sample_size_per_ligand, p=exp_energies, replace=True)
try:
sel_ind = np.random.choice(exp_energies.shape[0], size=self._energy_sample_size_per_ligand, p=exp_energies, replace=False)
except:
print(f"Only {np.count_nonzero(exp_energies)} non-zero entries in p, falling back to replacement")
sel_ind = np.random.choice(exp_energies.shape[0], size=self._energy_sample_size_per_ligand, p=exp_energies, replace=True)
del exp_energies
self._resampled_energies = [energies[ind] for ind in sel_ind]
del energies
self._lig_grid.set_meaningful_energies_to_none()
trans_vectors = self._lig_grid.get_meaningful_corners()
self._resampled_trans_vectors = [trans_vectors[ind] for ind in sel_ind]
del trans_vectors
self._resampled_energies = np.array(self._resampled_energies, dtype=float)
self._resampled_trans_vectors = np.array(self._resampled_trans_vectors, dtype=int)
self._save_data_to_nc(step)
return None
def run_sampling(self):
"""
"""
for step in range(self._lig_coord_ensemble.shape[0]):
self._do_fft(step)
print("Min energy", self._min_energy)
print("Mean energy", self._mean_energy)
print("STD energy", self._energy_std)
print("Initial center of mass", self._lig_grid.get_initial_com())
print("Grid volume", self._lig_grid.get_box_volume())
print("Number of translations", self._lig_grid.get_number_translations())
print("-------------------------------\n\n")
self._nc_handle.close()
return None
#
# TODO: the class above assumes that the resample size is smaller than the number of meaningful energies
# in general, the number of meaningful energies can be very small or even zero (no energy)
# when the number of meaningful energies is zero, that stratum contributes n_points zeros to the exponential mean
#
# so one needs to consider three cases separately:
# len(meaningful energies) == 0
# 0 < len(meaningful energies) <= resample size
# len(meaningful energies) > resample size
#
class Sampling_PL(Sampling):
def _write_data_key_2_nc(self, data, key):
if data.shape[0] == 0:
return None
for dim in data.shape:
dim_name = "%d"%dim
if dim_name not in self._nc_handle.dimensions.keys():
self._nc_handle.createDimension(dim_name, dim)
if data.dtype == int:
store_format = "i8"
elif data.dtype == float:
store_format = "f8"
else:
raise RuntimeError("unsupported dtype %s"%data.dtype)
dimensions = tuple(["%d"%dim for dim in data.shape])
self._nc_handle.createVariable(key, store_format, dimensions)
self._nc_handle.variables[key][:] = data
return None
def _initialize_nc(self, output_nc):
"""
"""
nc_handle = netCDF4.Dataset(output_nc, mode="w", format="NETCDF4")
nc_handle.createDimension("three", 3)
rec_natoms = self._rec_crd.shape[0]
nc_handle.createDimension("rec_natoms", rec_natoms)
lig_natoms = self._lig_grid.get_natoms()
nc_handle.createDimension("lig_natoms", lig_natoms)
nc_handle.createDimension("lig_sample_size", self._lig_coord_ensemble.shape[0])
#nc_handle.createDimension("energy_sample_size_per_ligand", self._energy_sample_size_per_ligand)
nc_handle.createVariable("rec_positions", "f8", ("rec_natoms", "three"))
nc_handle.variables["rec_positions"][:,:] = self._rec_crd
nc_handle.createVariable("lig_positions", "f8", ("lig_sample_size", "lig_natoms", "three"))
nc_handle.createVariable("lig_com", "f8", ("lig_sample_size", "three"))
nc_handle.createVariable("volume", "f8", ("lig_sample_size"))
nc_handle.createVariable("nr_grid_points", "i8", ("lig_sample_size"))
nc_handle.createVariable("nr_finite_energy", "i8", ("lig_sample_size"))
nc_handle.createVariable("exponential_sums", "f8", ("lig_sample_size"))
nc_handle.createVariable("log_of_divisors", "f8", ("lig_sample_size"))
nc_handle.createVariable("mean_energy", "f8", ("lig_sample_size"))
nc_handle.createVariable("min_energy", "f8", ("lig_sample_size"))
nc_handle.createVariable("energy_std", "f8", ("lig_sample_size"))
#nc_handle.createVariable("resampled_energies", "f8", ("lig_sample_size", "energy_sample_size_per_ligand"))
#nc_handle.createVariable("resampled_trans_vectors", "i8", ("lig_sample_size", "energy_sample_size_per_ligand", "three"))
nc_handle = self._write_grid_info(nc_handle)
return nc_handle
def _save_data_to_nc(self, step):
self._nc_handle.variables["lig_positions"][step, :, :] = self._lig_grid.get_crd()
self._nc_handle.variables["lig_com"][step, :] = self._lig_grid.get_initial_com()
self._nc_handle.variables["volume"][step] = self._lig_grid.get_box_volume()
self._nc_handle.variables["nr_grid_points"][step] = self._lig_grid.get_number_translations()
self._nc_handle.variables["nr_finite_energy"][step] = self._nr_finite_energy
self._nc_handle.variables["exponential_sums"][step] = self._exponential_sum
self._nc_handle.variables["log_of_divisors"][step] = self._log_of_divisor
self._nc_handle.variables["mean_energy"][step] = self._mean_energy
self._nc_handle.variables["min_energy"][step] = self._min_energy
self._nc_handle.variables["energy_std"][step] = self._energy_std
self._write_data_key_2_nc(self._resampled_energies, "resampled_energies_%d"%step)
self._write_data_key_2_nc(self._resampled_trans_vectors, "resampled_trans_vectors_%d"%step)
return None
def _do_fft(self, step):
print("Doing FFT for step %d"%step)
lig_conf = self._lig_coord_ensemble[step]
print(self._lig_grid["SASAr"])
self._lig_grid.cal_grids(molecular_coord = lig_conf)
energies = self._lig_grid.get_meaningful_energies()
self._nr_finite_energy = energies.shape[0]
print("Number of finite energy samples", self._nr_finite_energy)
if energies.shape[0] > 0:
self._mean_energy = energies.mean()
self._min_energy = energies.min()
self._energy_std = energies.std()
exp_energies = -self._beta * energies
self._log_of_divisor = exp_energies.max()
exp_energies = np.exp(exp_energies - self._log_of_divisor)
self._exponential_sum = exp_energies.sum()
exp_energies /= self._exponential_sum
sample_size = min(exp_energies.shape[0], self._energy_sample_size_per_ligand)
sel_ind = np.random.choice(exp_energies.shape[0], size=sample_size, p=exp_energies, replace=True)
del exp_energies
self._resampled_energies = [energies[ind] for ind in sel_ind]
del energies
self._lig_grid.set_meaningful_energies_to_none()
trans_vectors = self._lig_grid.get_meaningful_corners()
self._resampled_trans_vectors = [trans_vectors[ind] for ind in sel_ind]
del trans_vectors
self._resampled_energies = np.array(self._resampled_energies, dtype=float)
self._resampled_trans_vectors = np.array(self._resampled_trans_vectors, dtype=int)
else:
self._mean_energy = np.inf
self._min_energy = np.inf
self._energy_std = np.inf
self._log_of_divisor = 1.
self._exponential_sum = 0.
self._resampled_energies = np.array([], dtype=float)
del energies
self._lig_grid.set_meaningful_energies_to_none()
self._resampled_trans_vectors = np.array([], dtype=float)
self._save_data_to_nc(step)
return None
if __name__ == "__main__":
# test
rec_prmtop = "../examples/amber/ubiquitin_ligase/receptor.prmtop"
lj_sigma_scal_fact = 0.8
rec_inpcrd = "../examples/amber/ubiquitin_ligase/receptor.inpcrd"
# bsite_file = "../examples/amber/t4_lysozyme/measured_binding_site.py"
bsite_file = None
grid_nc_file = "../examples/grid/ubiquitin_ligase/grid.nc"
lig_prmtop = "../examples/amber/ubiquitin/ligand.prmtop"
lig_inpcrd = "../examples/amber/ubiquitin/ligand.inpcrd"
energy_sample_size_per_ligand = 200
output_nc = "../examples/fft_sampling/ubql_ubiquitin/fft_sampling.nc"
ligand_md_trj_file = "../examples/ligand_md/ubiquitin/rotation.nc"
lig_coord_ensemble = netCDF4.Dataset(ligand_md_trj_file, "r").variables["positions"][:]
sampler = Sampling(rec_prmtop, lj_sigma_scal_fact, rec_inpcrd,
bsite_file, grid_nc_file,
lig_prmtop, lig_inpcrd,
lig_coord_ensemble,
energy_sample_size_per_ligand,
output_nc,
temperature=300.)
sampler.run_sampling()
| 42.296569 | 134 | 0.665121 | 15,119 | 0.876108 | 0 | 0 | 0 | 0 | 0 | 0 | 4,679 | 0.271136 |
27516da8559c76c6cbc57c679759132dc516e07e | 6,241 | py | Python | kotori/config.py | joshuaavalon/kotori | 3e7e1cf7b2c1834aea9a9404e80a53f8282aba1b | [
"Apache-2.0"
]
| null | null | null | kotori/config.py | joshuaavalon/kotori | 3e7e1cf7b2c1834aea9a9404e80a53f8282aba1b | [
"Apache-2.0"
]
| null | null | null | kotori/config.py | joshuaavalon/kotori | 3e7e1cf7b2c1834aea9a9404e80a53f8282aba1b | [
"Apache-2.0"
]
| null | null | null | import json
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from os.path import splitext
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from PIL import Image
from ruamel.yaml import YAML
from kotori.error import ConfigError
__all__ = [
"ItemKey", "RouteConfig", "TransformConfig", "StorageConfig", "Config",
"ConfigLoader", "DictConfigLoader", "JsonConfigLoader", "YamlConfigLoader"
]
@dataclass
class ItemKey:
path: str
key_path: str = field(init=False)
key: str = field(init=False)
transform: str = field(init=False)
suffix: str = field(init=False)
folder: str = field(init=False)
name: str = field(init=False)
format: str = field(init=False)
def __post_init__(self):
if self.path.endswith("/"):
raise ValueError("path cannot end with /")
path, suffix = splitext(self.path)
parts: List[str] = list(filter(None, path.split("/")))
self.format = Image.registered_extensions().get(suffix)
if self.format is None:
raise ValueError("Unknown format")
if len(parts) < 2:
raise ValueError("Too few arguments")
self.transform = parts[0]
key_parts = parts[1:]
self.key = "/".join(key_parts)
self.key_path = f"/{self.key}"
self.suffix = suffix
self.name = key_parts.pop()
self.folder = f"/{'/'.join(key_parts)}"
@dataclass
class SaveConfig:
format: str
options: Dict[str, Any]
@dataclass
class RouteConfig:
storage: str
transform: Union[bool, List[str], str] = False
expire: Optional[int] = None
save: Dict[str, Dict[str, Any]] = field(default_factory=dict)
@dataclass
class TransformConfig:
type: str
options: List[str] = field(default_factory=list)
@staticmethod
def from_query(query: str) -> "TransformConfig":
parts = [t.strip() for t in query.split("_")]
return TransformConfig(type=parts[0], options=parts[1:])
@staticmethod
def from_queries(queries: str) -> List["TransformConfig"]:
queries = [t.strip() for t in queries.split(",")]
for query in queries:
yield TransformConfig.from_query(query)
@dataclass
class StorageConfig:
type: str
options: Dict[str, Any] = field(default_factory=dict)
@dataclass
class Config:
storage: Dict[str, StorageConfig]
transform: Dict[str, List[TransformConfig]]
route: List[Tuple[str, RouteConfig]]
cache: Dict[str, Any]
def storage_of(self, key: ItemKey) -> StorageConfig:
route = self.route_of(key)
return self.storage[route.storage]
def route_of(self, key: ItemKey) -> RouteConfig:
for route in self.route:
pattern, config = route
if re.search(pattern, key.path) is not None:
return config
raise ConfigError(f"Cannot find config for {key.path}")
def transforms_of(self, key: ItemKey) -> List[TransformConfig]:
if key.transform in self.transform.keys():
return self.transform[key.transform]
return TransformConfig.from_queries(key.transform)
def allow_transform(self, key: ItemKey) -> bool:
route = self.route_of(key)
if not route.transform:
return False
if isinstance(route.transform, bool):
return True
if isinstance(route.transform, str):
transforms = [route.transform]
else:
transforms = route.transform
if key.transform in self.transform.keys():
return key.transform in transforms
configs = TransformConfig.from_queries(key.transform)
for config in configs:
if config.type not in transforms:
return False
return True
class ConfigLoader(ABC):
loaders: Dict[str, Type["ConfigLoader"]] = {}
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
for suffix in cls.support_suffixes():
if suffix not in cls.loaders:
cls.loaders[suffix] = cls
@classmethod
@abstractmethod
def support_suffixes(cls) -> List[str]:
raise NotImplementedError()
@classmethod
def load(cls, path: Union[Path, str]) -> Config:
if isinstance(path, str):
path = Path(path)
suffix = path.suffix
if suffix not in cls.loaders:
            raise ConfigError(f"{suffix} is an unknown format")
loader = cls.loaders[suffix]()
config = loader._load(path) # pylint: disable=protected-access
return config
@abstractmethod
def _load(self, path: Path) -> Config:
raise NotImplementedError()
class DictConfigLoader(ConfigLoader):
@classmethod
def support_suffixes(cls) -> List[str]:
return []
def _load(self, path: Path) -> Config:
config = self._load_dict(path)
storage = {}
for name, cfg in config.get("storage", {}).items():
storage[name] = StorageConfig(**cfg)
transform = {}
for name, cfg in config.get("transform", {}).items():
transform[name] = [TransformConfig(**c) for c in cfg]
route = []
for name, cfg in config.get("route", {}).items():
route.append((name, RouteConfig(**cfg)))
return Config(
storage=storage,
transform=transform,
route=route,
cache=config.get("cache", {})
)
@abstractmethod
def _load_dict(self, path: Path) -> Dict[str, Any]:
raise NotImplementedError()
class JsonConfigLoader(DictConfigLoader):
@classmethod
def support_suffixes(cls) -> List[str]:
return [".json"]
def _load_dict(self, path: Path) -> Dict[str, Any]:
with open(path, "r", encoding="utf-8") as file:
return json.load(file)
class YamlConfigLoader(DictConfigLoader):
@classmethod
def support_suffixes(cls) -> List[str]:
return [".yml", ".yaml"]
def _load_dict(self, path: Path) -> Dict[str, Any]:
yaml = YAML(typ="safe")
with open(path, "r", encoding="utf-8") as file:
return yaml.load(file)
| 30.296117 | 78 | 0.621054 | 5,667 | 0.908028 | 198 | 0.031726 | 4,286 | 0.686749 | 0 | 0 | 472 | 0.075629 |
27542459664ee95574391c6b162f6e4cf09c76b9 | 4,587 | py | Python | parser_utils.py | fgypas/panoptes | d85bf83905fd0e546cce11e00d4daf4da2199fbf | [
"MIT"
]
| 1 | 2019-11-03T22:08:19.000Z | 2019-11-03T22:08:19.000Z | parser_utils.py | fgypas/panoptes | d85bf83905fd0e546cce11e00d4daf4da2199fbf | [
"MIT"
]
| 27 | 2019-10-23T19:24:38.000Z | 2022-02-10T19:40:24.000Z | parser_utils.py | fgypas/panoptes | d85bf83905fd0e546cce11e00d4daf4da2199fbf | [
"MIT"
]
| null | null | null | import os
import re
from pprint import pprint
from pandas.io.json import json_normalize
import datetime
import argparse
import sys
DATE = 'date'
LEVEL = 'level'
TYPE = 'type'
CLASS = 'class'
MESSAGE = 'message'
def match_date(line):
match_this = ''
matched = re.match(r'\[\w\w\w\s\w\w\w \d\d \d\d:\d\d:\d\d\s\d\d\d\d\]', line)
if matched:
# matches a date and adds it to match_this
match_this = matched.group()
else:
match_this = 'NONE'
return match_this
def generate_dicts(log_fh):
current_dict = {}
for line in log_fh:
if line.startswith(match_date(line)):
if current_dict:
yield current_dict
current_dict = {DATE: line.split('__')[0][1:25],
# TYPE: temp[0],
# CLASS: temp[1].split(' ')[2],
MESSAGE: ''}
else:
if DATE in current_dict:
current_dict[MESSAGE] += line[:]
else:
pass
yield current_dict
def structure_snakemake_logs(logs):
"""
    Takes as input a parsed log dictionary.
Returns a structured object for each entry.
Two types of entries exist:
- Submitted rules/jobs
- Finished rules/jobs
Returns list of structured entries
"""
snakemake_log_objects = []
for log in logs:
if 'rule' in log['message']:
print(log["message"])
try:
rule = re.search(r'rule (\w+):', log['message']).group(1)
except:
rule = None
try:
input = re.search(r'input:\s(.*)', log['message']).group(1).split(",")
except Exception as e:
input = None
try:
output = re.search(r'output:\s(.*)', log['message']).group(1).split(",")
except:
output = None
try:
log_c = re.search(r'log:\s(.*)', log['message']).group(1)
except:
log_c = None
try:
wildcards = re.search(r'wildcards:\s(.*)', log['message']).group(1).split(",")
except Exception as e:
wildcards = None
try:
jobid = re.search(r'jobid:\s(\d+)', log['message']).group(1)
except Exception as e:
jobid = None
snakemake_log_objects.append({"job_type": 'submitted',
"job_id": jobid,
"rule": rule,
"input": input,
"output": output,
"log": log_c,
"wildcards": wildcards
})
elif "Finished job" in log['message']:
try:
job_id = re.search(r'Finished job (\d+)\.', log['message']).group(1)
progress = re.search(r'(\d+) of (\d+) steps \((\d+%)\) done', log['message']).group(1,2,3)
current_job = progress[0]
total_jobs = progress[1]
percent = progress[2]
except Exception as e:
current_job = None
total_jobs = None
percent = None
snakemake_log_objects.append({"job_type": 'finished',
"job_id": job_id,
"current_job": current_job,
"total_jobs": total_jobs,
"percent": percent
})
return snakemake_log_objects
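# Hypothetical illustration (not part of the original module): two minimal
# parsed-log entries, one of each kind, and what structure_snakemake_logs()
# extracts from them. The rule name and file names below are invented but
# follow Snakemake's log format.
def _example_structure_logs():
    parsed = [
        {DATE: 'Mon Jan 01 12:00:00 2018',
         MESSAGE: 'rule map_reads:\n    input: a.fq\n    output: a.bam\n    jobid: 3\n'},
        {DATE: 'Mon Jan 01 12:05:00 2018',
         MESSAGE: 'Finished job 3.\n1 of 10 steps (10%) done\n'},
    ]
    # First entry -> job_type 'submitted' with rule='map_reads', jobid='3';
    # second entry -> job_type 'finished' with 1 of 10 steps (10%) done.
    return structure_snakemake_logs(parsed)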
def main():
"""
-import_file "example_files/example.log"
-export_csv_file "exported_tabular.csv"
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument('-import_file', metavar='import_file', type=str,
help='Path to import the simulation json.')
parser.add_argument('-export_csv_file', metavar='export_csv_file', type=str,
help='Path to export the results')
args = parser.parse_args()
import_file = args.import_file
with open(import_file) as f:
parced_logs = list(generate_dicts(f))
pprint(parced_logs)
print(structure_snakemake_logs(parced_logs))
    #data = parced_logs.json_normalize()
#data.to_csv('exported.csv')
if __name__ == '__main__':
main()
| 30.177632 | 106 | 0.480706 | 0 | 0 | 553 | 0.120558 | 0 | 0 | 0 | 0 | 1,155 | 0.251799 |
2755a12d13ff285f1e00c9a95f222cd1d7e6e11b | 71 | py | Python | nimanifold/data/__init__.py | jcreinhold/nimanifold | 21e6546d24304a7ccfdd6a56d7ec47b2527e1c15 | [
"Apache-2.0"
]
| null | null | null | nimanifold/data/__init__.py | jcreinhold/nimanifold | 21e6546d24304a7ccfdd6a56d7ec47b2527e1c15 | [
"Apache-2.0"
]
| null | null | null | nimanifold/data/__init__.py | jcreinhold/nimanifold | 21e6546d24304a7ccfdd6a56d7ec47b2527e1c15 | [
"Apache-2.0"
]
| null | null | null | from nimanifold.data.csv import *
from nimanifold.data.sample import *
| 23.666667 | 36 | 0.802817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2756a87403668d7d8cbcd575aefad24bc180bbaa | 452 | py | Python | Simulations/Test.py | BoettigerLab/polychrom | b9d6b0e0f1a62def13ac28a32232c33628e43dae | [
"MIT"
]
| null | null | null | Simulations/Test.py | BoettigerLab/polychrom | b9d6b0e0f1a62def13ac28a32232c33628e43dae | [
"MIT"
]
| null | null | null | Simulations/Test.py | BoettigerLab/polychrom | b9d6b0e0f1a62def13ac28a32232c33628e43dae | [
"MIT"
]
| null | null | null | import sys
import os
import numpy as np
import ast
import pandas as pd
from LEBondUpdater import bondUpdater
import polychrom
from polychrom.starting_conformations import grow_cubic
from polychrom.hdf5_format import HDF5Reporter, list_URIs, load_URI, load_hdf5_file
from polychrom.simulation import Simulation
from polychrom import polymerutils
from polychrom import forces
from polychrom import forcekits
import time
print('running!')
import h5py
| 21.52381 | 83 | 0.85177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.022124 |
27574dceb612771ac763cff0815e740d67568d21 | 373 | py | Python | deca/gui/deca_interfaces.py | kk49/deca | 8a03ea5d1b7ae0d787638f1797b6e2cb46de4bae | [
"MIT"
]
| 50 | 2019-06-05T04:01:04.000Z | 2022-03-05T14:56:43.000Z | deca/gui/deca_interfaces.py | kk49/deca | 8a03ea5d1b7ae0d787638f1797b6e2cb46de4bae | [
"MIT"
]
| 115 | 2019-03-27T13:34:00.000Z | 2022-03-11T23:43:12.000Z | deca/gui/deca_interfaces.py | kk49/deca | 8a03ea5d1b7ae0d787638f1797b6e2cb46de4bae | [
"MIT"
]
| 13 | 2020-01-25T01:15:49.000Z | 2022-02-08T02:20:05.000Z | from PySide2.QtCore import Signal
from PySide2.QtWidgets import QWidget
from deca.db_view import VfsView
class IVfsViewSrc(QWidget):
signal_visible_changed = Signal(VfsView)
signal_selection_changed = Signal(VfsView)
def vfs_get(self):
return None
def vfs_view_get(self):
return None
def archive_open(self, selection):
pass
| 20.722222 | 46 | 0.723861 | 265 | 0.710456 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2758815a9272e4ba22d391476a74de67b2fc6c02 | 7,746 | py | Python | python/beached_probability.py | OceanParcels/BayesianAnalysis_SouthAtlantic | a808896ea9104931a6ad625531231525c6c12826 | [
"MIT"
]
| 1 | 2022-01-12T08:24:14.000Z | 2022-01-12T08:24:14.000Z | python/beached_probability.py | OceanParcels/BayesianAnalysis_SouthAtlantic | a808896ea9104931a6ad625531231525c6c12826 | [
"MIT"
]
| null | null | null | python/beached_probability.py | OceanParcels/BayesianAnalysis_SouthAtlantic | a808896ea9104931a6ad625531231525c6c12826 | [
"MIT"
]
| null | null | null | """
Computes the probability field of beached particles from Ocean Parcels
simulations. Computes the posterior probability in the latitude of the beached
particles.
"""
import numpy as np
import xarray as xr
import pandas as pd
import os
def time_averaging_coast(array, window=30):
"""It averages the counts_america and computes a probability map that adds
up to 100%. It is built for the Beached particles 2D array.
Parameters
----------
array: array
2D array with dimensions (time, space). The time averaging
happens in axis=0 of the array.
window: int, optional
The time window for the averaging. Default value is 30 (days).
    Returns
    -------
    averaged: array
        Time-averaged fields with dimensions (time//window, space); each
        window is normalized in space so that it sums to 1.
    time_array:
        1D array marking the window jumps; used as the time coordinate
        when saving the output datasets.
"""
nt, ny = array.shape
new_t_dim = nt//window
averaged = np.zeros((new_t_dim, ny))
time_array = np.arange(window, nt, window)
for t in range(0, new_t_dim):
index_slice = slice((t)*window, (t+1)*window)
mean_aux = np.mean(array[index_slice, :], axis=0)
if mean_aux.sum() == 0:
print(f'-- mean_aux.sum() = {mean_aux.sum()}')
averaged[t] = np.zeros_like(mean_aux)
else:
averaged[t] = mean_aux/mean_aux.sum()
print('-- Normalized?', averaged[t].sum())
return averaged, time_array
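# Hypothetical usage sketch (illustration only): with 90 daily count fields
# over 75 latitude bins and the default 30-day window, the result has one
# normalized row per window.
def _example_time_averaging():
    daily_counts = np.ones((90, 75))
    averaged, window_edges = time_averaging_coast(daily_counts, window=30)
    # averaged.shape == (3, 75) and each row sums to 1.0
    return averaged, window_edges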
# Creating the directory to store the analysis dataset
newpath = r'../analysis/'
if not os.path.exists(newpath):
os.makedirs(newpath)
path2folder = '../PierardBassottoMeirervanSebille_AttributionofPlastic/'
###############################################################################
# Setting the parameters
###############################################################################
compute_mean = True # True if you want to compute the average probability
average_window = 1234 # window size for computing the probability
print(f'Compute mean == {compute_mean}!')
domain_limits = [[-73, 25], [-80, -5]]
number_bins = (98, 75) # defined with respect to domain_limits to be 1x1 cell
half_point = number_bins[0]//2
lat_range = np.linspace(domain_limits[1][0], domain_limits[1][1],
number_bins[1])
# Loading priors. Computed with release_points.py script.
priors = pd.read_csv(path2folder + 'priors_river_inputs.csv',
index_col=0)
sources = list(priors.index)
number_sources = len(sources)
# Empty dictionaries to store computed probabilities.
counts_america = {}
counts_africa = {}
likelihood_america = {}
posterior_america = {}
likelihood_africa = {}
posterior_africa = {}
avg_label = ''
###############################################################################
# Building the histograms
###############################################################################
print('Building histograms')
time_dimensions = []
for loc in sources:
print(f'- {loc}')
path_2_file = path2folder + f"sa-s06-{loc}.nc"
particles = xr.load_dataset(path_2_file)
n = particles.dims['traj']
time = particles.dims['obs']
time_dimensions.append(time)
# filter the particles that beached
particles = particles.where((particles.beach == 1))
h_ame = np.zeros((time, number_bins[1]))
h_afr = np.zeros((time, number_bins[1]))
# beached_loc = np.zeros(time)
for t in range(time):
lons = particles['lon'][:, t].values
lats = particles['lat'][:, t].values
index = np.where(~np.isnan(lons))
lons = lons[index]
lats = lats[index]
# Compute the histogram
H, x_edges, y_edges = np.histogram2d(lons, lats, bins=number_bins,
range=domain_limits)
H = np.nan_to_num(H) # drop nans or covert them to zeros
count_ame = np.sum(H[:55, :], axis=0) # west meridional sum
count_afr = np.sum(H[80:-5, :], axis=0) # east meridional sum
h_ame[t] = count_ame
h_afr[t] = count_afr
counts_america[loc] = h_ame
counts_africa[loc] = h_afr
time = min(time_dimensions)
###############################################################################
# To average or not to average, that's the question.
###############################################################################
if compute_mean:
print('Averaging histograms and computing likelihood')
for loc in sources:
print(f'- {loc}')
mean_ame, time_range = time_averaging_coast(counts_america[loc],
window=average_window)
mean_afr, _ = time_averaging_coast(counts_africa[loc],
window=average_window)
likelihood_america[loc] = mean_ame
likelihood_africa[loc] = mean_afr
time = time//average_window
avg_label = f'average_{average_window}'
else:
# convert counts to likelihood. The counts were normalized in line ~120.
likelihood_america = counts_america
likelihood_africa = counts_africa
time_range = np.arange(0, time, 1)
###############################################################################
# Normalizing constant (sum of all hypothesis)
###############################################################################
print('Computing normalizing constant')
normalizing_constant = np.zeros((time, 2, number_bins[1]))
# normalizing_constant_afr = np.zeros((time, 2, number_bins))
for t in range(time):
total = np.zeros((number_sources, 2, number_bins[1]))
for j, loc in enumerate(sources):
total[j, 0] = likelihood_america[loc][t]*priors['prior'][loc]
total[j, 1] = likelihood_africa[loc][t]*priors['prior'][loc]
normalizing_constant[t, 0] = np.sum(total[:, 0, :], axis=0)
normalizing_constant[t, 1] = np.sum(total[:, 1, :], axis=0)
###############################################################################
# Posterior probability
###############################################################################
print('Computing posterior probability')
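# For reference, the loop below is Bayes' rule applied per latitude bin and
# time window: for source k,
#     P(k | beached at y) =
#         likelihood_k(y) * prior_k / sum_j [likelihood_j(y) * prior_j],
# where the denominator is the normalizing constant computed above.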
for k, loc in enumerate(sources):
aux_ame = np.zeros((time, number_bins[1]))
aux_afr = np.zeros((time, number_bins[1]))
for t in range(time):
aux_ame[t] = likelihood_america[loc][t]*priors['prior'][loc] / \
normalizing_constant[t, 0]
aux_afr[t] = likelihood_africa[loc][t]*priors['prior'][loc] / \
normalizing_constant[t, 1]
posterior_america[loc] = (["time", "y"], aux_ame)
posterior_africa[loc] = (["time", "y"], aux_afr)
###############################################################################
# Saving the likelihood & posterior as netCDFs
###############################################################################
coordinates = dict(time=time_range,
lat=(["y"], lat_range))
attributes = {'description': "Beached posterior probability for America.",
'average_window': average_window}
# Posterior dataset
post_ame = xr.Dataset(data_vars=posterior_america,
coords=coordinates,
attrs=attributes)
attributes = {'description': "Beached posterior probability for Africa.",
'average_window': average_window}
# Posterior dataset
post_afr = xr.Dataset(data_vars=posterior_africa,
coords=coordinates,
attrs=attributes)
output_path_ame = newpath + f'beach_posterior_America_{avg_label}.nc'
output_path_afr = newpath + f'beach_posterior_Africa_{avg_label}.nc'
post_ame.to_netcdf(output_path_ame)
post_afr.to_netcdf(output_path_afr)
| 35.53211 | 79 | 0.572941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,341 | 0.431319 |
2759eded27b5f65ad495b43a6846ccc30736fba9 | 2,985 | py | Python | models/cam_decoder.py | BwCai/DCAA-UDA | 359c2122060aebfbe4384c918768c261fe2dc9c7 | [
"Apache-2.0"
]
| 2 | 2022-01-28T10:35:53.000Z | 2022-03-09T14:38:59.000Z | models/cam_decoder.py | BwCai/DCAA-UDA | 359c2122060aebfbe4384c918768c261fe2dc9c7 | [
"Apache-2.0"
]
| 1 | 2022-03-07T10:48:11.000Z | 2022-03-07T10:48:11.000Z | models/cam_decoder.py | BwCai/DCAA-UDA | 359c2122060aebfbe4384c918768c261fe2dc9c7 | [
"Apache-2.0"
]
| null | null | null | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
from models.aspp import build_aspp
from models.decoder import build_decoder
import pdb
class AttentionDecoder(nn.Module):
def __init__(self, num_classes, modal_num, backbone, BatchNorm):
super(AttentionDecoder, self).__init__()
backbone = 'resnet'
if backbone == 'resnet' or backbone == 'drn':
inplanes = 256 * modal_num
elif backbone == 'xception':
inplanes = 128 * modal_num
elif backbone == 'mobilenet':
inplanes = 24 * modal_num
else:
raise NotImplementedError
self.modal_num = modal_num
# attention sequential
self.att_conv = nn.Sequential(
nn.Conv2d(inplanes, 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm(256) if BatchNorm!=nn.GroupNorm else BatchNorm(16, 256),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Conv2d(256, modal_num, kernel_size=1, stride=1, bias=False),
            nn.Softmax(dim=1),  # normalize attention weights across the modal channels
)
self.last_conv = nn.Sequential(
nn.Conv2d(256 * (modal_num + 1), 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm(256) if BatchNorm!=nn.GroupNorm else BatchNorm(16, 256),
nn.ReLU(),
nn.Dropout(0.5),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm(256) if BatchNorm!=nn.GroupNorm else BatchNorm(16, 256),
nn.ReLU(),
nn.Dropout(0.1),
nn.Conv2d(256, num_classes, kernel_size=1, stride=1))
self._init_weight()
def forward(self, x_in, low_level_feat):
x = x_in.copy()
_b, _c, _w, _h = x[0].size()
modal_x = torch.cat(x, dim=1) # B x 2C x W x H
# attention module
att_mask = self.att_conv(modal_x) # B x 2 x W x H
feat_x = x[0] * torch.unsqueeze(att_mask[:, 0, :, :], 1)
for _i in range(1, self.modal_num):
feat_x += x[_i] * torch.unsqueeze(att_mask[:, _i, :, :], 1)
x.append(feat_x)
residual_x = torch.cat(x, dim=1)
for _j in range(len(self.last_conv)-1):
residual_x = self.last_conv[_j](residual_x)
out = self.last_conv[-1](residual_x)
return att_mask, residual_x, out
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def build_attention_decoder(num_classes, modal_num, backbone, BatchNorm):
return AttentionDecoder(num_classes, modal_num, backbone, BatchNorm)
| 37.78481 | 98 | 0.59665 | 2,601 | 0.871357 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.037856 |
275bb771b2d649e2d94b8d769d96499bc3dc8e16 | 2,505 | py | Python | src/server/noize_reduction.py | kikuchiken-waseda/MRIVuewer | 66cfe30d006b6491a093a9dfab5b538c5e49409e | [
"MIT"
]
| null | null | null | src/server/noize_reduction.py | kikuchiken-waseda/MRIVuewer | 66cfe30d006b6491a093a9dfab5b538c5e49409e | [
"MIT"
]
| 23 | 2020-02-11T19:13:24.000Z | 2020-05-16T07:49:43.000Z | src/server/noize_reduction.py | kikuchiken-waseda/MRIVuewer | 66cfe30d006b6491a093a9dfab5b538c5e49409e | [
"MIT"
]
| null | null | null | import scipy as sp
from pyssp.util import (
get_frame, add_signal, compute_avgpowerspectrum
)
def writeWav(param, signal, filename):
import wave
with wave.open(filename, 'wb') as wf:
wf.setparams(param)
s = sp.int16(signal * 32767.0).tostring()
wf.writeframes(s)
def jointMap(signal, params, **kwargs):
from pyssp.voice_enhancement import JointMap
# Setting for JM
ntime = kwargs.get('ntime', 300)
ratio = kwargs.get('ratio', 0.9)
winsize = kwargs.get('winsize', 256)
alpha = kwargs.get('alpha', 0.99)
constant = kwargs.get('constant', 0.001)
window = sp.hanning(winsize)
n_pow = compute_avgpowerspectrum(
signal[0:winsize * int(params[2] / float(winsize) / (1000.0/ntime))],
winsize, window
)
nf = int(len(signal) / (winsize / 2) - 1)
result = sp.zeros(len(signal), sp.float32)
ss = JointMap(
winsize, window,
alpha=alpha, ratio=ratio,
constant=constant
)
for no in range(nf):
s = get_frame(signal, winsize, no)
add_signal(result, ss.compute_by_noise_pow(s, n_pow), winsize, no)
return params, result
def videoRead(videoclip, winsize=256):
from wave import open
from os import remove
tmp = 'tmp.wav'
audioclip = videoclip.audio
audioclip.write_audiofile(tmp)
with open(tmp) as wf:
n = wf.getnframes()
frames = wf.readframes(n)
params = (
(
wf.getnchannels(), wf.getsampwidth(), wf.getframerate(),
wf.getnframes(), wf.getcomptype(), wf.getcompname()
)
)
siglen = ((int)(len(frames) / 2 / winsize) + 1) * winsize
signal = sp.zeros(siglen, sp.float32)
signal[0:int(len(frames) / 2)] = sp.float32(
sp.fromstring(frames, sp.int16)
) / 32767.0
remove(tmp)
return signal, params
def normalization_from_video(fname, outfile, **kwargs):
from glob import glob
from os import remove
from moviepy.editor import VideoFileClip, AudioFileClip
tmp = 'tmp.wav'
video = VideoFileClip(fname)
winsize = kwargs.get('winsize', 256)
signal, params = videoRead(video, winsize)
kwargs.update({'params': params})
params, result = jointMap(signal, **kwargs)
writeWav(params, result, tmp)
newAudio = AudioFileClip(tmp)
newVideo = video.set_audio(newAudio)
newVideo.write_videofile(outfile)
remove(tmp)
for t in glob('*.mp3'):
remove(t)
| 30.54878 | 77 | 0.621158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.040719 |
275bf3b0ea75846995ad189f786825044efb445e | 4,940 | py | Python | chord_rec/harmalysis/classes/scale.py | TianxueHu/ChordSymbolRec | d64a5be4f4914e6f682cb6d4079d7ba8a6fc2eac | [
"Unlicense",
"MIT"
]
| null | null | null | chord_rec/harmalysis/classes/scale.py | TianxueHu/ChordSymbolRec | d64a5be4f4914e6f682cb6d4079d7ba8a6fc2eac | [
"Unlicense",
"MIT"
]
| null | null | null | chord_rec/harmalysis/classes/scale.py | TianxueHu/ChordSymbolRec | d64a5be4f4914e6f682cb6d4079d7ba8a6fc2eac | [
"Unlicense",
"MIT"
]
| null | null | null | '''
harmalysis - a language for harmonic analysis and roman numerals
Copyright (C) 2020 Nestor Napoles Lopez
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import harmalysis.common
from harmalysis.classes import interval
class MajorScale(object):
def __init__(self):
self._qualities = [
# Starting from I
['P', 'M', 'M', 'P', 'P', 'M', 'M'],
# Starting from II
['P', 'M', 'm', 'P', 'P', 'M', 'm'],
# Starting from III
['P', 'm', 'm', 'P', 'P', 'm', 'm'],
# Starting from IV
['P', 'M', 'M', 'A', 'P', 'M', 'M'],
# Starting from V
['P', 'M', 'M', 'P', 'P', 'M', 'm'],
# Starting from VI
['P', 'M', 'm', 'P', 'P', 'm', 'm'],
# Starting from VII
['P', 'm', 'm', 'P', 'D', 'm', 'm'],
]
self._semitones = [
# Starting from I
[0, 2, 4, 5, 7, 9, 11],
# Starting from II
[0, 2, 3, 5, 7, 9, 10],
# Starting from III
[0, 1, 3, 5, 7, 8, 10],
# Starting from IV
[0, 2, 4, 6, 7, 9, 11],
# Starting from V
[0, 2, 4, 5, 7, 9, 10],
# Starting from VI
[0, 2, 3, 5, 7, 8, 10],
# Starting from VII
[0, 1, 3, 5, 6, 8, 10],
]
def step_to_interval_spelling(self, step, mode=1):
qualities = self._qualities[(mode - 1) % harmalysis.common.DIATONIC_CLASSES]
quality = qualities[(step - 1) % harmalysis.common.DIATONIC_CLASSES]
return interval.IntervalSpelling(quality, step)
def step_to_semitones(self, step, mode=1):
semitones = self._semitones[(mode - 1) % harmalysis.common.DIATONIC_CLASSES]
step_semitones = semitones[(step - 1) % harmalysis.common.DIATONIC_CLASSES]
octaves = (step - 1) // harmalysis.common.DIATONIC_CLASSES
distance = (12 * octaves) + step_semitones
return distance
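# Hypothetical illustration (not part of the original module): in the major
# scale (mode=1) scale degree 5 maps to a perfect fifth, 7 semitones above
# the tonic, and degree 9 (a compound 2nd) adds an octave: 2 + 12 = 14.
def _example_major_scale_steps():
    scale = MajorScale()
    assert scale.step_to_semitones(5) == 7
    assert scale.step_to_semitones(9) == 14
    return scale.step_to_interval_spelling(5)  # a perfect fifth, IntervalSpelling('P', 5)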
class NaturalMinorScale(MajorScale):
def __init__(self):
super().__init__()
self._qualities = [
['P', 'M', 'm', 'P', 'P', 'm', 'm'],
['P', 'm', 'm', 'P', 'D', 'm', 'm'],
['P', 'M', 'M', 'P', 'P', 'M', 'M'],
['P', 'M', 'm', 'P', 'P', 'M', 'm'],
['P', 'm', 'm', 'P', 'P', 'm', 'm'],
['P', 'M', 'M', 'A', 'P', 'M', 'M'],
['P', 'M', 'M', 'P', 'P', 'M', 'm'],
]
self._semitones = [
[0, 2, 3, 5, 7, 8, 10],
[0, 1, 3, 5, 6, 8, 10],
[0, 2, 4, 5, 7, 9, 11],
[0, 2, 3, 5, 7, 9, 10],
[0, 1, 3, 5, 7, 8, 10],
[0, 2, 4, 6, 7, 9, 11],
[0, 2, 4, 5, 7, 9, 10],
]
class HarmonicMinorScale(NaturalMinorScale):
def __init__(self):
super().__init__()
self._qualities = [
['P', 'M', 'm', 'P', 'P', 'm', 'M'],
['P', 'm', 'm', 'P', 'D', 'M', 'm'],
['P', 'M', 'M', 'P', 'A', 'M', 'M'],
['P', 'M', 'm', 'A', 'P', 'M', 'm'],
['P', 'm', 'M', 'P', 'P', 'm', 'm'],
['P', 'A', 'M', 'A', 'P', 'M', 'M'],
['P', 'm', 'm', 'D', 'D', 'm', 'D'],
]
self._semitones = [
[0, 2, 3, 5, 7, 8, 11],
[0, 1, 3, 5, 6, 9, 10],
[0, 2, 4, 5, 6, 9, 11],
[0, 2, 3, 6, 7, 9, 10],
[0, 1, 4, 5, 7, 8, 10],
[0, 3, 4, 6, 7, 9, 11],
[0, 1, 3, 4, 6, 8, 9],
]
class AscendingMelodicMinorScale(HarmonicMinorScale):
def __init__(self):
super().__init__()
self._qualities = [
['P', 'M', 'm', 'P', 'P', 'M', 'M'],
['P', 'm', 'm', 'P', 'P', 'M', 'm'],
['P', 'M', 'M', 'A', 'A', 'M', 'M'],
['P', 'M', 'M', 'A', 'P', 'M', 'm'],
['P', 'M', 'M', 'P', 'P', 'm', 'm'],
['P', 'M', 'm', 'P', 'D', 'm', 'm'],
['P', 'm', 'm', 'D', 'D', 'm', 'm'],
]
self._semitones = [
[0, 2, 3, 5, 7, 9, 11],
[0, 1, 3, 5, 7, 9, 10],
[0, 2, 4, 6, 8, 9, 11],
[0, 2, 4, 6, 7, 9, 10],
[0, 2, 4, 5, 7, 8, 10],
[0, 2, 3, 5, 6, 8, 10],
[0, 1, 3, 4, 6, 8, 10]
] | 35.285714 | 84 | 0.411943 | 4,088 | 0.82753 | 0 | 0 | 0 | 0 | 0 | 0 | 1,615 | 0.326923 |
275c01a7663aa8d447750578c192b3d7fd40d88b | 1,335 | py | Python | airq/api/crypto.py | dwightmulcahy/airq | 82e97825f70852d7fab973e498ef71345e1ed422 | [
"MIT"
]
| 6 | 2020-11-25T10:34:52.000Z | 2022-02-18T22:22:12.000Z | airq/api/crypto.py | dwightmulcahy/airq | 82e97825f70852d7fab973e498ef71345e1ed422 | [
"MIT"
]
| 6 | 2021-10-15T06:49:14.000Z | 2021-11-09T20:30:01.000Z | airq/api/crypto.py | dwightmulcahy/airq | 82e97825f70852d7fab973e498ef71345e1ed422 | [
"MIT"
]
| 2 | 2021-04-18T03:11:35.000Z | 2022-01-01T23:02:51.000Z | import hashlib
from Crypto.Cipher import AES
class Crypto:
SALT = "@uhooinc.com"
def __init__(self, clientCode):
self.key = hashlib.md5(
clientCode.encode("utf-8")
).digest() # initialization key
self.length = AES.block_size # Initialize the block size
self.aes = AES.new(
self.key, AES.MODE_ECB
) # Initialize AES, an instance of ECB mode
# Truncate function to remove padded characters
self.unpad = lambda date: date[0 : -ord(date[-1])]
def pad(self, text):
"""
        Pad the text so that its byte length is an integer multiple of the AES block size.
"""
text = str(text, encoding="utf-8")
count = len(text)
add = self.length - (count % self.length)
entext = text + (chr(add) * add)
return bytes(entext, encoding="utf-8")
def encrypt(self, uid, password):
passwordSalted = uid + password + Crypto.SALT
passwordHashed = (
hashlib.sha256(passwordSalted.encode("utf-8")).hexdigest().encode("utf-8")
)
res = self.aes.encrypt(self.pad(passwordHashed))
return res
def decrypt(self, decrData):
res = decrData
msg = self.aes.decrypt(res).decode("utf8")
return self.unpad(msg)
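# Hypothetical round-trip sketch (illustration only; the client code and
# credentials below are made up). encrypt() pads and enciphers the salted
# SHA-256 hex digest, and decrypt() on a second instance built with the same
# client code recovers that digest.
def _example_round_trip():
    sender = Crypto("example-client-code")
    receiver = Crypto("example-client-code")
    token = sender.encrypt("user@example.com", "secret")
    digest = receiver.decrypt(token)
    expected = hashlib.sha256(
        ("user@example.com" + "secret" + Crypto.SALT).encode("utf-8")
    ).hexdigest()
    assert digest == expected
    return digest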
| 32.560976 | 112 | 0.597753 | 1,287 | 0.964045 | 0 | 0 | 0 | 0 | 0 | 0 | 318 | 0.238202 |
275c77321e139727fd234a2be346f888681c1954 | 8,350 | py | Python | idaes/apps/caprese/examples/cstr_rodrigo/nmpc_main.py | Robbybp/idaes-pse | 8a41dbd05819f82806cf17a6e5f06aef79a775e3 | [
"RSA-MD"
]
| null | null | null | idaes/apps/caprese/examples/cstr_rodrigo/nmpc_main.py | Robbybp/idaes-pse | 8a41dbd05819f82806cf17a6e5f06aef79a775e3 | [
"RSA-MD"
]
| 2 | 2021-08-18T19:42:02.000Z | 2021-10-22T04:44:31.000Z | idaes/apps/caprese/examples/cstr_rodrigo/nmpc_main.py | Robbybp/idaes-pse | 8a41dbd05819f82806cf17a6e5f06aef79a775e3 | [
"RSA-MD"
]
| 1 | 2021-03-17T20:31:17.000Z | 2021-03-17T20:31:17.000Z | ##############################################################################
# Institute for the Design of Advanced Energy Systems Process Systems
# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2019, by the
# software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia
# University Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and
# license information, respectively. Both files are also available online
# at the URL "https://github.com/IDAES/idaes-pse".
##############################################################################
"""
Example for Caprese's module for NMPC.
Main script for running the example.
"""
import random
from idaes.apps.caprese.dynamic_builder import DynamicSim
from idaes.apps.caprese.util import apply_noise_with_bounds
from pyomo.environ import SolverFactory, Reference
from pyomo.dae.initialization import solve_consistent_initial_conditions
import idaes.logger as idaeslog
from idaes.apps.caprese.examples.cstr_rodrigo.cstr_rodrigo_model import make_model
from idaes.apps.caprese.data_manager import PlantDataManager
from idaes.apps.caprese.data_manager import ControllerDataManager
from idaes.apps.caprese.plotlibrary import (
plot_setpoint_tracking_results,
plot_control_input)
__author__ = "Kuan-Han Lin"
# See if ipopt is available and set up solver
if SolverFactory('ipopt').available():
solver = SolverFactory('ipopt')
solver.options = {
'tol': 1e-6,
'bound_push': 1e-8,
'halt_on_ampl_error': 'yes',
'linear_solver': 'ma57',
}
else:
solver = None
def main():
m_controller = make_model(horizon=10, ntfe=5, ntcp=2, bounds=True)
sample_time = 2.
m_plant = make_model(horizon=sample_time, ntfe=2, ntcp=2, bounds = True)
time_plant = m_plant.t
simulation_horizon = 20
n_samples_to_simulate = round(simulation_horizon/sample_time)
samples_to_simulate = [time_plant.first() + i*sample_time
for i in range(1, n_samples_to_simulate)]
# We must identify for the controller which variables are our
# inputs and measurements.
inputs = [
m_plant.Tjinb[0],
]
measurements = [
m_plant.Tall[0, "T"],
m_plant.Tall[0, "Tj"],
m_plant.Ca[0],
]
# Construct the "NMPC simulator" object
nmpc = DynamicSim(
plant_model=m_plant,
plant_time_set=m_plant.t,
controller_model=m_controller,
controller_time_set=m_controller.t,
inputs_at_t0=inputs,
measurements_at_t0=measurements,
sample_time=sample_time,
)
plant = nmpc.plant
controller = nmpc.controller
p_t0 = nmpc.plant.time.first()
c_t0 = nmpc.controller.time.first()
p_ts = nmpc.plant.sample_points[1]
c_ts = nmpc.controller.sample_points[1]
#--------------------------------------------------------------------------
# Declare variables of interest for plotting.
# It's ok not declaring anything. The data manager will still save some
# important data.
states_of_interest = [Reference(nmpc.plant.mod.Ca[:]),
Reference(nmpc.plant.mod.Tall[:, "T"])]
plant_data= PlantDataManager(plant, states_of_interest)
controller_data= ControllerDataManager(controller, states_of_interest)
#--------------------------------------------------------------------------
solve_consistent_initial_conditions(plant, plant.time, solver)
solve_consistent_initial_conditions(controller, controller.time, solver)
# We now perform the "RTO" calculation: Find the optimal steady state
# to achieve the following setpoint
setpoint = [(controller.mod.Ca[0], 0.018)]
setpoint_weights = [(controller.mod.Ca[0], 1.)]
# nmpc.controller.add_setpoint_objective(setpoint, setpoint_weights)
# nmpc.controller.solve_setpoint(solver)
nmpc.controller.add_single_time_optimization_objective(setpoint,
setpoint_weights)
nmpc.controller.solve_single_time_optimization(solver,
ic_type = "measurement_var",
require_steady = True,
load_setpoints = True)
# Now we are ready to construct the tracking NMPC problem
tracking_weights = [
*((v, 1.) for v in nmpc.controller.vectors.differential[:,0]),
*((v, 1.) for v in nmpc.controller.vectors.input[:,0]),
]
nmpc.controller.add_tracking_objective(tracking_weights)
nmpc.controller.constrain_control_inputs_piecewise_constant()
nmpc.controller.initialize_to_initial_conditions()
# Solve the first control problem
nmpc.controller.vectors.input[...].unfix()
nmpc.controller.vectors.input[:,0].fix()
solver.solve(nmpc.controller, tee=True)
controller_data.save_controller_data(iteration = 0)
#-------------------------------------------------------------------------
#noise for measurements
variance = [
(nmpc.controller.mod.Tall[0, "T"], 0.05),
(nmpc.controller.mod.Tall[0, "Tj"], 0.02),
(nmpc.controller.mod.Ca[0], 1.0E-5),
]
nmpc.controller.set_variance(variance)
measurement_variance = [
v.variance for v in controller.MEASUREMENT_BLOCK[:].var
]
measurement_noise_bounds = [
(var[c_t0].lb, var[c_t0].ub)
for var in controller.MEASUREMENT_BLOCK[:].var
]
# noise for inputs
variance = [
(plant.mod.Tjinb[0], 0.01),
]
nmpc.plant.set_variance(variance)
input_variance = [v.variance for v in plant.INPUT_BLOCK[:].var]
input_noise_bounds = [
(var[p_t0].lb, var[p_t0].ub) for var in plant.INPUT_BLOCK[:].var
]
random.seed(246)
#-------------------------------------------------------------------------
# Extract inputs from controller and inject them into plant
inputs = controller.generate_inputs_at_time(c_ts)
plant.inject_inputs(inputs)
plant_data.save_initial_plant_data()
# This "initialization" really simulates the plant with the new inputs.
nmpc.plant.initialize_by_solving_elements(solver)
nmpc.plant.vectors.input[...].fix() #Fix the input to solve the plant
solver.solve(nmpc.plant, tee = True)
plant_data.save_plant_data(iteration = 0)
for i in range(1, n_samples_to_simulate +1):
print('\nENTERING NMPC LOOP ITERATION %s\n' % i)
measured = nmpc.plant.generate_measurements_at_time(p_ts)
nmpc.plant.advance_one_sample()
nmpc.plant.initialize_to_initial_conditions()
measured = apply_noise_with_bounds(
measured,
measurement_variance,
random.gauss,
measurement_noise_bounds,
)
nmpc.controller.advance_one_sample()
nmpc.controller.load_initial_conditions(measured)
solver.solve(nmpc.controller, tee=True)
controller_data.save_controller_data(iteration = i)
inputs = controller.generate_inputs_at_time(c_ts)
inputs = apply_noise_with_bounds(
inputs,
input_variance,
random.gauss,
input_noise_bounds,
)
plant.inject_inputs(inputs)
nmpc.plant.initialize_by_solving_elements(solver)
nmpc.plant.vectors.input[...].fix() #Fix the input to solve the plant
solver.solve(nmpc.plant, tee = True)
plant_data.save_plant_data(iteration = i)
plot_setpoint_tracking_results(states_of_interest,
plant_data.plant_df,
controller_data.setpoint_df)
inputs_to_plot = [Reference(nmpc.plant.mod.Tjinb[:])]
plot_control_input(inputs_to_plot, plant_data.plant_df)
return nmpc, plant_data, controller_data
if __name__ == '__main__':
nmpc, plant_data, controller_data = main()
| 38.127854 | 82 | 0.623713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,174 | 0.260359 |
275e2811cd73b93eae3878d64929106994338a2d | 1,642 | py | Python | apps/scheduler/migrations/0008_current_task.py | techlib/czechelib-stats | ca132e326af0924740a525710474870b1fb5fd37 | [
"MIT"
]
| 1 | 2019-12-12T15:38:42.000Z | 2019-12-12T15:38:42.000Z | apps/scheduler/migrations/0008_current_task.py | techlib/czechelib-stats | ca132e326af0924740a525710474870b1fb5fd37 | [
"MIT"
]
| null | null | null | apps/scheduler/migrations/0008_current_task.py | techlib/czechelib-stats | ca132e326af0924740a525710474870b1fb5fd37 | [
"MIT"
]
| null | null | null | # Generated by Django 3.1.3 on 2020-11-23 15:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scheduler', '0007_fetchintention_one_to_one_attempt'),
]
operations = [
migrations.AddField(
model_name='scheduler',
name='current_celery_task_id',
field=models.UUIDField(blank=True, null=True),
),
migrations.AddField(
model_name='scheduler',
name='current_intention',
field=models.OneToOneField(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name='current_scheduler',
to='scheduler.fetchintention',
),
),
migrations.AddField(
model_name='scheduler',
name='current_start',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='fetchintention',
name='scheduler',
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name='intentions',
to='scheduler.scheduler',
),
),
migrations.AlterField(
model_name='fetchintention',
name='when_processed',
field=models.DateTimeField(
blank=True, help_text='When fetch intention was processed', null=True
),
),
]
| 30.407407 | 85 | 0.54933 | 1,516 | 0.923264 | 0 | 0 | 0 | 0 | 0 | 0 | 362 | 0.220463 |
275e8507ce35dccac0615b8d962c545d75b3823a | 2,555 | py | Python | connect_four/transposition/sqlite_transposition_table_test.py | rpachauri/connect4 | 6caf6965afaaff6883193ac295c6ac5b1f4e9c4a | [
"MIT"
]
| null | null | null | connect_four/transposition/sqlite_transposition_table_test.py | rpachauri/connect4 | 6caf6965afaaff6883193ac295c6ac5b1f4e9c4a | [
"MIT"
]
| null | null | null | connect_four/transposition/sqlite_transposition_table_test.py | rpachauri/connect4 | 6caf6965afaaff6883193ac295c6ac5b1f4e9c4a | [
"MIT"
]
| null | null | null | import unittest
import gym
import numpy as np
from connect_four.hashing import TicTacToeHasher
from connect_four.transposition.sqlite_transposition_table import SQLiteTranspositionTable
class TestSQLiteTranspositionTable(unittest.TestCase):
def setUp(self) -> None:
self.env = gym.make('tic_tac_toe-v0')
def test_save_and_retrieve(self):
self.env.state = np.array([
[
[0, 0, 0, ],
[0, 0, 0, ],
[0, 0, 0, ],
],
[
[0, 0, 0, ],
[0, 0, 0, ],
[0, 0, 0, ],
],
])
transposition = TicTacToeHasher(self.env).hash()
tt = SQLiteTranspositionTable(database_file=":memory:")
want_phi, want_delta = 1, 1
tt.save(transposition=transposition, phi=want_phi, delta=want_delta)
self.assertIn(transposition, tt)
got_phi, got_delta = tt.retrieve(transposition=transposition)
self.assertEqual(want_phi, got_phi)
self.assertEqual(want_delta, got_delta)
def test_overwrite_save(self):
self.env.state = np.array([
[
[0, 0, 0, ],
[0, 0, 0, ],
[0, 0, 0, ],
],
[
[0, 0, 0, ],
[0, 0, 0, ],
[0, 0, 0, ],
],
])
transposition = TicTacToeHasher(self.env).hash()
tt = SQLiteTranspositionTable(database_file=":memory:")
tt.save(transposition=transposition, phi=1, delta=1)
want_phi, want_delta = 2, 2
tt.save(transposition=transposition, phi=want_phi, delta=want_delta)
got_phi, got_delta = tt.retrieve(transposition=transposition)
self.assertEqual(want_phi, got_phi)
self.assertEqual(want_delta, got_delta)
def test_close_and_reload(self):
self.env.state = np.array([
[
[0, 0, 0, ],
[0, 0, 0, ],
[0, 0, 0, ],
],
[
[0, 0, 0, ],
[0, 0, 0, ],
[0, 0, 0, ],
],
])
transposition = TicTacToeHasher(self.env).hash()
tt = SQLiteTranspositionTable(database_file="sqlite_test.db")
tt.save(transposition=transposition, phi=1, delta=1)
tt.close()
tt2 = SQLiteTranspositionTable(database_file="sqlite_test.db")
self.assertIn(transposition, tt2)
tt2.close()
if __name__ == '__main__':
unittest.main()
| 30.416667 | 90 | 0.523288 | 2,315 | 0.906067 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.030528 |
275ea88f14a647fe3701cbe45b6f30ea2d89fba6 | 1,226 | py | Python | test/unittests/study/mri/test_mc.py | szho42/banana | 131804803e6293716e9d11cbb6f3ca244b2931f9 | [
"Apache-2.0"
]
| null | null | null | test/unittests/study/mri/test_mc.py | szho42/banana | 131804803e6293716e9d11cbb6f3ca244b2931f9 | [
"Apache-2.0"
]
| null | null | null | test/unittests/study/mri/test_mc.py | szho42/banana | 131804803e6293716e9d11cbb6f3ca244b2931f9 | [
"Apache-2.0"
]
| null | null | null | from nipype import config
config.enable_debug_mode()
from banana.testing import BaseTestCase as TestCase # @IgnorePep8 @Reimport
# from banana.study.multimodal.test_motion_detection import ( # @IgnorePep8 @Reimport
# MotionDetection, inputs)
from banana.study.multimodal.mrpet import create_motion_correction_class # @IgnorePep8 @Reimport
ref = 'ref'
ref_type = 't1'
t1s = ['ute']
t2s = ['t2']
epis = ['epi']
dwis = [['dwi_main', '0'], ['dwi_opposite', '-1']]
class TestMC(TestCase):
# def test_epi_mc(self):
#
# study = self.create_study(
# MotionDetection, 'MotionDetection', inputs=inputs,
# enforce_inputs=False)
# study.data('motion_detection_output')
# self.assertFilesetCreated('motion_detection_output', study.name)
def test_motion_correction(self):
MotionCorrection, inputs, out_data = create_motion_correction_class(
'MotionCorrection', ref, ref_type, t1s=t1s, t2s=t2s, dwis=dwis,
epis=epis)
study = self.create_study(
MotionCorrection, 'MotionCorrection', inputs=inputs,
enforce_inputs=False)
study.data(out_data)
self.assertFilesetCreated(out_data, study.name)
| 32.263158 | 97 | 0.681892 | 754 | 0.615008 | 0 | 0 | 0 | 0 | 0 | 0 | 539 | 0.439641 |
275f0b6cee294c3156bfba77754c517b6dc76211 | 162 | py | Python | dgbpy/__init__.py | maxprop/dgbpy | 05d0900b0fb2cb612b298c8c78f0f84640a38c4e | [
"Apache-2.0"
]
| 7 | 2021-11-18T18:03:46.000Z | 2022-02-09T06:55:50.000Z | dgbpy/__init__.py | maxprop/dgbpy | 05d0900b0fb2cb612b298c8c78f0f84640a38c4e | [
"Apache-2.0"
]
| null | null | null | dgbpy/__init__.py | maxprop/dgbpy | 05d0900b0fb2cb612b298c8c78f0f84640a38c4e | [
"Apache-2.0"
]
| 1 | 2022-03-18T06:55:55.000Z | 2022-03-18T06:55:55.000Z | #
# (C) dGB Beheer B.V.; (LICENSE) http://opendtect.org/OpendTect_license.txt
# AUTHOR : Bert
# DATE : August 2018
#
# Module init
#
__version__ = '1.0.0'
| 16.2 | 75 | 0.635802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.858025 |
27616031c83ae4189c96219a4ca18b7e0e254aed | 1,052 | py | Python | tests/test_country_service.py | gothill/python-fedex | 62dc8f554babd7066d6c6e7c478944f30fc2b75b | [
"BSD-3-Clause"
]
| 100 | 2016-01-22T23:46:10.000Z | 2022-03-26T05:00:53.000Z | tests/test_country_service.py | gothill/python-fedex | 62dc8f554babd7066d6c6e7c478944f30fc2b75b | [
"BSD-3-Clause"
]
| 77 | 2016-01-19T06:10:22.000Z | 2022-03-26T06:04:14.000Z | tests/test_country_service.py | gothill/python-fedex | 62dc8f554babd7066d6c6e7c478944f30fc2b75b | [
"BSD-3-Clause"
]
| 102 | 2016-02-08T23:28:45.000Z | 2022-02-28T11:37:27.000Z | """
Test module for the Fedex CountryService WSDL.
"""
import unittest
import logging
import sys
sys.path.insert(0, '..')
from fedex.services.country_service import FedexValidatePostalRequest
# Common global config object for testing.
from tests.common import get_fedex_config
CONFIG_OBJ = get_fedex_config()
logging.getLogger('suds').setLevel(logging.ERROR)
logging.getLogger('fedex').setLevel(logging.INFO)
@unittest.skipIf(not CONFIG_OBJ.account_number, "No credentials provided.")
class PackageMovementServiceTests(unittest.TestCase):
"""
These tests verify that the country service WSDL is in good shape.
"""
def test_postal_inquiry(self):
inquiry = FedexValidatePostalRequest(CONFIG_OBJ)
inquiry.Address.PostalCode = '29631'
inquiry.Address.CountryCode = 'US'
inquiry.send_request()
assert inquiry.response
assert inquiry.response.HighestSeverity == 'SUCCESS'
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
unittest.main()
| 25.658537 | 75 | 0.742395 | 447 | 0.424905 | 0 | 0 | 523 | 0.497148 | 0 | 0 | 251 | 0.238593 |
27621f7f93d71cdc400cae5017e3588d01c3c114 | 3,730 | py | Python | setup_tools/getkucore.py | dougmassay/kindleunpack-calibre-plugin | 906fc3820a9b1c179fc754ae5774ebe689a61419 | [
"Unlicense",
"MIT"
]
| 101 | 2015-03-24T10:29:15.000Z | 2022-03-25T07:15:45.000Z | setup_tools/getkucore.py | dougmassay/kindleunpack-calibre-plugin | 906fc3820a9b1c179fc754ae5774ebe689a61419 | [
"Unlicense",
"MIT"
]
| 3 | 2016-09-14T10:47:02.000Z | 2018-01-09T13:32:29.000Z | setup_tools/getkucore.py | dougmassay/kindleunpack-calibre-plugin | 906fc3820a9b1c179fc754ae5774ebe689a61419 | [
"Unlicense",
"MIT"
]
| 13 | 2015-09-28T07:05:18.000Z | 2022-02-13T15:16:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
from __future__ import unicode_literals, division, absolute_import, print_function
import os
import sys
import shutil
import inspect
import glob
import zipfile
import pythonpatch
if sys.version_info >= (3,):
    import urllib.request  # urlretrieve is accessed as urllib.request.urlretrieve below
else:
import urllib2
SCRIPT_DIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
SOURCE_DIR = os.path.dirname(SCRIPT_DIR)
COMMIT_SHA = 'c8be31a196fd92803f78ad34a3f18d40319bbac5'
# REMOTE_URL = 'https://github.com/kevinhendricks/KindleUnpack/archive/master.zip'
REMOTE_URL = 'https://github.com/kevinhendricks/KindleUnpack/archive/{}.zip'.format(COMMIT_SHA)
# FILE_NAME = os.path.join(SCRIPT_DIR, REMOTE_URL.split('/')[-1])
FILE_NAME = os.path.join(SCRIPT_DIR, 'KindleUnpack-{}'.format(REMOTE_URL.split('/')[-1]))
# CORE_DIR = 'KindleUnpack-master/lib/'
CORE_DIR = 'KindleUnpack-{}/lib'.format(COMMIT_SHA)
CORE_EXCLUDES = ['askfolder_ed.py', 'mobiml2xhtml.py', 'prefs.py', 'scrolltextwidget.py']
TARGET_DIR = os.path.join(SOURCE_DIR, 'kindleunpackcore')
def retrieveKindleUnpack():
if os.path.exists(FILE_NAME) and os.path.isfile(FILE_NAME):
os.remove(FILE_NAME)
if sys.version_info >= (3,):
def reporthook(blocknum, blocksize, totalsize):
readsofar = blocknum * blocksize
if totalsize > 0:
percent = readsofar * 1e2 / totalsize
s = "\r%5.1f%% %*d / %d" % (
percent, len(str(totalsize)), readsofar, totalsize)
sys.stderr.write(s)
if readsofar >= totalsize: # near the end
sys.stderr.write("\n")
else: # total size is unknown
sys.stderr.write("read %d\n" % (readsofar,))
urllib.request.urlretrieve(REMOTE_URL, FILE_NAME, reporthook)
else:
u = urllib2.urlopen(REMOTE_URL)
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
with open(FILE_NAME, 'wb') as f:
print('Downloading: %s Bytes: %s' % (FILE_NAME, file_size))
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r'%10d [%3.2f%%]' % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print(status),
retrieveKindleUnpack()
if os.path.exists(TARGET_DIR) and os.path.isdir(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
os.mkdir(TARGET_DIR)
with zipfile.ZipFile(FILE_NAME) as zip_file:
for member in zip_file.namelist():
if member.startswith(CORE_DIR):
name = os.path.basename(member)
if not name or name in CORE_EXCLUDES:
continue
source = zip_file.open(member)
target = open(os.path.join(TARGET_DIR, name), "wb")
with source, target:
shutil.copyfileobj(source, target)
# Patch kindleunpack.py, mobi_nav.py
print('Attempting to patch KindleUnpack file(s) ...')
patchfiles = glob.glob('*.patch')
for patch in patchfiles:
parsedPatchSet = pythonpatch.fromfile(patch)
if parsedPatchSet is not False:
if parsedPatchSet.apply():
print(parsedPatchSet.diffstat())
else:
os.chdir('..')
sys.exit('Cannot apply patch to KindleUnpack file(s)!')
else:
os.chdir('..')
sys.exit('Cannot parse patch file(s)!')
if os.path.exists(FILE_NAME) and os.path.isfile(FILE_NAME):
os.remove(FILE_NAME)
| 36.930693 | 95 | 0.627346 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 821 | 0.220107 |
2764c5afd2309a3e37c20e039e1d0182465889f2 | 5,696 | py | Python | pymoku/_oscilloscope_data.py | liquidinstruments/pymoku | a10c2516e5953722a5f5b52aec7944bec22492c2 | [
"MIT"
]
| 11 | 2016-10-11T04:37:14.000Z | 2021-09-10T02:34:03.000Z | pymoku/_oscilloscope_data.py | liquidinstruments/pymoku | a10c2516e5953722a5f5b52aec7944bec22492c2 | [
"MIT"
]
| 8 | 2017-06-02T18:19:49.000Z | 2020-09-07T06:15:53.000Z | pymoku/_oscilloscope_data.py | liquidinstruments/pymoku | a10c2516e5953722a5f5b52aec7944bec22492c2 | [
"MIT"
]
| 11 | 2018-07-12T04:18:40.000Z | 2022-03-04T10:10:48.000Z | import struct
from pymoku import _frame_instrument
_OSC_SCREEN_WIDTH = 1024
class VoltsData(_frame_instrument.InstrumentData):
"""
Object representing a frame of dual-channel data in units of Volts, and
time in units of seconds. This is the native output format of the
:any:`Oscilloscope` instrument. The *waveformid* property enables
identification of uniqueness of a frame of data, as it is possible to
retrieve the same data more than once (i.e. if the instrument has been
paused).
This object should not be instantiated directly, but will be returned by a
call to :any:`get_data <pymoku.instruments.Oscilloscope.get_data>` or
:any:`get_realtime_data
<pymoku.instruments.Oscilloscope.get_realtime_data>` on the associated
:any:`Oscilloscope` instrument.
.. autoinstanceattribute:: pymoku._frame_instrument.VoltsData.ch1
:annotation: = [CH1_DATA]
.. autoinstanceattribute:: pymoku._frame_instrument.VoltsData.ch2
:annotation: = [CH2_DATA]
.. autoinstanceattribute:: pymoku._frame_instrument.VoltsData.time
:annotation: = [TIME]
.. autoinstanceattribute:: pymoku._frame_instrument.VoltsData.waveformid
:annotation: = n
"""
def __init__(self, instrument, scales):
super(VoltsData, self).__init__(instrument)
        #: Channel 1 data array in units of Volts. Present whether or not the
        #: channel is enabled, but the contents are undefined in the latter case.
self.ch1 = []
#: Channel 2 data array in units of Volts.
self.ch2 = []
#: Timebase
self.time = []
self._scales = scales
def __json__(self):
return {'ch1': self.ch1,
'ch2': self.ch2,
'time': self.time,
'waveform_id': self.waveformid}
def process_complete(self):
super(VoltsData, self).process_complete()
if self._stateid not in self._scales:
return
scales = self._scales[self._stateid]
scale_ch1 = scales['scale_ch1']
scale_ch2 = scales['scale_ch2']
t1 = scales['time_min']
ts = scales['time_step']
try:
smpls = int(len(self._raw1) / 4)
dat = struct.unpack('<' + 'i' * smpls, self._raw1)
dat = [x if x != -0x80000000 else None for x in dat]
self._ch1_bits = [float(x) if x is not None
else None for x in dat[:_OSC_SCREEN_WIDTH]]
self.ch1 = [x * scale_ch1 if x is not None
else None for x in self._ch1_bits]
smpls = int(len(self._raw2) / 4)
dat = struct.unpack('<' + 'i' * smpls, self._raw2)
dat = [x if x != -0x80000000 else None for x in dat]
self._ch2_bits = [float(x) if x is not None
else None for x in dat[:_OSC_SCREEN_WIDTH]]
self.ch2 = [x * scale_ch2 if x is not None
else None for x in self._ch2_bits]
except (IndexError, TypeError, struct.error):
# If the data is bollocksed, force a reinitialisation on next
# packet
self._frameid = None
self._complete = False
self.time = [t1 + (x * ts) for x in range(_OSC_SCREEN_WIDTH)]
return True
def process_buffer(self):
# Compute the x-axis of the buffer
if self._stateid not in self._scales:
return
scales = self._scales[self._stateid]
self.time = [scales['buff_time_min'] + (scales['buff_time_step'] * x)
for x in range(len(self.ch1))]
return True
def _get_timescale(self, tspan):
# Returns a scaling factor and units for time 'T'
if(tspan < 1e-6):
scale_str = 'ns'
scale_const = 1e9
elif (tspan < 1e-3):
scale_str = 'us'
scale_const = 1e6
elif (tspan < 1):
scale_str = 'ms'
scale_const = 1e3
else:
scale_str = 's'
scale_const = 1.0
return [scale_str, scale_const]
def _get_xaxis_fmt(self, x, pos):
# This function returns a format string for the x-axis ticks and
# x-coordinates along the time scale. Use this to set an x-axis format
# during plotting of Oscilloscope frames
if self._stateid not in self._scales:
return
scales = self._scales[self._stateid]
ts = scales['time_step']
tscale_str, tscale_const = self._get_timescale(ts * _OSC_SCREEN_WIDTH)
return {'xaxis': '%.1f %s' % (x * tscale_const, tscale_str),
'xcoord': '%.3f %s' % (x * tscale_const, tscale_str)}
def get_xaxis_fmt(self, x, pos):
""" Function suitable to use as argument to a matplotlib FuncFormatter
for X (time) axis """
return self._get_xaxis_fmt(x, pos)['xaxis']
def get_xcoord_fmt(self, x):
""" Function suitable to use as argument to a matplotlib FuncFormatter
for X (time) coordinate """
return self._get_xaxis_fmt(x, None)['xcoord']
def _get_yaxis_fmt(self, y, pos):
return {'yaxis': '%.1f %s' % (y, 'V'), 'ycoord': '%.3f %s' % (y, 'V')}
def get_yaxis_fmt(self, y, pos):
""" Function suitable to use as argument to a matplotlib FuncFormatter
for Y (voltage) axis """
return self._get_yaxis_fmt(y, pos)['yaxis']
def get_ycoord_fmt(self, y):
""" Function suitable to use as argument to a matplotlib FuncFormatter
for Y (voltage) coordinate """
return self._get_yaxis_fmt(y, None)['ycoord']
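# Hypothetical plotting sketch (illustration only; matplotlib is assumed to
# be available and `data` to be a populated VoltsData frame): the *_fmt
# helpers above are designed to be wrapped in a matplotlib FuncFormatter so
# the axes show scaled time and voltage units.
def _example_plot(data):
    import matplotlib.pyplot as plt
    from matplotlib.ticker import FuncFormatter
    fig, ax = plt.subplots()
    ax.plot(data.time, data.ch1, label='Channel 1')
    ax.plot(data.time, data.ch2, label='Channel 2')
    ax.xaxis.set_major_formatter(FuncFormatter(data.get_xaxis_fmt))
    ax.yaxis.set_major_formatter(FuncFormatter(data.get_yaxis_fmt))
    ax.legend()
    plt.show()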
| 35.6 | 78 | 0.596208 | 5,615 | 0.985779 | 0 | 0 | 0 | 0 | 0 | 0 | 2,298 | 0.403441 |
2764e8cf2af125cde1e1dea98f00be38d0e21369 | 6,205 | py | Python | FlightRisingColiseum/Bot_FR.py | Eternal05/Flightrising-Coliseum-Bot | 8f4895ff8a2d5533fe6a6546e09361738fd54910 | [
"MIT"
]
| 1 | 2021-05-17T02:52:40.000Z | 2021-05-17T02:52:40.000Z | FlightRisingColiseum/Bot_FR.py | Eternal05/Flightrising-Coliseum-Bot | 8f4895ff8a2d5533fe6a6546e09361738fd54910 | [
"MIT"
]
| null | null | null | FlightRisingColiseum/Bot_FR.py | Eternal05/Flightrising-Coliseum-Bot | 8f4895ff8a2d5533fe6a6546e09361738fd54910 | [
"MIT"
]
| null | null | null | import os
from PIL import ImageGrab
import time
import win32api, win32con
from PIL import ImageOps
from numpy import *
import pyautogui
import random
from ctypes import windll
user32 = windll.user32
user32.SetProcessDPIAware()
#some sort of DPI problem unrelated to project
#this stops the images from being cut off while using screengrab
# ------------------
x_pad = 475 #These pads are so it works for different resolutions. Instead of
y_pad = 699 #changing all the coordinates, other users of the bot would just
#have to adjust the pads using screenGrab() defined further below
class Cord: #All important coordinates that are checked often are stored here
mainmenu = (835, 893)
attack = (922, 806)
scratch = (1106, 835)
shred = (919, 950)
attacker1 = (974, 177)
hpattacker1 = (924, 13)
attacker2 = (1091, 331)
hpattacker2 = (1044, 147)
attacker3 = (1223, 477)
hpattacker3 = (1164, 305)
attacker4 = (1031, 265)
hpattacker4 = (984, 67)
attacker5 = (1145, 433)
hpattacker5 = (1104, 227)
boss = (1007, 292)
hpboss = (893, 67)
def screenGrab(): #Originally used as a tool to get x_pad and y_pad
#Currently used to scan the for RGB values in startGame(). See previous versions in journal
box = (x_pad+1,y_pad+1,x_pad+1371,y_pad+1220)
im = ImageGrab.grab(box)
hm = im.getpixel(Cord.hpboss) #put any coordinate u want
print(hm)
return im
def leftClick(): #just for clicking
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0) #Press left click
time.sleep(.1) #delays
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0) #Release left click
print('Click')
def mousePos(cord): #Moves the mouse to the given coordinates. This changed a lot, see previous versions in journal
pyautogui.moveTo(x_pad + cord[0] + random.randint(0, 20), y_pad + cord[1] + random.randint(0, 20), duration=0.25)
#Receives coordinates given in startGame(), goes to location taking into account the pads
#random.randint(0,20) randomizes the coordinates a bit to avoid bot detection
def get_cords(): #Tool that was used to get the coordinates of all the buttons and attackers in the game.
#No longer used now that the bot is completed
x,y = win32api.GetCursorPos()
x = x - x_pad #Takes into account pads, like all the other functions
y = y - y_pad
print(x,y)
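# Hypothetical calibration note (pieced together from the comments above):
# to adapt the bot to another resolution, adjust x_pad/y_pad, then use
# get_cords() to read button coordinates relative to the pads and
# screenGrab()'s getpixel() printout to confirm the RGB values that
# startGame() checks against.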
# ------------------
def startGame(): #Start of the main function
wait = 0 #Used and explained further below
while x_pad == 475: #Just needed this to loop forever so picked random variable
#location of first menu
mousePos((257, 559))
leftClick()
leftClick()
time.sleep(1.5)
#location of second menu
mousePos((489, 771))
leftClick()
time.sleep(3.5)
while x_pad == 475: #Loop for the actual game once past menus
x = round(random.uniform(0, 0.2),2) #Generates random float that'll be added to randomize wait intervals
screenGrab()
s = screenGrab() #Takes picture of the screen and assigns it to s
if s.getpixel((205, 57)) == (93, 94, 134): #Checks if bot got past the menu, good for checking 'camping' (explained in journal)
wait = 0 #Resets the counter for amount of times 'waiting', used farther below
if s.getpixel(Cord.mainmenu) == (222, 214, 202):
#Checks if coordinates of mainmenu match RGB value. If so, that means this menu popped up, and level is complete
#The coordinates & RGB values are from using get_cords() & screenGrab() as tools. Check journal for how
print('level complete')
mousePos((811, 822)) #Goes to the button that sends us back to the mainmenu
leftClick()
time.sleep(1.4 + x) #Pauses after clicking for 1.4 + (randomized number) seconds
break #Breaks out of this loop to go back to the menu loop
#All the other if statements have the same idea as the above if statement
if s.getpixel(Cord.attack) == (236, 234, 231):
wait=0
print('attacking')
mousePos(Cord.attack)
leftClick()
time.sleep(0.1 + x)
screenGrab()
s = screenGrab() #Important screen change here, picture of screen taken again
if s.getpixel(Cord.shred) == (214, 172, 99): #Special attack option
mousePos(Cord.shred)
leftClick()
time.sleep(0.4 + x)
else:
mousePos(Cord.scratch) #Normal attack option
leftClick()
time.sleep(0.4 + x)
if s.getpixel(Cord.hpattacker1) == (49, 61, 48):
mousePos(Cord.attacker1)
leftClick()
time.sleep(1.2+ x)
elif s.getpixel(Cord.hpattacker2) == (49, 61, 48):
mousePos(Cord.attacker2)
leftClick()
time.sleep(1.2 + x)
elif s.getpixel(Cord.hpattacker3) == (49, 61, 48):
mousePos(Cord.attacker3)
leftClick()
time.sleep(1.2 + x)
elif s.getpixel(Cord.hpattacker4) == (49, 61, 48):
mousePos(Cord.attacker4)
leftClick()
time.sleep(1.2 + x)
elif s.getpixel(Cord.hpattacker5) == (49, 61, 48):
mousePos(Cord.attacker5)
leftClick()
time.sleep(1.2 + x)
elif s.getpixel(Cord.hpboss) == (10, 10, 13):
mousePos(Cord.boss)
leftClick()
time.sleep(1.2 + x)
else: #If no hp bars or attack buttons are detected, page is probably loading or enemies are attacking
wait = wait+1 #Wait counter goes up 1 every loop
print('waiting')
                if wait == 15: #If the computer has waited 15 consecutive times, something must have gone wrong, so the program exits
exit()
            time.sleep(2) #Pauses for 2 seconds, then loops back to recheck whether there are hp bars or attack buttons
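# Example entry point (illustrative sketch, not part of the original bot; assumes the game
# window is positioned so that the x_pad/y_pad offsets above line up on your screen):
# if __name__ == '__main__':
#     startGame()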
| 34.859551 | 134 | 0.604674 | 538 | 0.086704 | 0 | 0 | 0 | 0 | 0 | 0 | 2,516 | 0.405479 |
2764ee38e5ee492d56a5166db4a12d098e304e48 | 6,047 | py | Python | models/layers.py | RemiDelaunay/DeepUSE | fea4220d53583fe3d4e63c870a62b04deb005eda | [
"MIT"
]
| 2 | 2021-08-19T07:40:35.000Z | 2022-01-14T18:55:58.000Z | models/layers.py | RemiDelaunay/DeepUSE | fea4220d53583fe3d4e63c870a62b04deb005eda | [
"MIT"
]
| null | null | null | models/layers.py | RemiDelaunay/DeepUSE | fea4220d53583fe3d4e63c870a62b04deb005eda | [
"MIT"
]
| null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class ConvLSTMCell(nn.Module):
"""
Generate a convolutional LSTM cell
Adapted from "RVOS: End-to-End Recurrent Network for Video Object Segmentation"
"""
def __init__(self, input_size, hidden_size, kernel_size=3, padding=1):
super(ConvLSTMCell,self).__init__()
self.use_gpu = True
self.input_size = input_size
self.hidden_size = hidden_size
self.Gates = nn.Conv2d(input_size + hidden_size, 4 * hidden_size, kernel_size, padding=padding)
def forward(self, input_, prev_state):
# get batch and spatial sizes
batch_size = input_.data.size()[0]
spatial_size = input_.data.size()[2:]
if prev_state is None:
state_size = [batch_size, self.hidden_size] + list(spatial_size)
if self.use_gpu:
prev_state = (
Variable(torch.zeros(state_size)).cuda(),
Variable(torch.zeros(state_size)).cuda()
)
else:
prev_state = (
Variable(torch.zeros(state_size)),
Variable(torch.zeros(state_size))
)
prev_hidden, prev_cell = prev_state
# data size is [batch, channel, height, width]
stacked_inputs = torch.cat([input_, prev_hidden], 1)
gates = self.Gates(stacked_inputs)
# chunk across channel dimension
in_gate, remember_gate, out_gate, cell_gate = gates.chunk(4, 1)
# apply sigmoid non linearity
in_gate = F.sigmoid(in_gate)
remember_gate = F.sigmoid(remember_gate)
out_gate = F.sigmoid(out_gate)
# apply tanh non linearity
cell_gate = F.tanh(cell_gate)
# compute current cell and hidden state
cell = (remember_gate * prev_cell) + (in_gate * cell_gate)
hidden = out_gate * F.tanh(cell)
state = [hidden,cell]
return state
class convBNrelu(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3):
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels, out_channels=out_channels, padding=1, bias=False),
nn.BatchNorm2d(out_channels, track_running_stats=False),
nn.ReLU())
def forward(self, x):
return self.conv(x)
class deconvBNrelu(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3):
super().__init__()
self.conv = nn.Sequential(
nn.ConvTranspose2d(in_channels,out_channels, kernel_size, stride=2,padding=1, output_padding=1, bias=False),
nn.BatchNorm2d(out_channels, track_running_stats=False),
nn.ReLU())
def forward(self, x):
return self.conv(x)
class convBN(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3):
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels, out_channels=out_channels, padding=1, bias=False),
nn.BatchNorm2d(out_channels,track_running_stats=False))
def forward(self, x):
return self.conv(x)
class ddf_summand(nn.Module):
def __init__(self, in_channels_list):
super().__init__()
self.convs = nn.ModuleList([nn.Conv2d(kernel_size=3, in_channels=in_channels, out_channels=2, padding=1, bias=True)
for in_channels in in_channels_list])
def forward(self, x, size_out):
x1_resize = []
for i, _ in enumerate(self.convs):
x1 = self.convs[i](x[4-i])
x1_resize.append(F.interpolate(x1, size=size_out, mode='bilinear', align_corners=True))
return torch.sum(torch.stack(x1_resize,dim=4), dim=4)
class DownResBlock(nn.Module):
def __init__(self,in_channels, out_channels, kernel_size=3):
super().__init__()
self.conv_0 = convBNrelu(in_channels, out_channels, kernel_size)
self.conv_1 = convBNrelu(out_channels, out_channels, kernel_size)
self.conv_2 = convBN(out_channels, out_channels, kernel_size)
self.acti = nn.ReLU()
self.down = nn.MaxPool2d(kernel_size=2)
def forward(self,x):
x1 = self.conv_0(x)
x2 = self.conv_1(x1)
x3 = self.conv_2(x2)
x3 += x1
x3 = self.acti(x3)
down = self.down(x3)
return x1, down
class UpResBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3):
super().__init__()
self.transposed = deconvBNrelu(in_channels,out_channels, kernel_size)
self.conv_0 = convBNrelu(out_channels, out_channels, kernel_size)
self.conv_1 = convBN(out_channels, out_channels, kernel_size)
self.acti = nn.ReLU()
def forward(self, x, input_skip):
add_up = self.transposed(x)
add_up += input_skip
add_up += additive_up_sampling(x, input_skip)
x1 = self.conv_0(add_up)
x2 = self.conv_1(x1)
x2 += x1
x2 = self.acti(x2)
return x2
class LstmUpBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3):
super().__init__()
self.transposed = deconvBNrelu(in_channels,out_channels, kernel_size)
self.lstm = ConvLSTMCell(out_channels, out_channels, kernel_size)
def forward(self, x, input_skip, hidden_state_temporal):
add_up = self.transposed(x)
add_up += input_skip
add_up += additive_up_sampling(x, input_skip)
x1 = self.lstm(add_up, hidden_state_temporal)
return x1
def additive_up_sampling(input, input_skip):
upsampled = F.interpolate(input,size=input_skip.size()[2:4], mode='bilinear', align_corners=True)
upsampled_split = torch.chunk(upsampled, 2, dim=1)
upsampled_stack = torch.stack(upsampled_split, dim=1)
upsampled_final = torch.sum(upsampled_stack, dim=1)
return upsampled_final
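# Minimal shape check (illustrative example only; the batch/channel/spatial sizes below are
# hypothetical and use_gpu is disabled so the sketch runs on CPU):
if __name__ == "__main__":
    cell = ConvLSTMCell(input_size=16, hidden_size=32)
    cell.use_gpu = False
    x = torch.randn(2, 16, 64, 64)                      # [batch, channel, height, width]
    hidden, cell_state = cell(x, None)                  # first call builds a zero initial state
    hidden, cell_state = cell(x, [hidden, cell_state])  # subsequent calls reuse the state
    print(hidden.shape)                                 # torch.Size([2, 32, 64, 64])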
| 38.272152 | 123 | 0.64379 | 5,583 | 0.923268 | 0 | 0 | 0 | 0 | 0 | 0 | 355 | 0.058707 |
2765c7c3e9e989737c9e28cfcc8a7675bc22b1e5 | 907 | py | Python | main.py | denissearenas/face_recognition_image | 63f43ae188cc12ba443d2aeff84959eba95c9049 | [
"MIT"
]
| null | null | null | main.py | denissearenas/face_recognition_image | 63f43ae188cc12ba443d2aeff84959eba95c9049 | [
"MIT"
]
| null | null | null | main.py | denissearenas/face_recognition_image | 63f43ae188cc12ba443d2aeff84959eba95c9049 | [
"MIT"
]
| null | null | null | import logging
from logging.config import fileConfig
import os, os.path
import imageRecognition
#Test Folder
TestFolder = 'WorkingFolder/TestImages/'
# Create the Working folders
working_folders = ['logs','.metadata','WorkingFolder','./Workingfolder/OutputImages']
[os.makedirs(folder) for folder in working_folders if not os.path.exists(folder)]
# Load log config
fileConfig('logging_config.ini')
logger = logging.getLogger()
if __name__ == "__main__":
encodings = imageRecognition.loadEncodings()
if len(os.listdir(TestFolder)) > 0:
for file in os.listdir(TestFolder):
name_image = os.path.join(TestFolder,file)
filename = 'output'
if file.rfind('.') >= 0:
filename = file[:file.rfind('.')]
imageRecognition.tagPeople_cv2(TestFolder+file, encodings, tolerance=0.60, output_filename = filename)
| 24.513514 | 114 | 0.680265 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.209482 |
2768921c04ac38d6998b1d53e7d2b264cb24e683 | 755 | py | Python | Medio 3/ex056.py | Gustavsantos/python1 | 5520f2d2ee591157942008fdcd6bd42eb521f1a6 | [
"MIT"
]
| null | null | null | Medio 3/ex056.py | Gustavsantos/python1 | 5520f2d2ee591157942008fdcd6bd42eb521f1a6 | [
"MIT"
]
| null | null | null | Medio 3/ex056.py | Gustavsantos/python1 | 5520f2d2ee591157942008fdcd6bd42eb521f1a6 | [
"MIT"
]
| null | null | null | total = 0
media = 0
hmais = 0
no = ''
contm = 0
from datetime import date
atual = date.today().year
for p in range(1,5):
    print('Person #{}'.format(p))
    nome = str(input('Name: ')).strip().capitalize()
    ns = int(input('Year of birth: '))
    sexo = str(input('Sex (M/F): ')).strip().upper()
idade = atual - ns
total += idade
media = total/4
if p == 1 and sexo == 'M':
hmais = idade
no = nome
if idade > hmais and sexo == 'M':
hmais = idade
no = nome
if idade < 20 and sexo == 'F':
contm += 1
print('There are {} women under 20 years old'.format(contm))
print('The oldest man is {} years old and his name is {}'.format(hmais, no))
print('The average age is {} years'.format(media))
| 26.964286 | 67 | 0.564238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.240423 |
2768edd67418ad70fbc7628b2d0db9a2e7e067b6 | 9,575 | py | Python | modules/cdp.py | experiencedft/defisaver-sim | 1d1f05078efb634286df450b125677a1685a066e | [
"MIT"
]
| 13 | 2021-02-01T11:08:21.000Z | 2022-01-13T05:29:11.000Z | modules/cdp.py | experiencedft/defisaver-sim | 1d1f05078efb634286df450b125677a1685a066e | [
"MIT"
]
| null | null | null | modules/cdp.py | experiencedft/defisaver-sim | 1d1f05078efb634286df450b125677a1685a066e | [
"MIT"
]
| 5 | 2021-01-27T22:01:55.000Z | 2022-02-20T22:14:16.000Z | '''
The CDP module contains all the tools required to simulate a collateralized debt position, such as increasing or decreasing
its leverage (boost and repay), closing the vault to calculate its lifetime profit, adding collateral or drawing more debt.
The position is represented as an object whose methods provide all the above interactions. It can be leveraged or non leveraged.
For the purpose of this simulation, a vault is considered leveraged if part or all of the debt is used to buy more collateral.
'''
import numpy as np
class CDP():
'''
Attributes
___________
collateral: float
the amount of collateral in the position, in unit of the collateral asset
debt: float
        the amount of debt of the position, in unit of the debt asset
automated: bool
a boolean flag indicating whether the CDP is automated
    automation_settings: dictionary
        a dictionary containing the automation settings
{"repay from": ..., "repay to": ..., "boost from": ..., "boost to": ...}
min_automation_debt: float
the minimum debt required for automation to be enabled, in amount of debt asset
min_ratio: float
the minimum collateralization ratio admitted by the protocol, below which liquidation occurs
'''
def __init__(self, initial_collateral: float, initial_debt: float, min_ratio: float) -> None:
'''
min_ratio in %
'''
self.collateral = initial_collateral
self.debt = initial_debt
self.isAutomated = False
self.automation_settings = {"repay from": 0, "repay to": 0, "boost from": 0, "boost to": 0}
self.min_ratio = min_ratio
# TODO: pass this as an argument later on and include change in simulate.py and related function calls
self.min_automation_debt = 0
def getCollateralizationRatio(self, price: float):
'''
Returns the collateralization ratio in %
'''
return 100*self.collateral*price/self.debt
def changeCollateral(self, deltaCollateral: float):
'''
Add deltaCollateral to the position's collateral. Note: deltaCollateral may be negative.
'''
self.collateral += deltaCollateral
def changeDebt(self, deltaDebt: float):
'''
Add deltaDebt to the position's debt. Note: deltaDebt may be negative.
'''
self.debt += deltaDebt
def close(self, price: float) -> float:
'''
Close the vault by paying back all of the debt and return the amount of collateral left.
Assumes infinite liquidity at the current price.
Param:
price: float
The current price of the collateral denominated in the debt asset.
'''
if self.debt > 0:
# The amount of collateral to sell to pay back the debt
collateralToSell = self.debt/price
self.collateral -= collateralToSell
self.debt = 0
return self.collateral
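    # Example usage (illustrative sketch; the price, ratios and fee below are hypothetical):
    #     vault = CDP(initial_collateral=10.0, initial_debt=5000.0, min_ratio=150)
    #     vault.getCollateralizationRatio(price=2000.0)   # -> 400.0 (%)
    #     vault.boostTo(target=300, price=2000.0, gas_price_in_gwei=50, service_fee=0.3)
    #     vault.close(price=2000.0)                       # collateral left after repaying the debt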
def automate(self, repay_from: float, repay_to: float, boost_from: float, boost_to: float):
'''
Enable or update automation for a CDP with the given automation settings.
Param:
automation_settings:
each param is an automation setting in the order of repay from, repay to,
boost from, boost to
'''
assert repay_from > self.min_ratio + 10
self.isAutomated = True
self.automation_settings["repay from"] = repay_from
self.automation_settings["repay to"] = repay_to
self.automation_settings["boost from"] = boost_from
self.automation_settings["boost to"] = boost_to
def disableAutomation(self):
self.isAutomated = False
def boostTo(self, target: float, price: float, gas_price_in_gwei: float, service_fee: float):
'''
Given the current price of the collateral asset denominated in the debt asset, check whether
the collateralization ratio is above threshold, and if yes, boost to the target ratio.
A boost is defined as generating more debt from the position and buying collateral with it.
Params:
target:
target collateralization ratio (in %)
price:
current price of the collateral denominated in the debt asset
gas_price_in_gwei:
current on-chain gas price in gwei (nanoETH)
            service_fee:
current fee charged by DeFi Saver (in %)
'''
#Check that it's possible to boost with the desired target
if self.debt == 0 or target/100 < self.collateral*price/self.debt:
# Fixed estimate of 1M gas consumed by the boost operation to calculate the gas fee in
# ETH
g = 1000000*gas_price_in_gwei*1e-9
# Target collateralization ratio
t = target/100
c = self.collateral
d = self.debt
p = price
gamma = 1 - service_fee/100
# print("gas cost in USD: ", g*p)
# print("gas cost limit: ", (p*c - t*d)/(5*(t - gamma) + 1))
# Gas cost must be below 20% of the boost amount
if p*g < (p*c - t*d)/(5*(t - gamma) + 1):
#The gas charged to the user is capped at a price of 499 gwei
if gas_price_in_gwei > 499:
g = 1000000*499*1e-9
# Calculate debt increase (> 0)required to arrive to the target collateralization ratio
deltaDebt = (p*c - p*g - t*d)/(t - gamma)
# print("debt change: ", deltaDebt)
# print("gas_cost/debt_change: ", p*g/deltaDebt)
# Calculate corresponding collateral increase (> 0)
deltaCollateral = (gamma*deltaDebt - p*g)/p
# Update position
self.debt += deltaDebt
self.collateral += deltaCollateral
assert self.debt > 0
assert self.collateral > 0
# Return True if boost took place
return True
else:
return False
else:
# If boost not possible with desired parameters
return False
def repayTo(self, target: float, price: float, gas_price_in_gwei: float, service_fee: float):
'''
Given the current price of the collateral asset denominated in the debt asset, check whether
the collateralization ratio is below threshold, and if yes, repay to the target ratio.
A repay is defined as selling some of the collateral from the position to acquire more of the
debt asset and repay part of the debt with it.
Params:
target:
target collateralization ratio in %
price:
current price of the collateral denominated in the debt asset
gas_price_in_gwei:
current on-chain gas price in gwei (nanoETH)
            service_fee:
current fee charged by DeFi Saver (in %)
'''
collateralization = self.collateral*price/self.debt
# Check that it's possible to repay with the desired target
assert self.debt != 0
# The current CRatio must be below the target OR below min_ratio + 10%
if collateralization < target/100:
# Fixed estimate of 1M gas consumed by the repay operation to calculate the gas fee in
# ETH
if gas_price_in_gwei > 499:
gas_price_in_gwei = 499
g = 1000000*gas_price_in_gwei*1e-9
# Target collateralization ratio
t = target/100
c = self.collateral
d = self.debt
p = price
gamma = 1 - service_fee/100
# print("gas cost in USD: ", p*g)
# print("gas cost in ETH: ", g)
# print("gas cost limit: ", (t*d - p*c)/(5*(gamma*t - 1) + t))
# print("collateralization in %: ", 100*collateralization)
# print("min repay threshold: ", self.min_ratio + 10)
# Gas cost must be lower than 20% of repay amount OR we must be below the min repay ratio
if 100*collateralization < self.min_ratio + 10:
isEmergencyRepay = True
else:
isEmergencyRepay = False
if p*g < (t*d - p*c)/(5*(gamma*t - 1) - t) or isEmergencyRepay:
# In case of an emergency repay, this might exceed the previous 20%. In this case, cap the charged amount to 20%.
if p*g > (t*d - p*c)/(5*(gamma*t - 1) - t):
g = (1/p)*(t*d - p*c)/(5*(gamma*t - 1) - t)
# Calculate collateral decrease (> 0) required to arrive to the target collateralization ratio
deltaCollateral = (t*d + t*p*g - p*c)/(p*(gamma*t-1))
# print("collateral change: ", deltaCollateral)
# print("gas_cost/collateral_change: ", g/deltaCollateral)
deltaDebt = gamma*p*deltaCollateral - p*g
if self.debt < self.min_automation_debt :
self.isAutomated = False
# Update position
self.collateral -= deltaCollateral
self.debt -= deltaDebt
assert self.collateral > 0
assert self.debt > 0
# Return True if repay took place
return True
else:
return False
else:
return False | 43.522727 | 129 | 0.593211 | 9,036 | 0.943708 | 0 | 0 | 0 | 0 | 0 | 0 | 5,402 | 0.564178 |
276b88f7bc15b02ea6b733a96f259241381fe73b | 5,683 | py | Python | clustertools/test/test_experiment.py | jm-begon/clustertools | 264198d0ffbd60b883b7b6a2af79341425c7729b | [
"BSD-3-Clause"
]
| 7 | 2017-05-31T15:28:28.000Z | 2021-03-25T12:36:48.000Z | clustertools/test/test_experiment.py | jm-begon/clustertools | 264198d0ffbd60b883b7b6a2af79341425c7729b | [
"BSD-3-Clause"
]
| 42 | 2017-06-09T07:35:50.000Z | 2019-08-29T15:23:29.000Z | clustertools/test/test_experiment.py | jm-begon/clustertools | 264198d0ffbd60b883b7b6a2af79341425c7729b | [
"BSD-3-Clause"
]
| 3 | 2017-05-29T13:39:18.000Z | 2019-06-24T09:43:01.000Z | # -*- coding: utf-8 -*-
from functools import partial
from nose.tools import assert_equal, assert_in, assert_less, assert_raises, \
with_setup, assert_true
from nose.tools import assert_false
from clustertools import ParameterSet, Result, Experiment
from clustertools.state import RunningState, CompletedState, AbortedState, \
CriticalState, PartialState, LaunchableState
from clustertools.storage import PickleStorage
from .util_test import purge, prep, __EXP_NAME__, IntrospectStorage, \
TestComputation, InterruptedComputation, pickle_prep, pickle_purge, \
with_setup_
__author__ = "Begon Jean-Michel <[email protected]>"
__copyright__ = "3-clause BSD License"
# ----------------------------------------------------------------------- Result
def test_result():
expected = {"m"+str(x): x for x in range(1, 5)}
result = Result("m1", m2=2, m3=6)
result.m1 = 1
result.m3 = 3
result["m4"] = 4
assert_equal(len(expected), len(result))
for name, value in expected.items():
assert_equal(result[name], value)
for name, value in result.items():
# redundant
assert_equal(expected[name], value)
dict(result)
repr(result)
# ------------------------------------------------------------------ Computation
@with_setup(prep, purge)
def test_correct_computation():
computation = TestComputation()
intro_storage = computation.storage
result1 = computation(x1=5, x2=2, x3=50)
result2 = intro_storage.load_result(computation.comp_name)
for result in result1, result2:
assert_equal(len(result), 2) # One real metric + repr
assert_equal(result["mult"], 2 * 5)
assert_equal(len(intro_storage.result_history), 1) # Only one computation
assert_equal(len(intro_storage.state_history), 1) # Only one computation
states = list(intro_storage.state_history.values())[0]
# If correct, state should have followed the sequence:
# Running (p=0), Running (p=1), Critical, Partial, Completed
assert_equal(len(states), 5)
assert_true(isinstance(states[0], RunningState))
assert_true(isinstance(states[1], RunningState))
assert_true(isinstance(states[2], CriticalState))
assert_true(isinstance(states[3], PartialState))
assert_true(isinstance(states[4], CompletedState))
assert_equal(states[0].progress, 0.)
assert_equal(states[1].progress, 1.)
@with_setup(prep, purge)
def test_error_computation():
computation = TestComputation()
intro_storage = computation.storage
computation = computation.lazyfy(x1=5, x2=None, x3=50)
assert_raises(TypeError, computation) # 5*None
assert_equal(len(intro_storage.result_history), 0) # Computation not saved
assert_equal(len(intro_storage.state_history), 1) # Only one computation
states = list(intro_storage.state_history.values())[0]
# If correct (i.e. error occurs), state should have evolved as:
# Running, Aborted
assert_equal(len(states), 2)
assert_true(isinstance(states[0], RunningState))
assert_true(isinstance(states[1], AbortedState))
@with_setup(prep, purge)
def test_interrupted_computation():
computation = InterruptedComputation()
intro_storage = computation.storage
assert_raises(KeyboardInterrupt, computation)
assert_equal(len(intro_storage.result_history[computation.comp_name]), 0)
state_history = intro_storage.state_history[computation.comp_name]
# Running -> Launchable
assert_equal(len(state_history), 2)
assert_true(isinstance(state_history[0], RunningState))
assert_true(isinstance(state_history[1], LaunchableState))
@with_setup(prep, purge)
def test_has_parameters():
computation = TestComputation()
computation.lazyfy(p1="1", p2=2)
assert_true(computation.has_parameters(p1="1", p2=2))
assert_true(computation.has_parameters(p1="1"))
assert_true(computation.has_parameters(p2=2))
assert_false(computation.has_parameters(p3=""))
assert_false(computation.has_parameters(p1="1", p3=""))
assert_false(computation.has_parameters(p1="1", p2=2, p3=""))
# ------------------------------------------------------------------- Experiment
@with_setup(prep, purge)
def test_experiment():
parameter_set = ParameterSet()
parameter_set.add_parameters(x1=range(3), x2=range(3))
experiment = Experiment(__EXP_NAME__, parameter_set, TestComputation,
IntrospectStorage)
assert_equal(len(list(experiment.yield_computations())), 9)
# start=3 : skip 0,1,2
assert_equal(len(list(experiment.yield_computations(start=3))), 6)
# capacity=6 : skip 6, 7, 8
assert_equal(len(list(experiment.yield_computations(capacity=6))), 6)
@with_setup_(partial(pickle_prep, exp_name="{}_1".format(__EXP_NAME__)),
partial(pickle_purge, exp_name="{}_1".format(__EXP_NAME__)))
def do_auto_refresh(auto_refresh):
parameter_set = ParameterSet()
parameter_set.add_parameters(x1=range(3), x2=range(3))
experiment = Experiment("{}_1".format(__EXP_NAME__), parameter_set,
TestComputation)
# There should be 9 computations
assert_equal(len(experiment), 9)
count = 0
for i, _ in enumerate(experiment.yield_computations(auto_refresh=auto_refresh)):
if i == 0:
state = CompletedState(
Experiment.name_computation(experiment.exp_name, 6)
)
PickleStorage(experiment.exp_name).update_state(state)
count += 1
print("Auto refresh?", auto_refresh, "--", count)
assert_equal(count, 8 if auto_refresh else 9)
def test_auto_refresh():
do_auto_refresh(True)
do_auto_refresh(False)
| 36.664516 | 84 | 0.688193 | 0 | 0 | 0 | 0 | 4,217 | 0.742038 | 0 | 0 | 831 | 0.146226 |
276c363a6f57e3c85d7f037af185d706c5abdf10 | 1,657 | py | Python | ed.py | zzx288/words | 477516211cc43701ec4592a686f0bc06cbb9c141 | [
"MIT"
]
| null | null | null | ed.py | zzx288/words | 477516211cc43701ec4592a686f0bc06cbb9c141 | [
"MIT"
]
| null | null | null | ed.py | zzx288/words | 477516211cc43701ec4592a686f0bc06cbb9c141 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import codecs
def find_words(words):
split_words={}
count_all = 0
unused_words = u" \t\r\n,。:;、“‘”【】『』|=+-——()*&……%¥#@!~·《》?/?<>,.;:'\"[]{}_)(^$!`"
unused_english = u"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
for i in unused_words:
count_all += words.count(i)
for i in unused_english:
count_all += words.count(i)
lens = len(words)
len_deal = lens-count_all
    for i in range(0,lens-1): # stop before the last index so words[i+1] stays in range
if(words[i] in unused_words or words[i] in unused_english):
continue
if(words[i+1] in unused_words or words[i+1] in unused_english):
continue
if words[i:i+2] in split_words:
split_words[words[i:i+2]][0]+=1
split_words[words[i:i+2]][1]=float(split_words[words[i:i+2]][0])/float(len_deal)
else:
split_words[words[i:i+2]]=[1,1/float(len_deal)]
return split_words
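# Illustrative example (not part of the original script): for a four-character string with no
# filtered characters, e.g. find_words(u"天气很好"), every adjacent pair gets count 1 and
# frequency 1/4: {u"天气": [1, 0.25], u"气很": [1, 0.25], u"很好": [1, 0.25]}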
def read_file(a):
words = ""
i=0
pathdir = os.listdir(a)
for alldir in pathdir:
test = codecs.open(a+"\\"+alldir, 'r',encoding='utf-8')
words += test.read()
test.close()
i += 1
print(i)
return words
if __name__ == '__main__':
words = read_file('F:\\cs')
'''
test = codecs.open('F:\\760.xml', 'r',encoding='utf-8')
words = test.read()
test.close()
'''
print ("splitting......")
split=find_words(words)
ci = codecs.open('F:\\result.txt','a',encoding = 'utf-8')
for key in split.keys():
ci.write('('+key[0]+','+key[1]+','+str(split[key][1])+')\r\n')
    ci.close()
print("ok")
| 29.589286 | 92 | 0.556427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 432 | 0.253521 |
276d667a87fa4dfdeb86a4993f51124a0783875c | 2,971 | py | Python | acestream/ACEStream/Plugin/EngineWx.py | GrandPaRPi/p2ptv-pi | 6f79c00f9055a3763ddfe1dc41e14d2cb533f4c3 | [
"MIT"
]
| null | null | null | acestream/ACEStream/Plugin/EngineWx.py | GrandPaRPi/p2ptv-pi | 6f79c00f9055a3763ddfe1dc41e14d2cb533f4c3 | [
"MIT"
]
| null | null | null | acestream/ACEStream/Plugin/EngineWx.py | GrandPaRPi/p2ptv-pi | 6f79c00f9055a3763ddfe1dc41e14d2cb533f4c3 | [
"MIT"
]
| 2 | 2018-04-17T17:34:39.000Z | 2020-07-26T03:43:33.000Z | #Embedded file name: ACEStream\Plugin\EngineWx.pyo
import os
import sys
from traceback import print_exc
try:
import wxversion
wxversion.select('2.8')
except:
pass
try:
import wx
except:
print 'wx is not installed'
os._exit(1)
import ACEStream
from ACEStream.GlobalConfig import globalConfig
from ACEStream.Plugin.BackgroundProcess import run_bgapp, stop_bgapp, send_startup_event, get_default_api_version
from ACEStream.Player.systray import PlayerTaskBarIcon
from ACEStream.version import VERSION
ALLOW_MULTIPLE = False
class AppWrapper(wx.App):
def __init__(self, redirectstderrout = False):
self.bgapp = None
self.systray = None
wx.App.__init__(self, redirectstderrout)
def OnExit(self):
if self.systray is not None:
self.systray.RemoveIcon()
self.systray.Destroy()
if self.bgapp is not None:
self.bgapp.OnExit()
def set_bgapp(self, bgapp):
self.bgapp = bgapp
iconpath = bgapp.iconpath
self.systray = PlayerTaskBarIcon(self, self.bgapp, iconpath)
def set_icon_tooltip(self, txt):
if self.systray is not None:
self.systray.set_icon_tooltip(txt)
def on_error(self, errmsg, exit = False):
if self.bgapp is None:
title = 'Error'
else:
title = self.bgapp.appname + ' Error'
dlg = wx.MessageDialog(None, errmsg, title, wx.OK | wx.ICON_ERROR | wx.STAY_ON_TOP)
result = dlg.ShowModal()
dlg.Destroy()
if exit:
self.ExitMainLoop()
def start(apptype, exec_dir):
if apptype == 'torrentstream':
appname = 'Torrent Stream'
elif apptype == 'acestream':
appname = 'ACE Stream HD'
else:
raise Exception, 'Bad app type'
single_instance_checker = wx.SingleInstanceChecker(appname + '-' + wx.GetUserId())
if single_instance_checker.IsAnotherRunning():
print >> sys.stderr, 'appwrapper: already running, exit'
if get_default_api_version(apptype, exec_dir) < 2:
send_startup_event()
os._exit(0)
globalConfig.set_value('apptype', apptype)
globalConfig.set_mode('client_wx')
wrapper = AppWrapper()
try:
bgapp = run_bgapp(wrapper, appname, VERSION)
except Exception as e:
print >> sys.stderr, 'Fatal error while starting:', str(e)
print_exc()
os._exit(0)
wrapper.set_bgapp(bgapp)
bgapp.debug_systray = bgapp.debug_level & 1024 != 0
ACEStream.Player.systray.DEBUG = bgapp.debug_systray
ACEStream.Player.systray.SHOW_DEBUG_LEVEL = bgapp.debug_systray
ACEStream.Player.systray.DEBUG_PIECES = bgapp.debug_level & 128 != 0
ACEStream.Player.systray.DEBUG_VIDEOSTATUS = bgapp.debug_level & 2048 != 0
ACEStream.Player.systray.DEBUG_PROXY_BUF = bgapp.debug_level & 4096 != 0
wrapper.MainLoop()
if not ALLOW_MULTIPLE:
del single_instance_checker
stop_bgapp(bgapp)
| 31.946237 | 113 | 0.672164 | 1,031 | 0.347021 | 0 | 0 | 0 | 0 | 0 | 0 | 249 | 0.08381 |
276d90a94566353b66f7d294a0618d781bcf1b9c | 899 | py | Python | tcp_finite_state/client.py | aakashhemadri/python | 16955810dd1ea491c9ff9c99ee02bd463ba97f0a | [
"MIT"
]
| 2 | 2021-01-20T14:08:26.000Z | 2021-10-20T07:56:46.000Z | tcp_finite_state/client.py | aakashhemadri/python | 16955810dd1ea491c9ff9c99ee02bd463ba97f0a | [
"MIT"
]
| null | null | null | tcp_finite_state/client.py | aakashhemadri/python | 16955810dd1ea491c9ff9c99ee02bd463ba97f0a | [
"MIT"
]
| null | null | null | from tcp_statemachine import TCPStateMachine, Data
import socket
class Client:
"""
Client class
"""
    def __init__(self, statemachine, data):
        self.ME = 'client'
        self.HOST = '127.0.0.1'
        self.PORT = 22085
        self.ADDRESS = (self.HOST, self.PORT)
        self.statemachine = statemachine
        self.data = data
        self.client_socket = socket.socket()
def run(self):
self.client_socket.connect(self.ADDRESS)
self.statemachine.send_syn(self.client_socket, self.statemachine)
self.statemachine.conn_estb_client()
        self.data.receive(self.client_socket, self.statemachine)
        while self.statemachine.is_established:
            self.data.send(self.client_socket, self.statemachine)
            self.data.receive(self.client_socket, self.statemachine)
self.client_socket.close()
def whoami(self):
return self.ME
def main():
    tcp_fsm = TCPStateMachine()
    data = Data(12345)
    client = Client(tcp_fsm, data)
    client.run()
if __name__ == '__main__':
main()
| 22.475 | 69 | 0.697442 | 681 | 0.757508 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.058954 |
276e00dcc9820a61c4d4ebff8a3b8b4d2a199a5f | 467 | py | Python | HackerRank/MinimumSwaps2.py | kokuraxc/play-ground | 48b5291f3cca117e0cd0a17bf9255ec4dc1a5cdd | [
"MIT"
]
| null | null | null | HackerRank/MinimumSwaps2.py | kokuraxc/play-ground | 48b5291f3cca117e0cd0a17bf9255ec4dc1a5cdd | [
"MIT"
]
| null | null | null | HackerRank/MinimumSwaps2.py | kokuraxc/play-ground | 48b5291f3cca117e0cd0a17bf9255ec4dc1a5cdd | [
"MIT"
]
| null | null | null | # https://www.hackerrank.com/challenges/minimum-swaps-2
# Complete the minimumSwaps function below.
def minimumSwaps(arr):
steps = 0
# for i, a in enumerate(arr):
for i in range(len(arr)):
while arr[i] != i+1:
#arr = [a if x == i+1 else x for x in arr]
#print(arr)
left = arr[i]
right = arr[left-1]
arr[i] = right
arr[left-1] = left
steps += 1
return steps
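if __name__ == '__main__':
    # Quick check (illustrative, not part of the original solution):
    # [4, 3, 1, 2] becomes [1, 2, 3, 4] after 3 swaps.
    print(minimumSwaps([4, 3, 1, 2]))  # 3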
| 25.944444 | 55 | 0.51606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.385439 |
277311deb8c817997aeecabf48fe1ce321691cbf | 3,081 | py | Python | source/conf.py | Kinovea/kinovea-docs | a2c4c9561bd4f8cc663efcaaed017c9c018b6b20 | [
"CC0-1.0"
]
| 4 | 2020-11-17T18:09:42.000Z | 2021-12-29T07:34:29.000Z | source/conf.py | Kinovea/kinovea-docs | a2c4c9561bd4f8cc663efcaaed017c9c018b6b20 | [
"CC0-1.0"
]
| 4 | 2021-07-12T09:41:06.000Z | 2021-11-01T19:22:05.000Z | source/conf.py | Kinovea/kinovea-docs | a2c4c9561bd4f8cc663efcaaed017c9c018b6b20 | [
"CC0-1.0"
]
| 1 | 2021-07-12T05:17:47.000Z | 2021-07-12T05:17:47.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'Kinovea'
copyright = '2021, Kinovea documentation authors (CC0 1.0)'
author = 'Kinovea documentation authors'
# The full version, including alpha/beta/rc tags
release = '0.9.5'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx_rtd_theme",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_static_path = ['_static']
html_css_files = ['css/kinovea.css']
html_logo = 'images/logo/kinovea.svg'
html_copy_source = False
html_show_sourcelink = False
html_show_sphinx = False
html_theme_options = {
'logo_only': True,
'display_version': False,
'style_external_links': True,
'style_nav_header_background': "#404040",
# Collapse navigation (False makes it tree-like)
#'collapse_navigation': False,
}
html_context = {
'display_github': False,
}
pdf_documents = [('index', u'kinoveadoc', u'Kinovea documentation', u'Kinovea community'),]
# -- Options for Epub output ----------------------------------------------
# EPUB Output
epub_theme = "sphinx_rtd_theme"
#epub_theme = 'epub'
# Bibliographic Dublin Core info.
epub_description = "Kinovea reference manual"
epub_publisher = "Kinovea"
epub_title = project
epub_author = author
epub_copyright = copyright
# The cover page information. This is a tuple containing the filenames of
# the cover image and the html template.
#epub_cover = ('_static/cover.png', 'epub-cover.html')
epub_css_files = ['css/kinovea.css']
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in the file toc.ncx.
epub_tocdepth = 2
# Control whether to display URL addresses.
epub_show_urls = 'no'
| 30.205882 | 91 | 0.683544 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,368 | 0.768582 |
277376eb0677ba7d0fb6770b5320ed63bd24de63 | 51 | py | Python | 0-hello-world/hello.py | zeyuri/computer-science-and-python-intro | 178ff6647586fb2be716bb3c4796b03310731583 | [
"MIT"
]
| null | null | null | 0-hello-world/hello.py | zeyuri/computer-science-and-python-intro | 178ff6647586fb2be716bb3c4796b03310731583 | [
"MIT"
]
| null | null | null | 0-hello-world/hello.py | zeyuri/computer-science-and-python-intro | 178ff6647586fb2be716bb3c4796b03310731583 | [
"MIT"
]
| null | null | null | msg = "Hello World, i'm coming for you"
print(msg)
| 17 | 39 | 0.686275 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.647059 |
2775121ab7502b6919cf78437931035cd8b7a2d9 | 158 | py | Python | src/onegov/user/auth/clients/__init__.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
]
| null | null | null | src/onegov/user/auth/clients/__init__.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
]
| null | null | null | src/onegov/user/auth/clients/__init__.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
]
| null | null | null | from onegov.user.auth.clients.kerberos import KerberosClient
from onegov.user.auth.clients.ldap import LDAPClient
__all__ = ('KerberosClient', 'LDAPClient')
| 31.6 | 60 | 0.816456 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.177215 |
27756bad277e04032903ed08ddd795ce38d122b8 | 746 | py | Python | img_utils/files/file.py | DewMaple/image_utils | a0ca6a1b088389f5a70df60623e629634d57065a | [
"MIT"
]
| null | null | null | img_utils/files/file.py | DewMaple/image_utils | a0ca6a1b088389f5a70df60623e629634d57065a | [
"MIT"
]
| null | null | null | img_utils/files/file.py | DewMaple/image_utils | a0ca6a1b088389f5a70df60623e629634d57065a | [
"MIT"
]
| null | null | null | import glob
import os
def images_in_dir(images_dir, file_types=('*.png', '*.jpg', '*.jpeg', '*.gif')):
"""
:param images_dir: directory that contains target images
    :param file_types: image file extension patterns
:return: full image file path list
"""
file_names = []
for ext in file_types:
file_names.extend(glob.glob(os.path.join(images_dir, ext)))
return sorted(file_names)
def filename(file_path):
"""
:param file_path: file path
    :return: file name with extension
"""
return file_path.split(os.sep)[-1]
def fname(file_name):
"""
:param file_name: file name or file path
    :return: file name without extension
"""
idx = file_name.rindex(os.extsep)
return file_name[0:idx]
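if __name__ == '__main__':
    # Illustrative examples (hypothetical paths; results shown for POSIX-style separators):
    print(filename('/tmp/photos/cat.jpg'))  # 'cat.jpg'
    print(fname('cat.jpg'))                 # 'cat'
    print(images_in_dir('/tmp/photos'))     # sorted list of matching image paths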
| 23.3125 | 80 | 0.647453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 354 | 0.474531 |
277599814e255a220a50444d8861eabc112abdd1 | 4,931 | py | Python | financial_fundamentals/xbrl.py | Mahesh-Salunke/financial_fundamentals | 421e7550e2c4b2cc9cc0ee8cb15ce2ad0d89234f | [
"Apache-2.0"
]
| 122 | 2015-01-28T17:57:08.000Z | 2022-02-12T12:24:55.000Z | financial_fundamentals/xbrl.py | Mahesh-Salunke/financial_fundamentals | 421e7550e2c4b2cc9cc0ee8cb15ce2ad0d89234f | [
"Apache-2.0"
]
| 1 | 2016-11-07T17:02:02.000Z | 2016-11-09T20:51:50.000Z | financial_fundamentals/xbrl.py | Mahesh-Salunke/financial_fundamentals | 421e7550e2c4b2cc9cc0ee8cb15ce2ad0d89234f | [
"Apache-2.0"
]
| 49 | 2015-01-01T03:12:27.000Z | 2021-07-06T10:19:31.000Z | '''
Created on Oct 8, 2013
@author: akittredge
'''
import dateutil.parser
import xmltodict
from financial_fundamentals.exceptions import ValueNotInFilingDocument
class XBRLMetricParams(object):
'''Bundle the parameters sufficient to extract a metric from an xbrl document.
'''
def __init__(self, possible_tags, context_type):
self.possible_tags = possible_tags
self.context_type = context_type
class DurationContext(object):
'''Encapsulate a time span XBRL context.'''
characteristic_key = 'startDate'
def __init__(self, start_date, end_date):
self.start_date = start_date
self.end_date = end_date
@property
def sort_key(self):
return self.start_date
def __repr__(self):
return '{}(start_date={}, end_date={})'.format(self.__class__,
self.start_date,
self.end_date)
@classmethod
def from_period(cls, period):
start_node = XBRLDocument.find_node(xml_dict=period, key='startDate')
start_date = dateutil.parser.parse(start_node).date()
end_node = XBRLDocument.find_node(xml_dict=period, key='endDate')
end_date = dateutil.parser.parse(end_node).date()
return cls(start_date, end_date)
class InstantContext(object):
characteristic_key = 'instant'
def __init__(self, instant):
self.instant = instant
@property
def sort_key(self):
return self.instant
def __repr__(self):
return '{}(instant={}'.format(self.__class__, self.instant)
@classmethod
def from_period(cls, period):
node = XBRLDocument.find_node(xml_dict=period, key='instant')
instant = dateutil.parser.parse(node).date()
return cls(instant=instant)
class XBRLDocument(object):
'''wrapper for XBRL documents, lazily downloads XBRL text.'''
def __init__(self, xbrl_url, gets_xbrl):
self._xbrl_url = xbrl_url
self._xbrl_dict_ = None
self._contexts = {}
self._get_xbrl = gets_xbrl
@property
def _xbrl_dict(self):
if not self._xbrl_dict_:
doc_text = self._get_xbrl(self._xbrl_url)
xml_dict = xmltodict.parse(doc_text)
self._xbrl_dict_ = self.find_node(xml_dict, 'xbrl')
return self._xbrl_dict_
def contexts(self, context_type):
contexts = self._contexts.get(context_type, {})
if not contexts:
context_nodes = self.find_node(xml_dict=self._xbrl_dict, key='context')
for context in context_nodes:
try:
period = self.find_node(xml_dict=context, key='period')
self.find_node(xml_dict=period, key=context_type.characteristic_key)
except KeyError:
continue
else:
contexts[context['@id']] = context_type.from_period(period)
self._contexts[context_type] = contexts
return contexts
def _latest_metric_value(self, possible_tags, contexts):
'''metric_params is a list of possible xbrl tags.
'''
for tag in possible_tags:
try:
metric_nodes = self._xbrl_dict[tag]
except KeyError:
continue
else:
if type(metric_nodes) != list:
metric_nodes = [metric_nodes]
break
else:
raise MetricNodeNotFound('Did not find any of {} in the document @ {}'\
.format(possible_tags, self._xbrl_url))
def key_func(value):
context_ref_id = value['@contextRef']
context = contexts[context_ref_id]
return context.sort_key
metric_node = sorted(metric_nodes,
key=key_func,
reverse=True)[0]
return float(metric_node['#text'])
def latest_metric_value(self, metric_params):
contexts = self.contexts(context_type=metric_params.context_type)
return self._latest_metric_value(possible_tags=metric_params.possible_tags,
contexts=contexts)
@staticmethod
def find_node(xml_dict, key):
'''OMG I hate XML.'''
try:
return xml_dict[key]
except KeyError:
return xml_dict['xbrli:{}'.format(key)]
@classmethod
def gets_XBRL_from_edgar(cls, xbrl_url):
from financial_fundamentals import edgar
return cls(xbrl_url=xbrl_url, gets_xbrl=edgar.get)
@classmethod
def gets_XBRL_locally(cls, file_path):
return cls(xbrl_url=file_path,
gets_xbrl=lambda file_path : open(file_path).read())
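    # Example usage (illustrative sketch; the file path and XBRL tag name are hypothetical):
    #     doc = XBRLDocument.gets_XBRL_locally('some_filing.xml')
    #     params = XBRLMetricParams(possible_tags=['us-gaap:Revenues'],
    #                               context_type=DurationContext)
    #     value = doc.latest_metric_value(params)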
class MetricNodeNotFound(ValueNotInFilingDocument):
pass | 34.482517 | 88 | 0.602515 | 4,749 | 0.963091 | 0 | 0 | 1,488 | 0.301764 | 0 | 0 | 536 | 0.1087 |
277624309012d3684e6506d164e645ba545c1547 | 6,235 | py | Python | geospacelab/datahub/sources/wdc/dst/downloader.py | JouleCai/GeoSpaceLab | 6cc498d3c32501e946931de596a840c73e83edb3 | [
"BSD-3-Clause"
]
| 19 | 2021-08-07T08:49:22.000Z | 2022-03-02T18:26:30.000Z | geospacelab/datahub/sources/wdc/dst/downloader.py | JouleCai/GeoSpaceLab | 6cc498d3c32501e946931de596a840c73e83edb3 | [
"BSD-3-Clause"
]
| 4 | 2021-11-09T05:53:42.000Z | 2022-03-25T11:49:37.000Z | geospacelab/datahub/sources/wdc/dst/downloader.py | JouleCai/GeoSpaceLab | 6cc498d3c32501e946931de596a840c73e83edb3 | [
"BSD-3-Clause"
]
| 3 | 2021-11-07T11:41:20.000Z | 2022-02-14T13:43:11.000Z | # Licensed under the BSD 3-Clause License
# Copyright (C) 2021 GeospaceLab (geospacelab)
# Author: Lei Cai, Space Physics and Astronomy, University of Oulu
__author__ = "Lei Cai"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "[email protected]"
__docformat__ = "reStructureText"
import datetime
import numpy as np
import requests
import bs4
import pathlib
import re
import netCDF4
import cftime
import geospacelab.toolbox.utilities.pydatetime as dttool
import geospacelab.toolbox.utilities.pylogging as mylog
import geospacelab.datahub.sources.wdc as wdc
from geospacelab import preferences as prf
class Downloader(object):
def __init__(self, dt_fr, dt_to, data_file_root_dir=None, user_email=wdc.default_user_email):
self.dt_fr = dt_fr
self.dt_to = dt_to
self.user_email = user_email
self.done = False
if data_file_root_dir is None:
self.data_file_root_dir = prf.datahub_data_root_dir / 'WDC' / 'Dst'
else:
self.data_file_root_dir = pathlib.Path(data_file_root_dir)
self.url_base = "http://wdc.kugi.kyoto-u.ac.jp"
self.download()
def download(self):
diff_months = dttool.get_diff_months(self.dt_fr, self.dt_to)
dt0 = datetime.datetime(self.dt_fr.year, self.dt_fr.month, 1)
r = requests.get(self.url_base + '/dstae/')
soup = bs4.BeautifulSoup(r.text, 'html.parser')
form_tag = soup.find_all('form')
r_method = form_tag[0].attrs['method']
r_action_url = self.url_base + form_tag[0].attrs['action']
for i in range(diff_months + 1):
dt_fr = dttool.get_next_n_months(dt0, i)
dt_to = dttool.get_next_n_months(dt0, i + 1) - datetime.timedelta(seconds=1)
delta_seconds = (dt_to - dt_fr).total_seconds()
file_name = 'WDC_Dst_' + dt_fr.strftime('%Y%m') + '.nc'
file_path = self.data_file_root_dir / '{:4d}'.format(dt_fr.year) / file_name
if file_path.is_file():
mylog.simpleinfo.info(
"The file {} exists in the directory {}.".format(file_path.name, file_path.parent.resolve()))
self.done = True
continue
else:
file_path.parent.resolve().mkdir(parents=True, exist_ok=True)
form_dst = {
'SCent': str(int(dt_fr.year/100)),
'STens': str(int((dt_fr.year - np.floor(dt_fr.year/100)*100) / 10)),
'SYear': str(int((dt_fr.year - np.floor(dt_fr.year/10)*10))),
'SMonth': '{:02d}'.format(dt_fr.month),
'ECent': str(int(dt_to.year/100)),
'ETens': str(int((dt_to.year - np.floor(dt_to.year/100)*100) / 10)),
'EYear': str(int((dt_to.year - np.floor(dt_to.year/10)*10))),
'EMonth': '{:02d}'.format(dt_to.month),
"Image Type": "GIF",
"COLOR": "COLOR",
"AE Sensitivity": "100",
"Dst Sensitivity": "20",
"Output": 'DST',
"Out format": "IAGA2002",
"Email": self.user_email,
}
if r_method.lower() == 'get':
mylog.StreamLogger.info("Requesting data from WDC ...")
r_file = requests.get(r_action_url, params=form_dst)
if "No data for your request" in r_file.text or "DATE TIME DOY" not in r_file.text:
mylog.StreamLogger.warning("No data for your request!")
return
with open(file_path.with_suffix('.dat'), 'w') as f:
f.write(r_file.text)
mylog.StreamLogger.info("Preparing to save the data in the netcdf format ...")
self.save_to_netcdf(r_file.text, file_path)
def save_to_netcdf(self, r_text, file_path):
results = re.findall(
r'^(\d+-\d+-\d+ \d+:\d+:\d+.\d+)\s*(\d+)\s*([+\-\d.]+)',
r_text,
re.M
)
results = list(zip(*results))
# time_array = np.array([(datetime.datetime.strptime(dtstr+'000', "%Y-%m-%d %H:%M:%S.%f")
# - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1)
# for dtstr in results[0]])
dts = [datetime.datetime.strptime(dtstr+'000', "%Y-%m-%d %H:%M:%S.%f") for dtstr in results[0]]
time_array = np.array(cftime.date2num(dts, units='seconds since 1970-01-01 00:00:00.0'))
print('From {} to {}.'.format(
datetime.datetime.utcfromtimestamp(time_array[0]),
datetime.datetime.utcfromtimestamp(time_array[-1]))
)
dst_array = np.array(results[2])
dst_array.astype(np.float32)
num_rows = len(results[0])
fnc = netCDF4.Dataset(file_path, 'w')
fnc.createDimension('UNIX_TIME', num_rows)
fnc.title = "WDC DST index"
time = fnc.createVariable('UNIX_TIME', np.float64, ('UNIX_TIME',))
time.units = 'seconds since 1970-01-01 00:00:00.0'
dst = fnc.createVariable('Dst', np.float32, ('UNIX_TIME',))
time[::] = time_array[::]
dst[::] = dst_array[::]
# for i, res in enumerate(results):
# dt = datetime.datetime.strptime(res[0]+'000', "%Y-%m-%d %H:%M:%S.%f")
# time[i] = (dt - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1)
# asy_d[i] = float(res[2])
# asy_h[i] = float(res[3])
# sym_d[i] = float(res[4])
# sym_h[i] = float(res[5])
fnc.close()
mylog.StreamLogger.info("The requested data has been downloaded and saved in the file {}.".format(file_path))
self.done = True
if __name__ == "__main__":
dt_fr1 = datetime.datetime(2000, 1, 14)
dt_to1 = datetime.datetime(2000, 6, 16)
Downloader(dt_fr1, dt_to1, user_email="[email protected]")
# form_dst = {'SCent': 20, 'STens': 1, 'SYear': 1, 'SMonth': '01', 'ECent': 20, 'ETens': 1, 'EYear': 1, 'EMonth': 12, "Image Type": "GIF", "COLOR": "COLOR", "AE Sensitivity": "100", "Dst Sensitivity": "20", "Output": 'DST', "Out format": "IAGA2002", "Email": "[email protected]"} | 38.018293 | 277 | 0.577225 | 5,112 | 0.819888 | 0 | 0 | 0 | 0 | 0 | 0 | 1,916 | 0.307298 |
277858124203940aedb7475ae3b0715e859459a7 | 914 | py | Python | services/login_service.py | EderBevacqua/OPE_ADS_3C | af6e2a1757c82080dc5f6c1f7759f29f408341cc | [
"Apache-2.0"
]
| null | null | null | services/login_service.py | EderBevacqua/OPE_ADS_3C | af6e2a1757c82080dc5f6c1f7759f29f408341cc | [
"Apache-2.0"
]
| 6 | 2020-06-14T21:50:25.000Z | 2020-06-15T19:39:01.000Z | services/login_service.py | EderBevacqua/OPE_ADS_4C | af6e2a1757c82080dc5f6c1f7759f29f408341cc | [
"Apache-2.0"
]
| null | null | null | from infra.usuario_dao import \
listar as dao_listar, \
consultar as dao_consultar, \
cadastrar as dao_cadastrar, \
alterar as dao_alterar, \
remover as dao_remover,\
loadUserEmail as dao_loadUserEmail,\
validarLogin as dao_validarLogin,\
validaMatriculaUsuario as dao_validaMatriculaUsuario,\
carregarUsuario as dao_carregarUsuario,\
cadastrarNovoLogin as dao_cadastrarNovoLogin,\
ativarConta as dao_ativarConta
def loadUserEmail(email):
return dao_loadUserEmail(email)
def validarLogin(email):
return dao_validarLogin(email)
def validaMatriculaUsuario(nMatricula):
return dao_validaMatriculaUsuario(nMatricula)
def carregarUsuario(user_id):
return dao_carregarUsuario(user_id)
def cadastrarNovoLogin(novoLogin):
return dao_cadastrarNovoLogin(novoLogin)
def ativarConta(numeroMatricula,senha):
return dao_ativarConta(numeroMatricula,senha) | 30.466667 | 58 | 0.789934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
27797d81ccdfc4597762321be84ef3aaa9517905 | 421 | py | Python | server/apps/org/permissions.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
]
| null | null | null | server/apps/org/permissions.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
]
| null | null | null | server/apps/org/permissions.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
]
| null | null | null | from rest_framework import permissions
class IsMemberOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to access (read/write)
"""
def has_permission(self, request, view):
return request.user.is_authenticated
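    # Typical usage (illustrative): attach to a DRF view, e.g.
    #     class OrgViewSet(viewsets.ModelViewSet):
    #         permission_classes = (IsMemberOnly,)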
def has_object_permission(self, request, view, obj):
return obj is None or request.user.is_staff or obj.has_access(user=request.user) | 32.384615 | 88 | 0.736342 | 380 | 0.902613 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.213777 |
277b82514a7515e164e8b41c060b3213bef8d2d0 | 473 | py | Python | multi_domain/utils.py | newbieyd/multi-domain_NER | 78443f79cebf7c2fe1058bc6ba2dc793d0907574 | [
"Apache-2.0"
]
| 3 | 2020-10-26T02:23:57.000Z | 2021-01-28T09:29:35.000Z | multi_domain/utils.py | newbieyd/multi-domain_NER | 78443f79cebf7c2fe1058bc6ba2dc793d0907574 | [
"Apache-2.0"
]
| null | null | null | multi_domain/utils.py | newbieyd/multi-domain_NER | 78443f79cebf7c2fe1058bc6ba2dc793d0907574 | [
"Apache-2.0"
]
| 1 | 2021-01-28T09:29:39.000Z | 2021-01-28T09:29:39.000Z | import random
import torch
import numpy as np
# Set the random seed; once the seed is fixed, the random numbers generated afterwards are deterministic
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# Compute evaluation metrics: precision, recall and F1 score
def calculate(data):
p = -1
r = -1
f1 = -1
if data[0] > 0:
p = data[2] / data[0]
if data[1] > 0:
r = data[2] / data[1]
if p != -1 and r != -1 and p + r != 0:
f1 = 2 * p * r / (p + r)
return p, r, f1
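if __name__ == "__main__":
    # Quick check (illustrative): data is assumed to be
    # [num_predicted_entities, num_gold_entities, num_correct_entities].
    set_seed(42)
    print(calculate([10, 8, 6]))  # (0.6, 0.75, 0.666...)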
| 19.708333 | 43 | 0.5074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.262787 |
277c94d5ec87dc5540702d806c2c486ab6812053 | 769 | py | Python | server/app/budget/migrations/0004_auto_20210521_1347.py | catvitalio/personal-budget | b4470115ebbfd185a8a781a2024787cbfe822639 | [
"MIT"
]
| null | null | null | server/app/budget/migrations/0004_auto_20210521_1347.py | catvitalio/personal-budget | b4470115ebbfd185a8a781a2024787cbfe822639 | [
"MIT"
]
| null | null | null | server/app/budget/migrations/0004_auto_20210521_1347.py | catvitalio/personal-budget | b4470115ebbfd185a8a781a2024787cbfe822639 | [
"MIT"
]
| null | null | null | # Generated by Django 3.1.2 on 2021-05-21 06:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('budget', '0003_auto_20210521_1252'),
]
operations = [
migrations.AlterField(
model_name='expense',
name='tags',
field=models.ManyToManyField(blank=True, to='budget.ExpenseTag'),
),
migrations.AlterField(
model_name='income',
name='tags',
field=models.ManyToManyField(blank=True, to='budget.IncomeTag'),
),
migrations.AlterField(
model_name='transfer',
name='tags',
field=models.ManyToManyField(blank=True, to='budget.TransferTag'),
),
]
| 26.517241 | 78 | 0.579974 | 676 | 0.879064 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.236671 |
277c9b03dc3a5dfcdb9e0188ddbb3e71c27016ed | 146 | py | Python | recommendations_system/io_/__init__.py | mmikolajczak/recommendation_system_hetrec2011_movielens | 3ae13e62605ffbf5517bc2079e086a400de48748 | [
"MIT"
]
| 4 | 2019-12-04T08:42:21.000Z | 2020-06-07T07:22:08.000Z | recommendations_system/io_/__init__.py | mmikolajczak/recommendation_system_hetrec2011_movielens | 3ae13e62605ffbf5517bc2079e086a400de48748 | [
"MIT"
]
| null | null | null | recommendations_system/io_/__init__.py | mmikolajczak/recommendation_system_hetrec2011_movielens | 3ae13e62605ffbf5517bc2079e086a400de48748 | [
"MIT"
]
| null | null | null | from ._load_hetrec import load_hetrec_to_df
from ._df2ffm import DF2FFMConverter
from ._utils import load_ffm, save_ffm, load_pickle, save_pickle
| 36.5 | 64 | 0.856164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
277cde189ead3857304afdc2356fa310f621847b | 7,689 | py | Python | Stack_Exchange/py2_text.py | sadahanu/DataScience_SideProject | f476fe939d660099e3209b91bbd5cb883b37b2d3 | [
"MIT"
]
| null | null | null | Stack_Exchange/py2_text.py | sadahanu/DataScience_SideProject | f476fe939d660099e3209b91bbd5cb883b37b2d3 | [
"MIT"
]
| null | null | null | Stack_Exchange/py2_text.py | sadahanu/DataScience_SideProject | f476fe939d660099e3209b91bbd5cb883b37b2d3 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 13 23:10:40 2016
@author: zhouyu
"""
#%%
import pandas as pd
import numpy as np
import os
import re
import nltk
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
os.chdir('/Users/zhouyu/Documents/Zhou_Yu/DS/kaggle_challenge/text processing')
#%% step1: import data
import glob
alltrainfiles = glob.glob("*.csv")
raw_text =pd.concat((pd.read_csv(f,index_col = None, header =0) for f in alltrainfiles),ignore_index = True)
#raw_text = pd.read_csv("crypto.csv",index_col = None)
#%% step2: clean data, remove HTML, symbols and stopwords
def text_to_words(rawtext):
#split into individual words, remove HTML, only keep letters and number
# convert letters to lower case
reg_c = re.compile('[^a-zA-Z0-9_\\+\\-]')
words = [word for word in reg_c.split(rawtext.lower()) if word!='']
stops = set(stopwords.words("english"))
#take out stop words
meaningful_words = [w for w in words if not w in stops]
return(" ".join(meaningful_words))
def target_to_words(rawtext):
#only return the first target word
reg_c = re.compile('[^a-zA-Z0-9_\\+\\-]')
words = [word for word in reg_c.split(rawtext.lower()) if word!='']
stops = set(stopwords.words("english"))
#take out stop words
meaningful_words = [w for w in words if not w in stops]
return(meaningful_words[0])
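#%% quick sanity check of the cleaning helper (illustrative example, not in the original pipeline)
print text_to_words("How to merge two dictionaries in Python?")  # -> roughly "merge two dictionaries python"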
#%%
cleaned_post = []
cleaned_target = []
sz = raw_text.shape[0]
for i in range(0,sz):
raw_post = raw_text['title'][i]+' '+raw_text['content'][i]
raw_post = BeautifulSoup(raw_post).get_text()
cleaned_post.append(text_to_words(raw_post))
cleaned_target.append(target_to_words(raw_text['tags'][i]))
if((i+1)%1000==0):
        print "Cleaning %d of %d\n" % (i+1,sz)
#print cleaned_post[1]
#%% step3: creating features from a bag of words
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = None, \
max_features = 5000)
X_train_counts = count_vect.fit_transform(cleaned_post)
#X_target_counts = count_vect.fit_transform(cleaned_target)
from sklearn.feature_extraction.text import TfidfTransformer
tf_transformer = TfidfTransformer(use_idf = False).fit(X_train_counts)
X_train_tf = tf_transformer.transform(X_train_counts)
#%% training a classification model
# METHOD 1: build a RandomForestClassifier...
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators = 10)
forest = rf.fit(X_train_tf, cleaned_target)
#%% examine the result produced by METHOD 1:
pred = rf.predict(X_train_tf)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from collections import OrderedDict
import matplotlib.pyplot as plt
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
cnf_matrix = confusion_matrix(cleaned_target,pred)
#target_names = set(cleaned_target)
#np.set_printoptions(precision = 2)
#plt.figure()
#plot_confusion_matrix(cnf_matrix,classes = target_names,normalize = True,title='Normalized confusion matrix')
#plt.show()
target_names = list(OrderedDict.fromkeys(cleaned_target))
print(classification_report(cleaned_target,pred,target_names = target_names))
#######
#%% Method 2: directly predict the tag as the term with the highest tf-idf score
# find the highest tf-idf
#step1: select a random sample
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from collections import OrderedDict
sample = np.random.choice(87000,1000,replace = False)
tf_pred = []
tf_target = []
for i in range(0,1000):
r = sample[i];
tf_target.append(cleaned_target[r])
tf_post = X_train_tf.getrow(r).toarray()
tf_post_max = tf_post.argmax()
tf_pred.append(count_vect.get_feature_names()[tf_post_max])
tf_cnf_matrix = confusion_matrix(tf_target,tf_pred)
target_names = list(OrderedDict.fromkeys(tf_pred+tf_target))
print(classification_report(tf_target, tf_pred,target_names =target_names))
#%% evaluate test set
test = pd.read_csv('test/test.csv')
cleaned_test = []
test_sz = test.shape[0]
for i in range(0,test_sz):
test_post = test['title'][i]+' '+test['content'][i]
test_post = BeautifulSoup(test_post).get_text()
cleaned_test.append(text_to_words(test_post))
if((i+1)%1000==0):
        print "Cleaning %d of %d\n" % (i+1,test_sz)
#%% use random forest
X_test_counts = count_vect.fit_transform(cleaned_test)
X_test_tf = tf_transformer.transform(X_test_counts)
result = forest.predict(X_test_counts)
# use max tf-idf
#%%
test_pred = []
for i in range(0,test_sz):
tf_test = X_test_tf.getrow(i).toarray()
# just return one tag
#tf_test_max = tf_test.argmax()
#test_pred.append(count_vect.get_feature_names()[tf_test_max])
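    # argpartition places the indices of the 4 highest-scoring terms in the last 4 positions (order not guaranteed)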
ind = np.argpartition(tf_test,-4)[:,-4:]
pred_tags = [count_vect.get_feature_names()[j] for j in ind[0,:].tolist()]
test_pred.append( " ".join(pred_tags))
if((i+1)%1000==0):
print "Predicting %d of %d\n" % (i+1,test_sz)
result = test_pred
#%% prepare submission
submission = pd.read_csv('test/sample_submission.csv')
submission.iloc[:,1] = result
submission.to_csv('test/submission.csv',index = None)
#%% try to use NMF model can not be mapped to specific question...
n_features = 5000
n_topics = 10
n_samples = test_sz
n_top_words = 4
def get_top_words(model, feature_names, n_top_words):
res = []
for topic_idx, topic in enumerate(model.components_):
tags = " ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]])
res.append(tags)
return res
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF
from time import time
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(cleaned_test)
# Fit the NMF model
print("Fitting the NMF model (Frobenius norm) with tf-idf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model (Frobenius norm):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
#print_top_words(nmf, tfidf_feature_names, n_top_words)
result = get_top_words(nmf,tfidf_feature_names,n_top_words) | 38.253731 | 110 | 0.694629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,104 | 0.273638 |
277d5ee604563ea1b0d6b261faa48f33c379cba6 | 1,210 | py | Python | tests/test_types.py | apparentlymart/python-stl | befec2266b4a6b201ecc3c4c42007a5c24e90566 | [
"MIT"
]
| 9 | 2015-01-25T19:46:31.000Z | 2018-07-04T03:32:46.000Z | tests/test_types.py | apparentlymart/python-stl | befec2266b4a6b201ecc3c4c42007a5c24e90566 | [
"MIT"
]
| 10 | 2015-03-10T19:04:34.000Z | 2018-11-03T04:46:57.000Z | tests/test_types.py | apparentlymart/python-stl | befec2266b4a6b201ecc3c4c42007a5c24e90566 | [
"MIT"
]
| 15 | 2015-03-07T21:46:44.000Z | 2021-12-02T20:03:21.000Z | import unittest
from stl.types import *
class TestTypes(unittest.TestCase):
def test_facet_geometry(self):
facet = Facet(
(1, 0, 0),
[
(0, 0, 0),
(1, 0, 0),
(0, 1, 0),
],
)
self.assertEqual(facet.a, 1.0)
self.assertEqual(facet.b, 1.0)
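        # the third side joins (1, 0, 0) and (0, 1, 0), so its expected length is sqrt(2)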
self.assertAlmostEqual(facet.c, 1.4142135623730951)
self.assertAlmostEqual(
facet.perimeter,
1.0 + 1.0 + 1.4142135623730951,
)
self.assertAlmostEqual(facet.area, 0.5)
def test_solid_geometry(self):
solid = Solid(
"test",
[
Facet(
(1, 0, 0),
[
(0, 0, 0),
(1, 0, 0),
(0, 1, 0),
],
),
Facet(
(1, 0, 0),
[
(0, 0, 0),
(1, 0, 0),
(0, 0, 1),
],
),
],
)
self.assertAlmostEqual(solid.surface_area, 0.5 + 0.5)
| 23.72549 | 61 | 0.333884 | 1,167 | 0.964463 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.004959 |
277d94d70e278173e780c161c144680891ccd6df | 116 | py | Python | server/domain/datasets/exceptions.py | multi-coop/catalogage-donnees | 1d70401ff6c7b01ec051460a253cb105adf65911 | [
"MIT"
]
| null | null | null | server/domain/datasets/exceptions.py | multi-coop/catalogage-donnees | 1d70401ff6c7b01ec051460a253cb105adf65911 | [
"MIT"
]
| 14 | 2022-01-25T17:56:52.000Z | 2022-01-28T17:47:59.000Z | server/domain/datasets/exceptions.py | multi-coop/catalogage-donnees | 1d70401ff6c7b01ec051460a253cb105adf65911 | [
"MIT"
]
| null | null | null | from ..common.exceptions import DoesNotExist
class DatasetDoesNotExist(DoesNotExist):
entity_name = "Dataset"
| 19.333333 | 44 | 0.793103 | 68 | 0.586207 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.077586 |
277dc18fc44eab6c4ec0aeec52c4030e30b5d869 | 967 | py | Python | pysimplegui/DemoPrograms/Demo_Design_Pattern_Multiple_Windows2.py | konsan1101/py-etc | bcca13119b0d2453866988404fd1c4976f55d4d5 | [
"MIT"
]
| null | null | null | pysimplegui/DemoPrograms/Demo_Design_Pattern_Multiple_Windows2.py | konsan1101/py-etc | bcca13119b0d2453866988404fd1c4976f55d4d5 | [
"MIT"
]
| 2 | 2020-06-06T00:30:56.000Z | 2021-06-10T22:30:37.000Z | pysimplegui/DemoPrograms/Demo_Design_Pattern_Multiple_Windows2.py | konsan1101/py-etc | bcca13119b0d2453866988404fd1c4976f55d4d5 | [
"MIT"
]
| null | null | null | import PySimpleGUI as sg
"""
PySimpleGUI The Complete Course
Lesson 7
Multiple Independent Windows
"""
# Design pattern 2 - First window remains active
layout = [[ sg.Text('Window 1'),],
[sg.Input()],
[sg.Text('', size=(20,1), key='-OUTPUT-')],
[sg.Button('Launch 2'), sg.Button('Exit')]]
window1 = sg.Window('Window 1', layout)
window2_active = False
while True:
event1, values1 = window1.read(timeout=100)
    if event1 is None or event1 == 'Exit':
        break
    window1['-OUTPUT-'].update(values1[0])
if not window2_active and event1 == 'Launch 2':
window2_active = True
layout2 = [[sg.Text('Window 2')],
[sg.Button('Exit')]]
window2 = sg.Window('Window 2', layout2)
if window2_active:
ev2, vals2 = window2.read(timeout=100)
if ev2 is None or ev2 == 'Exit':
window2_active = False
window2.close()
window1.close()
| 24.794872 | 53 | 0.584281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.251293 |
2780f36861b56fbdc559cec62df68437bb7c262c | 6,277 | py | Python | pexpo.py | joeyspacerocks/pexpo | 1b668bd3d8b399570f938d0b5e9e9545db2ad059 | [
"Apache-2.0"
]
| 1 | 2016-05-07T12:32:36.000Z | 2016-05-07T12:32:36.000Z | pexpo.py | joeyspacerocks/pexpo | 1b668bd3d8b399570f938d0b5e9e9545db2ad059 | [
"Apache-2.0"
]
| 33 | 2020-07-02T10:24:56.000Z | 2020-08-03T17:26:23.000Z | pexpo.py | joeyspacerocks/pexpo | 1b668bd3d8b399570f938d0b5e9e9545db2ad059 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/python
import os
import sys
import StringIO
import argparse
import json
import zipfile
from PIL import Image, ImageColor
import xml.etree.cElementTree as ET
# TODO: compose multiple layers
def compose_image(indexes, archive):
file = 'layer' + str(indexes[0]) + '.png'
return Image.open(StringIO.StringIO(archive.read(file)))
def aggregate_sprites(sprites, data):
for d in data:
for s in d['sprites']:
sprites.append(s)
if 'mask' in s: sprites.append(s['mask'])
def pack_images(filename, data):
sprites = []
aggregate_sprites(sprites, data['anims'])
aggregate_sprites(sprites, data['tiles'])
# sort sprites by height
sprites.sort(key = lambda s: s['image'].size[1], reverse = True)
# pack
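    # greedy first-fit packing: scan the occupancy mask top-to-bottom, left-to-right and place each sprite at the first free spot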
dest = Image.new("RGBA", (256, 256))
mask = Image.new("1", (256, 256))
dp = dest.load()
mp = mask.load()
# masq.resize(image_width, 1);
# dest.resize(image_width, 1);
for s in sprites:
idx = s['image'].size[0]
idy = s['image'].size[1]
# assert(idx <= image_width);
found = False
for ty in range(2048):
if found: break
# if(ty + idy > dest.dat.size()) {
# masq.resize(image_width, ty + idy);
# dest.resize(image_width, ty + idy);
# }
for tx in range(dest.size[0] - idx):
if found: break
valid = not(mp[tx, ty] or mp[tx, ty + idy - 1] or mp[tx + idx - 1, ty] or mp[tx + idx - 1, ty + idy - 1])
if valid:
for ity in range(idy):
if not valid: break
for itx in range(idx):
if not valid: break
if mp[tx + itx, ty + ity]:
valid = False
if valid:
dest.paste(s['image'], (tx, ty))
mask.paste(int(True), (tx, ty, tx + idx, ty + idy))
s["x"] = tx
s["y"] = ty
s["w"] = idx
s["h"] = idy
found = True
# write image
dest.save(filename, 'png')
def write_meta(filename, imagefile, data):
root = ET.Element("spritesheet", image=imagefile)
aroot = ET.SubElement(root, "anims")
for a in data['anims']:
anode = ET.SubElement(aroot, "a", name=a['name'])
for s in a["sprites"]:
ET.SubElement(anode, "s", x=str(s['x']), y=str(s['y']), w=str(s['w']), h=str(s['h']), d=str(s['duration']))
if 'mask' in s:
mnode = ET.SubElement(snode, "mask")
ET.SubElement(mnode, "s", x=str(s['x']), y=str(s['y']), w=str(s['w']), h=str(s['h']))
sroot = ET.SubElement(root, "sprites")
for t in data['tiles']:
snode = ET.SubElement(sroot, "sprite", name=t['name'])
for s in t["sprites"]:
mnode = ET.SubElement(snode, "s", x=str(s['x']), y=str(s['y']), w=str(s['w']), h=str(s['h']))
if 'mask' in s:
mask = s['mask']
mnode.set('mx', str(mask['x']))
mnode.set('my', str(mask['y']))
tree = ET.ElementTree(root)
tree.write(filename)
def grab_tiles(data, duration, img, mask, base, count, tw, th):
img_w = img.size[0]
tpr = img_w / tw
x = (base % tpr) * tw
y = (base / tpr) * th
sprites = []
data['sprites'] = sprites
for i in range(count):
box = (x, y, x + tw, y + th)
sprite = {}
sprites.append(sprite)
sprite['image'] = img.crop(box)
if mask is not None:
sprite['mask'] = { 'image': mask.crop(box) }
sprite['duration'] = duration
x += tw
if x >= img_w:
x = 0
y += th
def generate_tileset(path, file, outpng):
archive = zipfile.ZipFile(os.path.join(path, file), 'r')
src = json.loads(archive.read('docData.json'))
tileset = src['tileset']
tw = tileset['tileWidth']
th = tileset['tileHeight']
per_row = tileset['tilesWide']
tile_count = tileset['numTiles']
iw = per_row * tw
ih = (tile_count / per_row) * th
dest = Image.new("RGBA", (iw, ih))
tx = 0
ty = 0
for i in range(tile_count):
tile_img = Image.open(StringIO.StringIO(archive.read("tile%d.png" % i)))
dest.paste(tile_img, (tx * tw, ty * th))
tx += 1
if tx >= per_row:
tx = 0
ty += 1
dest.save(outpng, 'png')
def compile_sprite_data(data, path, file):
archive = zipfile.ZipFile(os.path.join(path, file), 'r')
src = json.loads(archive.read('docData.json'))
canvas = src['canvas']
anims = src['animations']
w = canvas['width']
h = canvas['height']
tw = canvas['tileWidth']
th = canvas['tileHeight']
if tw == 0 or tw > w: tw = w
if th == 0 or th > h: th = h
# compose all visible layers, except for the magic 'mask' layer
layers = []
masks = []
for i, layer in canvas['layers'].items():
if not layer['hidden']:
if layer['name'] == 'mask':
masks.append(i)
else:
layers.append(i)
img = compose_image(layers, archive)
if len(masks) > 0:
mask = compose_image(masks, archive)
else:
mask = None
name = os.path.splitext(file)[0]
if len(anims) > 0:
print ' - ' + name + ' - export animations (' + str(len(anims)) + ')'
for ai in anims.keys():
anim = anims[ai]
base = anim['baseTile']
length = anim['length']
duration = anim['frameDuration']
out = {}
out['name'] = name + '-' + anim['name']
grab_tiles(out, duration, img, mask, base, length, tw, th)
data['anims'].append(out)
else:
print ' - ' + name + ' - export tilemap'
out = { 'name': name }
grab_tiles(out, 0, img, mask, 0, (w / tw) * (h / th), tw, th)
data['tiles'].append(out)
return data
def main(script, argv):
parser = argparse.ArgumentParser(description='Export PNGs and meta-data from PyxelEdit files.')
parser.add_argument('path', help='path to pyxel files (directory or single file)', metavar='<path>')
parser.add_argument('-t', '--tileset', help='generate tileset instead of spritesheet', action='store_true', dest='tileset')
parser.add_argument('-o', '--out', help='filename of assembled PNG', required=True, metavar='<file>', dest='outpng')
args = parser.parse_args()
path = args.path
if args.tileset:
generate_tileset(os.path.dirname(path), os.path.basename(path), args.outpng)
else:
data = { 'anims':[], 'tiles':[] }
if os.path.isfile(path):
compile_sprite_data(data, os.path.dirname(path), os.path.basename(path))
else:
for i in os.listdir(path):
if i.endswith(".pyxel"):
compile_sprite_data(data, path, i)
pack_images(args.outpng, data)
write_meta(os.path.splitext(args.outpng)[0] + '.xml', os.path.basename(args.outpng), data)
if __name__ == '__main__':
sys.exit(main(sys.argv[0], sys.argv[1:]))
| 25.51626 | 124 | 0.608252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,319 | 0.210132 |
27811ac83801b7707ced28bf3be304104b0b4fe0 | 212 | py | Python | marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/onsets/admin.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | [
"Apache-2.0"
]
| null | null | null | marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/onsets/admin.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | [
"Apache-2.0"
]
| null | null | null | marsyas-vamp/marsyas/src/django/birdsong/application/birdsong/onsets/admin.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | [
"Apache-2.0"
]
| null | null | null | from django.contrib import admin
from calls.onsets.models import Recording
class RecordingAdmin(admin.ModelAdmin):
list_display = ('audio', 'image', 'length')
admin.site.register(Recording, RecordingAdmin)
| 26.5 | 47 | 0.783019 | 87 | 0.410377 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.103774 |
27822976fe232c6827409ee540cdeeef7ab6474f | 505 | py | Python | secrets_sample.py | barbudor/upy-door-sensor | ce34226203c0f08b7b10dcdcb6f6f4f2f8689754 | [
"MIT"
]
| 5 | 2020-04-12T17:10:23.000Z | 2021-09-24T19:56:01.000Z | secrets_sample.py | barbudor/upy-door-sensor | ce34226203c0f08b7b10dcdcb6f6f4f2f8689754 | [
"MIT"
]
| null | null | null | secrets_sample.py | barbudor/upy-door-sensor | ce34226203c0f08b7b10dcdcb6f6f4f2f8689754 | [
"MIT"
]
| null | null | null | """
Your secrets and configurations items
you must rename this file secrets.py and customize as needed
"""
# Wifi settings
SSID = "mywifi"
PASSWD = "thesecretpassword"
# Network - comment to use DHCP (but slighly slower)
IPADDR = "192.168.0.100"
MASK = "255.255.255.0"
GW = "192.168.0.1"
DNS = "1.1.1.1"
# MQTT settings
MQTT_SVR = "192.168.0.200"
MQTT_USER = "mqttuser"
MQTT_PWD = "mqttpasswd"
# Door status topic
MQTT_TOPIC = "stat/door1/STATUS"
MQTT_PAYLOAD = '{{"door":"open", "vcc":{:4.2f}}}'
| 25.25 | 64 | 0.683168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 384 | 0.760396 |
27824e9b0a8bb25e5664e1e7337a726628d28d71 | 2,000 | py | Python | src/d007/index.py | Yangfan2016/learn-python | 84a375cda9d51349ae0a0faf1dc6444ac83ed948 | [
"MIT"
]
| null | null | null | src/d007/index.py | Yangfan2016/learn-python | 84a375cda9d51349ae0a0faf1dc6444ac83ed948 | [
"MIT"
]
| null | null | null | src/d007/index.py | Yangfan2016/learn-python | 84a375cda9d51349ae0a0faf1dc6444ac83ed948 | [
"MIT"
]
| null | null | null | # 练习1:在屏幕上显示跑马灯文字
from random import randint
import os
import time
def marquee():
content = "我很开心。。。"
while True:
os.system("clear")
print(content)
time.sleep(.2)
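        # rotate the string left by one character each frame to create the scrolling effect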
content = content[1:]+content[0]
# marquee()
# Exercise 2: Design a function that generates a verification code of a given length, made up of upper/lowercase letters and digits
def genrentae_code(l=4):
all = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
res = ''
for _ in range(l):
index = randint(0, len(all)-1)
res += all[index]
return res
# print(genrentae_code())
# print(genrentae_code(6))
# Exercise 3: Design a function that returns the extension (suffix) of a given filename
def get_suffix(filename):
pos = filename.rfind(".")
if pos > 0:
return filename[pos+1:]
return ""
# print(get_suffix("a.doc"))
# print(get_suffix("a.tmp.txt"))
# print(get_suffix("abac"))
# Exercise 4: Design a function that returns the largest and second-largest values in a given list
def max2(arr):
    l = len(arr)
    # order the first two elements so that m1 holds the larger and m2 the smaller
    m1, m2 = (arr[0], arr[1]) if arr[0] > arr[1] else (arr[1], arr[0])
    for i in range(2, l):
if arr[i] > m1:
m2 = m1
m1 = arr[i]
elif arr[i] > m2:
m2 = arr[i]
return m1, m2
# print(max2([1,3,5,7]))
# Exercise 5: Calculate which day of the year a given year/month/day falls on
def is_leap_year(y):
return y % 4 == 0 and y % 100 != 0 or y % 400 == 0
def which_day(y, m, d):
map = {
1: 31,
3: 31,
5: 31,
7: 31,
8: 31,
10: 31,
12: 31,
4: 30,
6: 30,
9: 30,
11: 30,
2: 29 if is_leap_year(y) else 28,
}
day = d
for i in range(1, m):
day += map[i]
return day
# print(which_day(2019, 5, 25))
# Exercise 6: Print Pascal's (Yang Hui's) triangle
def pascal_triangle(row):
if row < 2:
return print("1")
# if row<3:
# return print("1\n1-1")
arr = [1]
brr = arr
for i in range(1, row+1):
arr = [1]*i
for j in range(1, len(arr)-1):
arr[j] = brr[j-1]+brr[j]
print('-'.join(str(i) for i in arr))
brr = arr
pascal_triangle(5)
| 15.151515 | 74 | 0.515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 740 | 0.327144 |
27830a56e67664f45133b9939b59406608cd3555 | 147 | py | Python | maintenance_mode/utils.py | fallen/django-maintenance-mode | 826b18ca3c768ab5dbd3f665886106b0eb702de9 | [
"MIT"
]
| 282 | 2015-06-08T22:57:44.000Z | 2022-03-28T17:44:40.000Z | maintenance_mode/utils.py | fallen/django-maintenance-mode | 826b18ca3c768ab5dbd3f665886106b0eb702de9 | [
"MIT"
]
| 82 | 2015-08-24T09:27:07.000Z | 2022-02-08T20:12:48.000Z | maintenance_mode/utils.py | fallen/django-maintenance-mode | 826b18ca3c768ab5dbd3f665886106b0eb702de9 | [
"MIT"
]
| 59 | 2015-06-16T13:06:10.000Z | 2022-02-08T20:10:20.000Z | # -*- coding: utf-8 -*-
def get_client_ip_address(request):
"""
Get the client IP Address.
"""
return request.META['REMOTE_ADDR']
| 18.375 | 38 | 0.612245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.530612 |
27845720229d1cbede001bd8fe6879c12c7dc607 | 2,117 | py | Python | dex/command/commands/DexUnreachable.py | TomWeaver18/dexter | d89b66ca6a0d1ba29a4c32f152773a34ce2881e8 | [
"MIT"
]
| null | null | null | dex/command/commands/DexUnreachable.py | TomWeaver18/dexter | d89b66ca6a0d1ba29a4c32f152773a34ce2881e8 | [
"MIT"
]
| null | null | null | dex/command/commands/DexUnreachable.py | TomWeaver18/dexter | d89b66ca6a0d1ba29a4c32f152773a34ce2881e8 | [
"MIT"
]
| null | null | null | # DExTer : Debugging Experience Tester
# ~~~~~~ ~ ~~ ~ ~~
#
# Copyright (c) 2019 by SN Systems Ltd., Sony Interactive Entertainment Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from dex.command.CommandBase import CommandBase
from dex.dextIR import ValueIR
class DexUnreachable(CommandBase):
"""Expect the source line this is found on will never be stepped on to.
DexUnreachable()
See Commands.md for more info.
"""
    def __init__(self):
super(DexUnreachable, self).__init__()
pass
@staticmethod
def get_name():
return __class__.__name__
def eval(self, debugger):
# If we're ever called, at all, then we're evaluating a line that has
# been marked as unreachable. Which means a failure.
vir = ValueIR(expression="Unreachable",
value="True", type_name=None,
error_string=None,
could_evaluate=True,
is_optimized_away=True,
is_irretrievable=False)
return {'DexUnreachable' : vir}
| 38.490909 | 79 | 0.691072 | 818 | 0.386396 | 0 | 0 | 67 | 0.031649 | 0 | 0 | 1,486 | 0.701937 |
2786bc277b70a50e0d89afd7f11a15c26b25b2fa | 1,825 | py | Python | tests/test_platform.py | rennerocha/bottery | a082cfa1c21f9aa32ea1526ea3004b581f9e0cd4 | [
"MIT"
]
| null | null | null | tests/test_platform.py | rennerocha/bottery | a082cfa1c21f9aa32ea1526ea3004b581f9e0cd4 | [
"MIT"
]
| null | null | null | tests/test_platform.py | rennerocha/bottery | a082cfa1c21f9aa32ea1526ea3004b581f9e0cd4 | [
"MIT"
]
| null | null | null | import inspect
import pytest
from bottery.platform import BaseEngine
def test_baseengine_platform_name_not_implemented():
"""Check if attributes from the public API raise NotImplementedError"""
engine = BaseEngine()
with pytest.raises(NotImplementedError):
getattr(engine, 'platform')
@pytest.mark.asyncio
@pytest.mark.parametrize('method_name', ['build_message', 'configure'])
async def test_baseengine_not_implemented_calls(method_name):
"""Check if method calls from public API raise NotImplementedError"""
engine = BaseEngine()
with pytest.raises(NotImplementedError):
method = getattr(engine, method_name)
if inspect.iscoroutinefunction(method):
await method()
else:
method()
def sync_view(message):
return 'pong'
async def async_view(message):
return 'pong'
@pytest.mark.asyncio
@pytest.mark.parametrize('view', [sync_view, async_view], ids=['sync', 'async']) # noqa
async def test_get_response_from_views(view):
"""
Test if get_response can call an async/sync view and get its response.
"""
engine = BaseEngine()
response = await engine.get_response(view, 'ping')
assert response == 'pong'
def test_baseengine_handling_message():
fake_handler = type('Handler', (object,), {'check': lambda msg: True})
view = True
engine = BaseEngine()
engine.registered_handlers = [(fake_handler, view)]
returned_view = engine.discovery_view('new message')
assert returned_view
def test_baseengine_handler_not_found():
fake_handler = type('Handler', (object,), {'check': lambda msg: False})
view = True
engine = BaseEngine()
engine.registered_handlers = [(fake_handler, view)]
returned_view = engine.discovery_view('new message')
assert not returned_view
| 27.651515 | 88 | 0.706301 | 0 | 0 | 0 | 0 | 813 | 0.445479 | 658 | 0.360548 | 382 | 0.209315 |
27871567eec68506ebdf03b82d43a00ac9173647 | 26,071 | py | Python | sarkas/potentials/core.py | lucianogsilvestri/sarkas | f4ab00014d09976561fbd4349b9d0610e47a61e1 | [
"MIT"
]
| null | null | null | sarkas/potentials/core.py | lucianogsilvestri/sarkas | f4ab00014d09976561fbd4349b9d0610e47a61e1 | [
"MIT"
]
| null | null | null | sarkas/potentials/core.py | lucianogsilvestri/sarkas | f4ab00014d09976561fbd4349b9d0610e47a61e1 | [
"MIT"
]
| null | null | null | """
Module handling the potential class.
"""
from copy import deepcopy
from fmm3dpy import hfmm3d, lfmm3d
from numpy import array, ndarray, pi, sqrt, tanh
from warnings import warn
from ..utilities.exceptions import AlgorithmWarning
from ..utilities.fdints import fdm1h, invfd1h
from .force_pm import force_optimized_green_function as gf_opt
from .force_pm import update as pm_update
from .force_pp import update as pp_update
from .force_pp import update_0D as pp_update_0D
class Potential:
r"""
Parameters specific to potential choice.
Attributes
----------
a_rs : float
Short-range cutoff to deal with divergence of the potential for r -> 0.
box_lengths : array
Pointer to :attr:`sarkas.core.Parameters.box_lengths`.
box_volume : float
Pointer to :attr:`sarkas.core.Parameters.box_volume`.
force_error : float
Force error due to the choice of the algorithm.
fourpie0 : float
Coulomb constant :math:`4 \pi \epsilon_0`.
kappa : float
Inverse screening length.
linked_list_on : bool
Flag for choosing the Linked cell list algorithm.
matrix : numpy.ndarray
Matrix of potential's parameters.
measure : bool
Flag for calculating the histogram for the radial distribution function.
It is set to `False` during equilibration phase and changed to `True` during production phase.
method : str
Algorithm method. Choices = `["PP", "PPPM", "FMM", "Brute"]`. \n
`"PP"` = Linked Cell List (default).
`"PPPM"` = Particle-Particle Particle-Mesh.
`"FMM"` = Fast Multipole Method.
`"Brute"` = corresponds to calculating the distance between all pair of particles within a distance :math:`L/2`.
pbox_lengths : numpy.ndarray
Pointer to :attr:`sarkas.core.Parameters.pbox_lengths`
pbox_volume : float
Pointer to :attr:`sarkas.core.Parameters.pbox_lengths`
pppm_on : bool
Flag for turning on the PPPM algorithm.
QFactor : float
Sum of the squared of the charges.
rc : float
Cutoff radius for the Linked Cell List algorithm.
screening_length_type : str
Choice of ways to calculate the screening length. \n
Choices = `[thomas-fermi, tf, debye, debye-huckel, db, moliere, custom, unscreened]`. \n
Default = thomas-fermi
screening_length : float
Value of the screening length.
total_net_charge : float
Sum of all the charges.
type : str
Type of potential. \n
Choices = [`"coulomb"`, `"egs"`, `"lennardjones"`, `"moliere"`, `"qsp"`].
"""
a_rs: float = 0.0
box_lengths: ndarray = None
box_volume: float = 0.0
force_error: float = 0.0
fourpie0: float = 0.0
kappa: float = None
linked_list_on: bool = True
matrix: ndarray = None
measure: bool = False
method: str = "pp"
pbox_lengths: ndarray = None
pbox_volume: float = 0.0
pppm_on: bool = False
pppm_aliases: ndarray = array([3, 3, 3], dtype=int)
pppm_alpha_ewald: float = 0.0
pppm_cao: ndarray = array([3, 3, 3], dtype=int)
pppm_mesh: ndarray = array([8, 8, 8], dtype=int)
pppm_h_array: ndarray = array([1.0, 1.0, 1.0], dtype=float)
pppm_pm_err: float = 0.0
pppm_pp_err: float = 0.0
QFactor: float = 0.0
rc: float = None
num_species: ndarray = None
screening_length_type: str = "thomas-fermi"
screening_length: float = None
species_charges: ndarray = None
species_masses: ndarray = None
total_net_charge: float = 0.0
total_num_density: float = 0.0
total_num_ptcls: float = 0.0
type: str = "yukawa"
def __copy__(self):
"""
Make a shallow copy of the object using copy by creating a new instance of the object and copying its __dict__.
"""
# Create a new object
_copy = type(self)()
# copy the dictionary
_copy.from_dict(input_dict=self.__dict__)
return _copy
def __deepcopy__(self, memodict={}):
"""
Make a deepcopy of the object.
Parameters
----------
memodict: dict
Dictionary of id's to copies
Returns
-------
_copy: :class:`sarkas.potentials.core.Potential`
A new Potential class.
"""
id_self = id(self) # memorization avoids unnecessary recursion
_copy = memodict.get(id_self)
if _copy is None:
_copy = type(self)()
# Make a deepcopy of the mutable arrays using numpy copy function
for k, v in self.__dict__.items():
_copy.__dict__[k] = deepcopy(v, memodict)
return _copy
def __repr__(self):
sortedDict = dict(sorted(self.__dict__.items(), key=lambda x: x[0].lower()))
disp = "Potential( \n"
for key, value in sortedDict.items():
disp += "\t{} : {}\n".format(key, value)
disp += ")"
return disp
@staticmethod
def calc_electron_properties(params):
"""Calculate electronic parameters.
See Electron Properties webpage in documentation website.
Parameters
----------
params : :class:`sarkas.core.Parameters`
Simulation's parameters.
"""
warn(
"Deprecated feature. It will be removed in the v2.0.0 release. \n"
"Use parameters.calc_electron_properties(species). You need to pass the species list.",
category=DeprecationWarning,
)
twopi = 2.0 * pi
spin_degeneracy = 2.0 # g in the notes
# Inverse temperature for convenience
beta_e = 1.0 / (params.kB * params.electron_temperature)
# Plasma frequency
params.electron_plasma_frequency = sqrt(
4.0 * pi * params.qe**2 * params.electron_number_density / (params.fourpie0 * params.me)
)
params.electron_debye_length = sqrt(
params.fourpie0 / (4.0 * pi * params.qe**2 * params.electron_number_density * beta_e)
)
# de Broglie wavelength
params.electron_deBroglie_wavelength = sqrt(twopi * params.hbar2 * beta_e / params.me)
lambda3 = params.electron_deBroglie_wavelength**3
# Landau length 4pi e^2 beta. The division by fourpie0 is needed for MKS units
params.electron_landau_length = 4.0 * pi * params.qe**2 * beta_e / params.fourpie0
# chemical potential of electron gas/(kB T), obtained by inverting the density equation.
params.electron_dimensionless_chemical_potential = invfd1h(
lambda3 * sqrt(pi) * params.electron_number_density / 4.0
)
# Thomas-Fermi length obtained from compressibility. See eq.(10) in Ref. [3]_
lambda_TF_sq = lambda3 / params.electron_landau_length
lambda_TF_sq /= spin_degeneracy / sqrt(pi) * fdm1h(params.electron_dimensionless_chemical_potential)
params.electron_TF_wavelength = sqrt(lambda_TF_sq)
# Electron WS radius
params.electron_WS_radius = (3.0 / (4.0 * pi * params.electron_number_density)) ** (1.0 / 3.0)
# Brueckner parameters
params.electron_rs = params.electron_WS_radius / params.a0
# Fermi wave number
params.electron_Fermi_wavenumber = (3.0 * pi**2 * params.electron_number_density) ** (1.0 / 3.0)
# Fermi energy
params.electron_Fermi_energy = params.hbar2 * params.electron_Fermi_wavenumber**2 / (2.0 * params.me)
# Other electron parameters
params.electron_degeneracy_parameter = params.kB * params.electron_temperature / params.electron_Fermi_energy
params.electron_relativistic_parameter = params.hbar * params.electron_Fermi_wavenumber / (params.me * params.c0)
# Eq. 1 in Murillo Phys Rev E 81 036403 (2010)
params.electron_coupling = params.qe**2 / (
params.fourpie0
* params.electron_Fermi_energy
* params.electron_WS_radius
* sqrt(1 + params.electron_degeneracy_parameter**2)
)
# Warm Dense Matter Parameter, Eq.3 in Murillo Phys Rev E 81 036403 (2010)
params.wdm_parameter = 2.0 / (params.electron_degeneracy_parameter + 1.0 / params.electron_degeneracy_parameter)
params.wdm_parameter *= 2.0 / (params.electron_coupling + 1.0 / params.electron_coupling)
if params.magnetized:
b_mag = sqrt((params.magnetic_field**2).sum()) # magnitude of B
if params.units == "cgs":
params.electron_cyclotron_frequency = params.qe * b_mag / params.c0 / params.me
else:
params.electron_cyclotron_frequency = params.qe * b_mag / params.me
params.electron_magnetic_energy = params.hbar * params.electron_cyclotron_frequency
tan_arg = 0.5 * params.hbar * params.electron_cyclotron_frequency * beta_e
# Perpendicular correction
params.horing_perp_correction = (params.electron_plasma_frequency / params.electron_cyclotron_frequency) ** 2
params.horing_perp_correction *= 1.0 - tan_arg / tanh(tan_arg)
params.horing_perp_correction += 1
# Parallel correction
params.horing_par_correction = 1 - (params.hbar * beta_e * params.electron_plasma_frequency) ** 2 / 12.0
# Quantum Anisotropy Parameter
params.horing_delta = params.horing_perp_correction - 1
params.horing_delta += (params.hbar * beta_e * params.electron_cyclotron_frequency) ** 2 / 12
params.horing_delta /= params.horing_par_correction
def calc_screening_length(self, species):
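        """Set ``screening_length`` according to ``screening_length_type`` (Thomas-Fermi, Debye, from kappa, or custom)."""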
# Consistency
self.screening_length_type = self.screening_length_type.lower()
if self.screening_length_type in ["thomas-fermi", "tf"]:
# Check electron properties
if hasattr(self, "electron_temperature_eV"):
self.electron_temperature = self.eV2K * self.electron_temperature_eV
else:
self.electron_temperature = species[-1].temperature
self.screening_length = species[-1].ThomasFermi_wavelength
elif self.screening_length_type in ["debye", "debye-huckel", "dh"]:
self.screening_length = species[-1].debye_length
elif self.screening_length_type in ["kappa", "from_kappa"]:
self.screening_length = self.a_ws / self.kappa
elif self.screening_length_type in ["custom"]:
if self.screening_length is None:
raise AttributeError("potential.screening_length not defined!")
if not self.screening_length and not self.kappa:
            warn("You have defined neither screening_length nor kappa. I will use the Thomas-Fermi length.")
self.screening_length_type = "thomas-fermi"
self.screening_length = species[-1].ThomasFermi_wavelength
def copy_params(self, params):
"""
Copy necessary parameters.
Parameters
----------
params: :class:`sarkas.core.Parameters`
Simulation's parameters.
"""
self.measure = params.measure
self.units = params.units
self.dimensions = params.dimensions
# Copy needed parameters
self.box_lengths = params.box_lengths.copy()
self.pbox_lengths = params.pbox_lengths.copy()
self.box_volume = params.box_volume
self.pbox_volume = params.pbox_volume
# Needed physical constants
self.fourpie0 = params.fourpie0
self.a_ws = params.a_ws
self.kB = params.kB
self.eV2K = params.eV2K
self.eV2J = params.eV2J
self.hbar = params.hbar
self.QFactor = params.QFactor
self.T_desired = params.T_desired
self.coupling_constant = params.coupling_constant
self.total_num_ptcls = params.total_num_ptcls
self.total_net_charge = params.total_net_charge
self.total_num_density = params.total_num_density
self.num_species = params.num_species
self.species_charges = params.species_charges.copy()
self.species_masses = params.species_masses.copy()
if self.type == "lj":
self.species_lj_sigmas = params.species_lj_sigmas.copy()
def from_dict(self, input_dict: dict) -> None:
"""
Update attributes from input dictionary.
Parameters
----------
input_dict: dict
Dictionary to be copied.
"""
self.__dict__.update(input_dict)
def method_pretty_print(self):
"""Print algorithm information."""
print("\nALGORITHM: ", self.method)
# PP section
if self.method != "fmm":
print(f"rcut = {self.rc / self.a_ws:.4f} a_ws = {self.rc:.6e} ", end="")
print("[cm]" if self.units == "cgs" else "[m]")
pp_cells = (self.box_lengths / self.rc).astype(int)
print(f"No. of PP cells per dimension = {pp_cells}")
ptcls_in_loop = int(self.total_num_density * (self.dimensions * self.rc) ** self.dimensions)
print(f"No. of particles in PP loop = {ptcls_in_loop}")
dim_const = (self.dimensions + 1) / 3.0 * pi
pp_neighbors = int(self.total_num_density * dim_const * self.rc**self.dimensions)
print(f"No. of PP neighbors per particle = {pp_neighbors}")
if self.method == "pppm":
# PM Section
print(f"Charge assignment orders: {self.pppm_cao}")
print(f"FFT aliases: {self.pppm_aliases}")
print(f"Mesh: {self.pppm_mesh}")
print(
f"Ewald parameter alpha = {self.pppm_alpha_ewald * self.a_ws:.4f} / a_ws = {self.pppm_alpha_ewald:.6e} ",
end="",
)
print("[1/cm]" if self.units == "cgs" else "[1/m]")
h_a = self.pppm_h_array / self.a_ws
print(f"Mesh width = {h_a[0]:.4f}, {h_a[1]:.4f}, {h_a[2]:.4f} a_ws")
print(
f" = {self.pppm_h_array[0]:.4e}, {self.pppm_h_array[1]:.4e}, {self.pppm_h_array[2]:.4e} ",
end="",
)
print("[cm]" if self.units == "cgs" else "[m]")
halpha = self.pppm_h_array * self.pppm_alpha_ewald
inv_halpha = (1.0 / halpha).astype(int)
print(f"Mesh size * Ewald_parameter (h * alpha) = {halpha[0]:.4f}, {halpha[1]:.4f}, {halpha[2]:.4f} ")
print(f" ~ 1/{inv_halpha[0]}, 1/{inv_halpha[1]}, 1/{inv_halpha[2]}")
print(f"PP Force Error = {self.pppm_pp_err:.6e}")
print(f"PM Force Error = {self.pppm_pm_err:.6e}")
print(f"Tot Force Error = {self.force_error:.6e}")
def method_setup(self):
"""Setup algorithm's specific parameters."""
# Check for cutoff radius
if not self.method == "fmm":
self.linked_list_on = True # linked list on
mask = self.box_lengths > 0.0
min_length = self.box_lengths[mask].min()
if not self.rc:
warn(
f"\nThe cut-off radius is not defined. I will use the brute force method.",
category=AlgorithmWarning,
)
self.rc = min_length / 2.0
self.linked_list_on = False # linked list off
if self.rc > min_length / 2.0:
warn(
f"\nThe cut-off radius is larger than half of the minimum box length. "
f"I will use the brute force method.",
# f"L_min/ 2 = {0.5 * min_length:.4e} will be used as rc",
category=AlgorithmWarning,
)
self.rc = min_length / 2.0
self.linked_list_on = False # linked list off
if self.a_rs != 0.0:
warn("\nShort-range cut-off enabled. Use this feature with care!", category=AlgorithmWarning)
# renaming
if self.method == "p3m":
self.method == "pppm"
# Compute pppm parameters
if self.method == "pppm":
self.pppm_on = True
self.pppm_setup()
else:
self.linked_list_on = False
self.pppm_on = False
if self.type == "coulomb":
self.force_error = self.fmm_precision
else:
self.force_error = self.fmm_precision
def pppm_setup(self):
"""Calculate the pppm parameters."""
# Change lists to numpy arrays for Numba compatibility
if isinstance(self.pppm_mesh, list):
self.pppm_mesh = array(self.pppm_mesh, dtype=int)
elif not isinstance(self.pppm_mesh, ndarray):
raise TypeError(f"pppm_mesh is a {type(self.pppm_mesh)}. Please pass a list or numpy array.")
# Mesh array should be 3 even in 2D
if not len(self.pppm_mesh) == 3:
raise AlgorithmWarning(
f"len(potential.pppm_mesh) = {len(self.pppm_mesh)}.\n"
f"The PPPM mesh array should be of length 3 even in non 3D simulations."
)
if isinstance(self.pppm_aliases, list):
self.pppm_aliases = array(self.pppm_aliases, dtype=int)
elif not isinstance(self.pppm_aliases, ndarray):
raise TypeError(f"pppm_aliases is a {type(self.pppm_aliases)}. Please pass a list or numpy array.")
# In case you pass one number and not a list
if isinstance(self.pppm_cao, int):
caos = array([1, 1, 1], dtype=int) * self.pppm_cao
self.pppm_cao = caos.copy()
elif isinstance(self.pppm_cao, list):
self.pppm_cao = array(self.pppm_cao, dtype=int)
elif not isinstance(self.pppm_cao, ndarray):
raise TypeError(f"pppm_cao is a {type(self.pppm_cao)}. Please pass a list or numpy array.")
if self.pppm_cao.max() > 7:
raise AttributeError("\nYou have chosen a charge assignment order bigger than 7. Please choose a value <= 7")
# pppm parameters
self.pppm_h_array = self.box_lengths / self.pppm_mesh
# To avoid division by zero
mask = self.pppm_h_array == 0.0
self.pppm_h_array[mask] = 1.0
self.pppm_h_volume = self.pppm_h_array.prod()
# To avoid unnecessary loops
self.pppm_aliases[mask] = 0
# Pack constants together for brevity in input list
kappa = 1.0 / self.screening_length if self.type == "yukawa" else 0.0
constants = array([kappa, self.pppm_alpha_ewald, self.fourpie0])
# Calculate the Optimized Green's Function
self.pppm_green_function, self.pppm_kx, self.pppm_ky, self.pppm_kz, self.pppm_pm_err = gf_opt(
self.box_lengths, self.pppm_h_array, self.pppm_mesh, self.pppm_aliases, self.pppm_cao, constants
)
# Complete PM Force error calculation
self.pppm_pm_err *= sqrt(self.total_num_ptcls) * self.a_ws**2 * self.fourpie0
self.pppm_pm_err /= self.box_volume ** (2.0 / 3.0)
# Total Force Error
self.force_error = sqrt(self.pppm_pm_err**2 + self.pppm_pp_err**2)
def pretty_print(self):
"""Print potential information in a user-friendly way."""
print("\nPOTENTIAL: ", self.type)
self.pot_pretty_print(potential=self)
self.method_pretty_print()
def setup(self, params, species) -> None:
"""Setup the potential class.
Parameters
----------
params : :class:`sarkas.core.Parameters`
Simulation's parameters.
"""
# Enforce consistency
self.type = self.type.lower()
self.method = self.method.lower()
self.copy_params(params)
self.type_setup(species)
self.method_setup()
def type_setup(self, species):
# Update potential-specific parameters
# Coulomb potential
if self.type == "coulomb":
if self.method == "pp":
warn("Use the PP method with care for pure Coulomb interactions.", category=AlgorithmWarning)
from .coulomb import pretty_print_info, update_params
self.pot_update_params = update_params
update_params(self)
elif self.type == "yukawa":
# Yukawa potential
from .yukawa import pretty_print_info, update_params
self.calc_screening_length(species)
self.pot_update_params = update_params
update_params(self)
elif self.type == "egs":
# exact gradient-corrected screening (EGS) potential
from .egs import pretty_print_info, update_params
self.calc_screening_length(species)
self.pot_update_params = update_params
update_params(self)
elif self.type == "lj":
# Lennard-Jones potential
from .lennardjones import pretty_print_info, update_params
self.pot_update_params = update_params
update_params(self)
elif self.type == "moliere":
# Moliere potential
from .moliere import pretty_print_info, update_params
self.pot_update_params = update_params
update_params(self)
elif self.type == "qsp":
# QSP potential
from .qsp import pretty_print_info, update_params
self.pot_update_params = update_params
update_params(self, species)
elif self.type == "hs_yukawa":
# Hard-Sphere Yukawa
from .hs_yukawa import update_params
self.calc_screening_length(species)
self.pot_update_params = update_params
update_params(self)
self.pot_pretty_print = pretty_print_info
def update_linked_list(self, ptcls):
"""
Calculate the pp part of the acceleration.
Parameters
----------
ptcls : :class:`sarkas.particles.Particles`
Particles data.
"""
ptcls.potential_energy, ptcls.acc, ptcls.virial = pp_update(
ptcls.pos,
ptcls.id,
ptcls.masses,
self.box_lengths,
self.rc,
self.matrix,
self.force,
self.measure,
ptcls.rdf_hist,
)
if self.type != "lj":
# Mie Energy of charged systems
# J-M.Caillol, J Chem Phys 101 6080(1994) https: // doi.org / 10.1063 / 1.468422
dipole = ptcls.charges @ ptcls.pos
ptcls.potential_energy += 2.0 * pi * (dipole**2).sum() / (3.0 * self.box_volume * self.fourpie0)
def update_brute(self, ptcls):
"""
Calculate particles' acceleration and potential brutally.
Parameters
----------
ptcls: :class:`sarkas.particles.Particles`
Particles data.
"""
ptcls.potential_energy, ptcls.acc, ptcls.virial = pp_update_0D(
ptcls.pos,
ptcls.id,
ptcls.masses,
self.box_lengths,
self.rc,
self.matrix,
self.force,
self.measure,
ptcls.rdf_hist,
)
if self.type != "lj":
# Mie Energy of charged systems
# J-M.Caillol, J Chem Phys 101 6080(1994) https: // doi.org / 10.1063 / 1.468422
dipole = ptcls.charges @ ptcls.pos
ptcls.potential_energy += 2.0 * pi * (dipole**2).sum() / (3.0 * self.box_volume * self.fourpie0)
def update_pm(self, ptcls):
"""Calculate the pm part of the potential and acceleration.
Parameters
----------
ptcls : :class:`sarkas.particles.Particles`
Particles' data
"""
U_long, acc_l_r = pm_update(
ptcls.pos,
ptcls.charges,
ptcls.masses,
self.pppm_mesh,
self.pppm_h_array,
self.pppm_h_volume,
self.box_volume,
self.pppm_green_function,
self.pppm_kx,
self.pppm_ky,
self.pppm_kz,
self.pppm_cao,
)
# Ewald Self-energy
U_long += self.QFactor * self.pppm_alpha_ewald / sqrt(pi)
# Neutrality condition
U_long += -pi * self.total_net_charge**2.0 / (2.0 * self.box_volume * self.pppm_alpha_ewald**2)
ptcls.potential_energy += U_long
ptcls.acc += acc_l_r
def update_pppm(self, ptcls):
"""Calculate particles' potential and accelerations using pppm method.
Parameters
----------
ptcls : :class:`sarkas.particles.Particles`
Particles' data.
"""
self.update_linked_list(ptcls)
self.update_pm(ptcls)
def update_fmm_coulomb(self, ptcls):
"""Calculate particles' potential and accelerations using FMM method.
Parameters
----------
ptcls : sarkas.core.Particles
Particles' data
"""
out_fmm = lfmm3d(eps=self.fmm_precision, sources=ptcls.pos.transpose(), charges=ptcls.charges, pg=2)
potential_energy = ptcls.charges @ out_fmm.pot.real / self.fourpie0
acc = -(ptcls.charges * out_fmm.grad.real / ptcls.masses) / self.fourpie0
ptcls.acc = acc.transpose()
return potential_energy
def update_fmm_yukawa(self, ptcls):
"""Calculate particles' potential and accelerations using FMM method.
Parameters
----------
ptcls : sarkas.core.Particles
Particles' data
"""
out_fmm = hfmm3d(
eps=self.fmm_precision,
zk=1j / self.screening_length,
sources=ptcls.pos.transpose(),
charges=ptcls.charges,
pg=2,
)
potential_energy = ptcls.charges @ out_fmm.pot.real / self.fourpie0
acc = -(ptcls.charges * out_fmm.grad.real / ptcls.masses) / self.fourpie0
ptcls.acc = acc.transpose()
return potential_energy
| 36.260083 | 121 | 0.603199 | 25,593 | 0.981665 | 0 | 0 | 4,622 | 0.177285 | 0 | 0 | 8,907 | 0.341644 |
27877c156aab625b9201be4f2bcf3caf8ade21c6 | 328 | py | Python | forest_lite/server/routers/api.py | uk-gov-mirror/MetOffice.forest-lite | 9406b53f7e6a9651eb675e0ac2e5945421b25557 | [
"BSD-3-Clause"
]
| 6 | 2020-08-05T16:12:57.000Z | 2022-01-06T01:34:19.000Z | forest_lite/server/routers/api.py | uk-gov-mirror/MetOffice.forest-lite | 9406b53f7e6a9651eb675e0ac2e5945421b25557 | [
"BSD-3-Clause"
]
| 49 | 2020-08-14T13:58:32.000Z | 2021-06-29T11:42:32.000Z | forest_lite/server/routers/api.py | uk-gov-mirror/MetOffice.forest-lite | 9406b53f7e6a9651eb675e0ac2e5945421b25557 | [
"BSD-3-Clause"
]
| 2 | 2020-12-03T09:24:13.000Z | 2021-04-11T06:10:36.000Z | from fastapi import APIRouter
router = APIRouter()
@router.get("/api")
async def api():
"""Discoverable API by hitting root endpoint"""
return {
"links": {
"datasets": "/datasets",
"natural_earth_feature": "/natural_earth_feature",
"viewport": "/viewport"
}
}
| 19.294118 | 62 | 0.564024 | 0 | 0 | 0 | 0 | 272 | 0.829268 | 252 | 0.768293 | 149 | 0.454268 |
278849dea2ba5567753d1997063d53e6a66da898 | 593 | py | Python | tests/lupin/validators/test_match.py | Clustaar/lupin | 9ef73642d84a99adb80abf5a922a9422ddae9254 | [
"MIT"
]
| 22 | 2017-10-18T08:27:20.000Z | 2022-03-25T18:53:43.000Z | tests/lupin/validators/test_match.py | Clustaar/lupin | 9ef73642d84a99adb80abf5a922a9422ddae9254 | [
"MIT"
]
| 5 | 2019-09-16T15:31:55.000Z | 2022-02-10T08:29:14.000Z | tests/lupin/validators/test_match.py | Clustaar/lupin | 9ef73642d84a99adb80abf5a922a9422ddae9254 | [
"MIT"
]
| null | null | null | import re
import pytest
from lupin.errors import InvalidMatch
from lupin.validators import Match
@pytest.fixture
def validator():
regexp = re.compile("hello")
return Match(regexp)
class TestCall(object):
def test_raise_error_if_does_not_match(self, validator):
with pytest.raises(InvalidMatch) as exc:
validator("bye", [])
def test_does_nothing_if_match(self, validator):
validator("hello", [])
def test_raise_error_value_is_not_a_string(self, validator):
with pytest.raises(InvalidMatch) as exc:
validator(None, [])
| 22.807692 | 64 | 0.699831 | 398 | 0.671164 | 0 | 0 | 90 | 0.151771 | 0 | 0 | 19 | 0.03204 |
2788acf8b816fa004267a5988eb11b0290795f1e | 289 | py | Python | scripts/face_wall.py | Meeshbhoombah/burnt | c10113e7904c5d13c0feedc337681fb41f1006a7 | [
"MIT"
]
| null | null | null | scripts/face_wall.py | Meeshbhoombah/burnt | c10113e7904c5d13c0feedc337681fb41f1006a7 | [
"MIT"
]
| null | null | null | scripts/face_wall.py | Meeshbhoombah/burnt | c10113e7904c5d13c0feedc337681fb41f1006a7 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
spacing = 0.1 # m
lines = []
for c in range(0, 8):
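    # alternate the row direction on every other column (serpentine / zig-zag ordering)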
rs = [range(11), reversed(range(11))][c % 2]
for r in rs:
lines.append(' {"point": [%.2f, %.2f, %.2f]}' %
(c*spacing, 0, (r)*spacing))
print '[\n' + ',\n'.join(lines) + '\n]'
| 24.083333 | 56 | 0.460208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.249135 |
27893fac576198dea3db960f02abcae0e0707306 | 1,237 | py | Python | 201220/Q8958.py | JongGuk/BOJ | dd447d9f65481de19a3c0b4f8bb8b5f3d6277c15 | [
"MIT"
]
| null | null | null | 201220/Q8958.py | JongGuk/BOJ | dd447d9f65481de19a3c0b4f8bb8b5f3d6277c15 | [
"MIT"
]
| null | null | null | 201220/Q8958.py | JongGuk/BOJ | dd447d9f65481de19a3c0b4f8bb8b5f3d6277c15 | [
"MIT"
]
| null | null | null | '''There is an objective test result such as “OOXXOXXOOO”.
An ‘O’ means a correct answer of a problem and an ‘X’ means a wrong answer.
The score of each problem of this test is calculated by itself and
its just previous consecutive ‘O’s only when the answer is correct.
For example, the score of the 10th problem is 3 that is obtained by itself and its two previous consecutive ‘O’s.
Therefore, the score of “OOXXOXXOOO” is 10 which is calculated by “1+2+0+0+1+0+0+1+2+3”.
You are to write a program calculating the scores of test results.
Your program is to read from standard input. The input consists of T test cases.
The number of test cases T is given in the first line of the input.
Each test case starts with a line containing a string composed by ‘O’ and ‘X’
and the length of the string is more than 0 and less than 80. There is no spaces between ‘O’ and ‘X’.
Your program is to write to standard output.
Print exactly one line for each test case. The line is to contain the score of the test case.
'''
num_test_case = int(input())
for num in range(num_test_case):
test_case = input().split("X")
result = 0
for i in test_case:
n = i.count("O")
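        # a run of n consecutive O's contributes 1+2+...+n = n*(n+1)/2 to the score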
result += n*(1+n)/2
print(int(result)) | 49.48 | 114 | 0.720291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,070 | 0.835285 |
278941aab99fac086f38eb414bccf8c30fd00617 | 328 | py | Python | common/tasks.py | gaomugong/flask-demo | 83bfb04634355565456cc16a5e98421338e3f562 | [
"MIT"
]
| 12 | 2017-12-24T13:58:17.000Z | 2021-04-06T16:21:00.000Z | common/tasks.py | gaomugong/flask-demo | 83bfb04634355565456cc16a5e98421338e3f562 | [
"MIT"
]
| null | null | null | common/tasks.py | gaomugong/flask-demo | 83bfb04634355565456cc16a5e98421338e3f562 | [
"MIT"
]
| 1 | 2021-10-17T14:45:44.000Z | 2021-10-17T14:45:44.000Z | # -*- coding: utf-8 -*-
from app import celery, app
@celery.task()
def add_together(a, b):
app.logger.info('hello world')
return a + b
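# Typical invocation (assumed): result = add_together.delay(2, 3); result.get() returns 5 once a worker has processed the task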
# from common.extensions import celery
# from flask import current_app
#
#
# @celery.task()
# def add_together(a, b):
# current_app.logger.info('hello world')
# return a + b
| 18.222222 | 44 | 0.652439 | 0 | 0 | 0 | 0 | 90 | 0.27439 | 0 | 0 | 210 | 0.640244 |
2789b658170df8dfdb61f142d656fdcfa80baf19 | 11,411 | py | Python | Code/3_linear_regression_on_pixels.py | jjatinggoyal/accessbility-indicators | 7126d3bc33dc6dadbcf6d9000d6ef6d432f093c0 | [
"MIT"
]
| null | null | null | Code/3_linear_regression_on_pixels.py | jjatinggoyal/accessbility-indicators | 7126d3bc33dc6dadbcf6d9000d6ef6d432f093c0 | [
"MIT"
]
| null | null | null | Code/3_linear_regression_on_pixels.py | jjatinggoyal/accessbility-indicators | 7126d3bc33dc6dadbcf6d9000d6ef6d432f093c0 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""3_Linear_regression_on_pixels.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1nhECM9OxwIw8BjEqsQcSUwojX2KYrh1I
"""
from google.colab import drive #to retrieve data from drive
drive.mount('/content/drive/')
import os
os.chdir('drive/My Drive/Data/OSM')  # change into the data directory on the mounted drive
import numpy as np
import pandas as pd
from pylab import *
from PIL import Image
from scipy import ndimage
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import os, sys
# districts=['Bangalore', 'Chennai', 'Delhi', 'Gurgaon', 'Hyderabad', 'Kolkata', 'Mumbai']
# years = ['2016', '2017', '2018', '2019']
districts=['Bokaro', 'Jamui']
years = ['2016', '2019']
# defining required functions here
'''
This function is used to prepare district image for the application of smoothing filters.
The background and builtup pixels are given value 0 and the non-built-up pixels are given value 1.
This is because the filters should perform smoothing over BU and NBU pixels only and not background.
Input:
a) original_image: The BU/NBU maps with background pixels having value 0, BU pixels having value 65, and NBU value 130
Output:
a) prepped_image: The Background and BU pixels will have value 0 and NBU pixels will have value 1.
'''
def Prepare_image_for_filters(original_image):
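  # integer division by 130 maps background (0) and BU (65) pixels to 0, and NBU (130) pixels to 1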
prepped_image = original_image//130
return prepped_image
'''
This function removes the background pixels from the 1D array of the smoothed image using original image.
A pixel is retained in the smoothed array only if its value in the original image is non-zero, i.e. 65 (BU) or 130 (NBU)
'''
def Remove_background_pixels(original_1D_image, smoothed_1D_image):
smooth_temp = [ smoothed_1D_image[i] for i in range(len(smoothed_1D_image)) if original_1D_image[i] > 0]
return smooth_temp
"""'''
Driver code starts here
'''
for district in districts:
print (district)
year_to_pixel_matrix = [] # this matrix stores for each year the value of all pixels
for year in years:
original_image = np.array( Image.open('BU_NBU_maps/'+district+'/'+district+'_BU_NBU_'+year+'.png') )
prepped_image_for_filters = Prepare_image_for_filters(original_image)
# Apply Convolution and gaussian filters over prepped image. All filter parameters are hyper-parameters
kernel = np.array([[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
smoothed_image = ndimage.convolve( prepped_image_for_filters, kernel, mode='constant', cval=0.0)
smoothed_image = ndimage.gaussian_filter(smoothed_image, sigma=0.2, truncate=11.0, output=float)
# convert the 2D images into 1D arrays for further pixel-wise processing
original_1D_image = original_image.flatten()
smoothed_1D_image = smoothed_image.flatten()
assert(len(original_1D_image) == len(smoothed_1D_image))
smoothed_1D_image = Remove_background_pixels(original_1D_image, smoothed_1D_image)
year_to_pixel_matrix.append(smoothed_1D_image)
# transpose is taken to store pixel values in rows against the years in columns
pixel_to_year_matrix = np.array(year_to_pixel_matrix, copy=False).T
  # Applying linear regression on the values of each pixel over different years, i.e. each row of pixel_to_year_matrix
# For this, the boundary pixels of a district should be avoided as their smooth value is impacted by background pixels
relabelled_original_image = original_image//65 # 0 for background, 1 for BU, and 2 for NBU
dimensions = relabelled_original_image.shape
background_vs_non_background_image = np.sign(relabelled_original_image) # using signum function, background pixels remain 0 and non-background become 1
# using convolution filter, each non-boundary pixel inside the district will have value 9 in the mask
  boundary_identifying_kernel = np.array([[1,1,1],[1,1,1],[1,1,1]]) # this should be a 5x5 filter but we'll lose out on double boundary pixels
boundary_vs_non_boundary_mask = ndimage.convolve(background_vs_non_background_image, boundary_identifying_kernel, mode='constant', cval=0.0)
current_pixel = 0 # refers to current pixel position we check for being boundary pixel or not
# Define variables for applying linear regression
  year_list_as_input = np.reshape(range(len(years)), (-1,1)) # column vector [[0],[1],...,[len(years)-1]]; -1 lets numpy infer the no. of rows
# following values are found corresponding to each pixel using its value in all years
slope = []
intercept = []
cost_array = []
for j in range(dimensions[0]):
for k in range(dimensions[1]):
if (background_vs_non_background_image[j][k]): # if pixel is inside the district
if (boundary_vs_non_boundary_mask[j][k] == 9): # if pixel is not boundary pixel
linear_model = LinearRegression()
# we predict value of pixel for a given year and find best fit of linear regression on it
# so year_list_as_input is our input variable for linear regression
regression = linear_model.fit(year_list_as_input, pixel_to_year_matrix[current_pixel])
cost = np.mean((pixel_to_year_matrix[current_pixel] - linear_model.predict(year_list_as_input))**2)
cost_array.append(cost)
slope.append(round(regression.coef_[0], 4)) #coef.shape is (1,1)
intercept.append(round(regression.intercept_, 4)) #intercept.shape is (1)
current_pixel += 1
cost_array = np.array(cost_array)
print(cost_array)
# Save the cost array
os.makedirs('Cost_results_from_Regression/'+district, exist_ok = True)
# multiply each cost value by 1000 to overcome data loss from storing small values
np.savetxt('Cost_results_from_Regression/'+district+'/'+district+'_regression_cost_array.txt', cost_array*1000, fmt='%d')
# creating and saving CDFs against the cost values of pixels for each district
unique_cost_values, cost_frequencies = np.unique(cost_array, return_counts=True)
total_cost_values = (float) (cost_frequencies.sum())
cost_frequencies = cost_frequencies/total_cost_values
cdf = np.cumsum(cost_frequencies)
plt.plot(unique_cost_values,cdf,label = 'data')
# check if a CDF file already exists, since matplotlib doesn't overwrite, delete previous file
if os.path.isfile('Cost_results_from_Regression/'+district+'/'+district+'_linear_regression_cdf'):
os.remove('Cost_results_from_Regression/'+district+'/'+district+'_linear_regression_cdf')
savefig('Cost_results_from_Regression/'+district+'/'+district+'_linear_regression_cdf')
plt.clf()
print("Done")
"""
for year in years:
original_image = np.array( Image.open('BU_NBU_maps/'+district+'/'+district+'_BU_NBU_'+year+'.png') )
prepped_image_for_filters = Prepare_image_for_filters(original_image)
# Apply Convolution and gaussian filters over prepped image. All filter parameters are hyper-parameters
kernel = np.array([[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
smoothed_image = ndimage.convolve( prepped_image_for_filters, kernel, mode='constant', cval=0.0)
smoothed_image = ndimage.gaussian_filter(smoothed_image, sigma=0.2, truncate=11.0, output=float)
# convert the 2D images into 1D arrays for further pixel-wise processing
original_1D_image = original_image.flatten()
smoothed_1D_image = smoothed_image.flatten()
assert(len(original_1D_image) == len(smoothed_1D_image))
smoothed_1D_image = Remove_background_pixels(original_1D_image, smoothed_1D_image)
year_to_pixel_matrix.append(smoothed_1D_image)
# transpose is taken to store pixel values in rows against the years in columns
pixel_to_year_matrix = np.array(year_to_pixel_matrix, copy=False).T
    # Applying linear regression on the values of each pixel over different years, i.e. each row of pixel_to_year_matrix
# For this, the boundary pixels of a district should be avoided as their smooth value is impacted by background pixels
relabelled_original_image = original_image//65 # 0 for background, 1 for BU, and 2 for NBU
dimensions = relabelled_original_image.shape
background_vs_non_background_image = np.sign(relabelled_original_image) # using signum function, background pixels remain 0 and non-background become 1
# using convolution filter, each non-boundary pixel inside the district will have value 9 in the mask
boundary_identifying_kernel = np.array([[1,1,1],[1,1,1],[1,1,1]]) # this should be a 5x5 filter but we'll loose out on double boundary pixels
boundary_vs_non_boundary_mask = ndimage.convolve(background_vs_non_background_image, boundary_identifying_kernel, mode='constant', cval=0.0)
current_pixel = 0 # refers to current pixel position we check for being boundary pixel or not
# Define variables for applying linear regression
year_list_as_input = np.reshape(range(len(years)), (-1,1)) # matrix of type [[1],[2],...,[len(year)]], -1 refers to unspecified no. of rows here
# following values are found corresponding to each pixel using its value in all years
slope = []
intercept = []
cost_array = []
for j in range(dimensions[0]):
for k in range(dimensions[1]):
if (background_vs_non_background_image[j][k]): # if pixel is inside the district
if (boundary_vs_non_boundary_mask[j][k] == 9): # if pixel is not boundary pixel
linear_model = LinearRegression()
# we predict value of pixel for a given year and find best fit of linear regression on it
# so year_list_as_input is our input variable for linear regression
regression = linear_model.fit(year_list_as_input, pixel_to_year_matrix[current_pixel])
cost = np.mean((pixel_to_year_matrix[current_pixel] - linear_model.predict(year_list_as_input))**2)
cost_array.append(cost)
slope.append(round(regression.coef_[0], 4)) #coef.shape is (1,1)
intercept.append(round(regression.intercept_, 4)) #intercept.shape is (1)
current_pixel += 1
cost_array = np.array(cost_array)
print(cost_array)
# Save the cost array
os.makedirs('Cost_results_from_Regression/'+district, exist_ok = True)
# multiply each cost value by 1000 to overcome data loss from storing small values
np.savetxt('Cost_results_from_Regression/'+district+'/'+district+'_regression_cost_array.txt', cost_array*1000, fmt='%d')
# creating and saving CDFs against the cost values of pixels for each district
unique_cost_values, cost_frequencies = np.unique(cost_array, return_counts=True)
total_cost_values = (float) (cost_frequencies.sum())
cost_frequencies = cost_frequencies/total_cost_values
cdf = np.cumsum(cost_frequencies)
plt.plot(unique_cost_values,cdf,label = 'data')
# check if a CDF file already exists, since matplotlib doesn't overwrite, delete previous file
if os.path.isfile('Cost_results_from_Regression/'+district+'/'+district+'_linear_regression_cdf'):
os.remove('Cost_results_from_Regression/'+district+'/'+district+'_linear_regression_cdf')
savefig('Cost_results_from_Regression/'+district+'/'+district+'_linear_regression_cdf')
plt.clf()
print("Done") | 52.344037 | 155 | 0.729997 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,238 | 0.721935 |
278a8dffe7af41773e706a3b3b7522cea3642494 | 127 | py | Python | ccc/cfg.py | zkdev/cc-utils | 042c6632ca6f61a484bc0a71f85957aeba7f7278 | [
"BSD-3-Clause"
]
| 15 | 2018-04-18T13:25:30.000Z | 2022-03-04T09:25:41.000Z | ccc/cfg.py | zkdev/cc-utils | 042c6632ca6f61a484bc0a71f85957aeba7f7278 | [
"BSD-3-Clause"
]
| 221 | 2018-04-12T06:29:43.000Z | 2022-03-27T03:01:40.000Z | ccc/cfg.py | zkdev/cc-utils | 042c6632ca6f61a484bc0a71f85957aeba7f7278 | [
"BSD-3-Clause"
]
| 29 | 2018-04-11T14:42:23.000Z | 2021-11-09T16:26:32.000Z | import functools
import ci.util
ctx = ci.util.ctx()
@functools.lru_cache()
def cfg_factory():
return ctx.cfg_factory()
| 11.545455 | 28 | 0.716535 | 0 | 0 | 0 | 0 | 70 | 0.551181 | 0 | 0 | 0 | 0 |
278e170fcb4a1f505f51883094b18a872922bd6e | 2,459 | py | Python | scripts/create_grids.py | edwardoughton/taddle | f76ca6067e6fca6b699675ab038c31c9444e0a79 | [
"MIT"
]
| 9 | 2020-08-18T04:25:00.000Z | 2022-03-18T16:42:33.000Z | scripts/create_grids.py | edwardoughton/arpu_predictor | f76ca6067e6fca6b699675ab038c31c9444e0a79 | [
"MIT"
]
| null | null | null | scripts/create_grids.py | edwardoughton/arpu_predictor | f76ca6067e6fca6b699675ab038c31c9444e0a79 | [
"MIT"
]
| 4 | 2020-01-27T01:48:30.000Z | 2021-12-01T16:48:17.000Z | """
Create 10km x 10km grid using the country shapefile.
Written by Ed Oughton.
Winter 2020
"""
import argparse
import os
import configparser
import geopandas as gpd
from shapely.geometry import Polygon, mapping
import pandas as pd
import numpy as np
import rasterio
from rasterstats import zonal_stats
BASE_DIR = '.'
# repo imports
import sys
sys.path.append(BASE_DIR)
from config import VIS_CONFIG
COUNTRY_ABBRV = VIS_CONFIG['COUNTRY_ABBRV']
COUNTRIES_DIR = os.path.join(BASE_DIR, 'data', 'countries')
SHAPEFILE_DIR = os.path.join(COUNTRIES_DIR, COUNTRY_ABBRV, 'shapefile')
GRID_DIR = os.path.join(COUNTRIES_DIR, COUNTRY_ABBRV, 'grid')
def create_folders():
"""
Function to create new folder.
"""
os.makedirs(GRID_DIR, exist_ok=True)
def generate_grid(country):
"""
Generate a 10x10km spatial grid for the chosen country.
"""
filename = 'national_outline_{}.shp'.format(country)
country_outline = gpd.read_file(os.path.join(SHAPEFILE_DIR, filename))
country_outline.crs = "epsg:4326"
country_outline = country_outline.to_crs("epsg:3857")
xmin,ymin,xmax,ymax = country_outline.total_bounds
#10km sides, leading to 100km^2 area
length = 1e4
wide = 1e4
cols = list(range(int(np.floor(xmin)), int(np.ceil(xmax)), int(wide)))
rows = list(range(int(np.floor(ymin)), int(np.ceil(ymax)), int(length)))
rows.reverse()
polygons = []
for x in cols:
for y in rows:
polygons.append( Polygon([(x,y), (x+wide, y), (x+wide, y-length), (x, y-length)]))
grid = gpd.GeoDataFrame({'geometry': polygons})
intersection = gpd.overlay(grid, country_outline, how='intersection')
intersection.crs = "epsg:3857"
intersection = intersection.to_crs("epsg:4326")
final_grid = query_settlement_layer(intersection)
final_grid = final_grid[final_grid.geometry.notnull()]
final_grid.to_file(os.path.join(GRID_DIR, 'grid.shp'))
print('Completed grid generation process')
def query_settlement_layer(grid):
"""
Query the settlement layer to get an estimated population for each grid square.
"""
path = os.path.join(SHAPEFILE_DIR, f'{COUNTRY_ABBRV}.tif')
grid['population'] = pd.DataFrame(
zonal_stats(vectors=grid['geometry'], raster=path, stats='sum'))['sum']
grid = grid.replace([np.inf, -np.inf], np.nan)
return grid
if __name__ == '__main__':
create_folders()
generate_grid(COUNTRY_ABBRV)
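# A minimal, hedged sketch of how the generated output could be inspected after
# running this script; GRID_DIR and the 'population' column come from the code
# above, everything else in this snippet is illustrative only.
# >>> import geopandas as gpd
# >>> grid = gpd.read_file(os.path.join(GRID_DIR, 'grid.shp'))
# >>> grid.crs                       # expected epsg:4326, set in generate_grid()
# >>> grid['population'].describe()  # per-cell totals from query_settlement_layer()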
| 26.44086 | 94 | 0.699471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 613 | 0.249288 |
27907e0aff23ef4fbe2d4a38e27570505c4caa34 | 366 | py | Python | test/test_all.py | beremaran/spdown | 59e5ea6996be51ad015f9da6758e2ce556b9fb94 | [
"MIT"
]
| 2 | 2019-08-13T15:13:58.000Z | 2019-10-04T09:09:24.000Z | test/test_all.py | beremaran/spdown | 59e5ea6996be51ad015f9da6758e2ce556b9fb94 | [
"MIT"
]
| 4 | 2021-02-08T20:23:42.000Z | 2022-03-11T23:27:07.000Z | test/test_all.py | beremaran/spdown | 59e5ea6996be51ad015f9da6758e2ce556b9fb94 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import unittest
test_modules = [
'test.test_config',
'test.test_secrets',
'test.test_spotify',
'test.test_youtube'
]
if __name__ == "__main__":
suite = unittest.TestSuite()
for tm in test_modules:
suite.addTest(unittest.defaultTestLoader.loadTestsFromName(tm))
unittest.TextTestRunner().run(test=suite)
| 19.263158 | 71 | 0.691257 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.289617 |
2790d75b4b157f35c41640a672fd75216eb8137c | 1,281 | py | Python | tests/rec_util.py | pablohawz/tfg-Scan-Paint-clone | 056cd50d9e4274620cf085a41ed9d326e16dd47b | [
"MIT"
]
| null | null | null | tests/rec_util.py | pablohawz/tfg-Scan-Paint-clone | 056cd50d9e4274620cf085a41ed9d326e16dd47b | [
"MIT"
]
| null | null | null | tests/rec_util.py | pablohawz/tfg-Scan-Paint-clone | 056cd50d9e4274620cf085a41ed9d326e16dd47b | [
"MIT"
]
| null | null | null | import os
import tempfile
from time import time
import numpy as np
import sounddevice as sd
from PySide2.QtWidgets import QApplication, QFileDialog
from scipy.io.wavfile import write
# Config
t = 3 # s
fs = 44100
def save(x, fs):
# You have to create a QApp in order to use a
    # Widget (QFileDialog)
app = QApplication([])
fname, _ = QFileDialog.getSaveFileName(
None,
caption='Save audio to disk',
dir='C:/users/pablo/tfg',
filter='Audio Wav File (.wav)')
if fname == '':
return
if not fname.endswith('.wav'):
fname += '.wav'
write(fname, fs, x)
def main():
with tempfile.TemporaryDirectory() as dir:
# Rec
print('Rec!')
audio = sd.rec(frames=int(t*fs), samplerate=fs, channels=2)
sd.wait()
print('End!')
# Sum to mono
audio_mono = np.sum(audio, axis=1)
# Calculate dB
spl = 20 * np.log10(np.std(audio_mono) / 2e-5)
print(round(spl, 2))
path = os.path.join(dir, repr(time())+'.wav')
write(path, 44100, audio_mono)
r = input('Do you want to save it? [y]/n: ')
if r == '' or r == 'y':
save(audio_mono, fs)
print('Ciao')
if __name__ == '__main__':
main()
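# A hedged worked example of the level formula used in main(): spl = 20*log10(std/2e-5)
# expresses the signal level in decibels relative to the 20 micro-pascal reference, with
# the standard deviation standing in for the RMS of the (roughly zero-mean) signal.
# For instance, std = 0.02 gives 20*log10(0.02/2e-5) = 20*log10(1000) = 60 dB.
# The digital samples are not calibrated against real sound pressure, so this is a
# relative level rather than a true SPL measurement.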
| 20.66129 | 67 | 0.565964 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.201405 |
2792236e3960ae778ac604767d58c8cfaef78404 | 10,977 | py | Python | test/comptests/TestHybridQuasiGaussian.py | sschlenkrich/HybridMonteCarlo | 72f54aa4bcd742430462b27b72d70369c01f9ac4 | [
"MIT"
]
| 3 | 2021-08-18T18:34:41.000Z | 2021-12-24T07:05:19.000Z | test/comptests/TestHybridQuasiGaussian.py | sschlenkrich/HybridMonteCarlo | 72f54aa4bcd742430462b27b72d70369c01f9ac4 | [
"MIT"
]
| null | null | null | test/comptests/TestHybridQuasiGaussian.py | sschlenkrich/HybridMonteCarlo | 72f54aa4bcd742430462b27b72d70369c01f9ac4 | [
"MIT"
]
| 3 | 2021-01-31T11:41:19.000Z | 2022-03-25T19:51:20.000Z | #!/usr/bin/python
import sys
sys.path.append('./')
import unittest
import copy
import numpy as np
from hybmc.mathutils.Helpers import BlackImpliedVol, BlackVega
from hybmc.termstructures.YieldCurve import YieldCurve
from hybmc.models.AssetModel import AssetModel
from hybmc.models.HybridModel import HybridModel
from hybmc.models.HullWhiteModel import HullWhiteModel
from hybmc.models.QuasiGaussianModel import QuasiGaussianModel
from hybmc.simulations.McSimulation import McSimulation
from hybmc.simulations.Payoffs import Fixed, Pay, Asset, LiborRate, Max
import matplotlib.pyplot as plt
# a quick way to get a model
def HWModel(rate=0.01, vol=0.0050, mean=0.03):
curve = YieldCurve(rate)
times = np.array([ 10.0 ])
vols = np.array([ vol ])
return HullWhiteModel(curve, mean, times, vols)
def fwd(mcSim,p):
samples = np.array([
p.discountedAt(mcSim.path(k)) for k in range(mcSim.nPaths) ])
fwd = np.average(samples) / \
mcSim.model.domRatesModel.yieldCurve.discount(p.obsTime)
err = np.std(samples) / np.sqrt(samples.shape[0]) / \
mcSim.model.domRatesModel.yieldCurve.discount(p.obsTime)
return fwd, err
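# Hedged reading of the helper above: fwd approximates
#   E[ discounted payoff ] / P_dom(0, T)
# i.e. the forward value of payoff p under the domestic risk-neutral measure, and
# err is the Monte Carlo standard error of that estimate,
#   err = std(samples) / sqrt(nPaths) / P_dom(0, T),
# so the tests below can quote results as fwd +/- err against curve-implied values.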
class TestHybridQuasiGaussian(unittest.TestCase):
# set up the stage for testing the models
def setUp(self):
### full smile/skew model
# domestic rates
domAlias = 'EUR'
eurCurve = YieldCurve(0.03)
d = 2
times = np.array([ 10.0 ])
sigma = np.array([ [ 0.0060 ],
[ 0.0040 ] ])
slope = np.array([ [ 0.10 ],
[ 0.15 ] ])
curve = np.array([ [ 0.05 ],
[ 0.10 ] ])
delta = np.array([ 1.0, 10.0 ])
chi = np.array([ 0.01, 0.15 ])
Gamma = np.array([ [1.0, 0.6],
[0.6, 1.0] ])
eurRatesModel = QuasiGaussianModel(eurCurve,d,times,sigma,slope,curve,delta,chi,Gamma)
# assets
forAliases = [ 'USD', 'GBP' ]
spotS0 = [ 1.0, 2.0 ]
spotVol = [ 0.3, 0.2 ]
forAssetModels = [
AssetModel(S0, vol) for S0, vol in zip(spotS0,spotVol) ]
# USD rates
usdCurve = YieldCurve(0.02)
d = 3
times = np.array([ 10.0 ])
sigma = np.array([ [ 0.0060 ],
[ 0.0050 ],
[ 0.0040 ] ])
slope = np.array([ [ 0.10 ],
[ 0.20 ],
[ 0.30 ] ])
curve = np.array([ [ 0.05 ],
[ 0.10 ],
[ 0.20 ] ])
delta = np.array([ 1.0, 5.0, 20.0 ])
chi = np.array([ 0.01, 0.05, 0.15 ])
Gamma = np.array([ [1.0, 0.8, 0.6],
[0.8, 1.0, 0.8],
[0.6, 0.8, 1.0] ])
usdRatesModel = QuasiGaussianModel(usdCurve,d,times,sigma,slope,curve,delta,chi,Gamma)
#
gbpRatesModel = HWModel()
#
# 'EUR_x_0', 'EUR_x_1', 'USD_logS', 'USD_x_0', 'USD_x_1', 'USD_x_2', 'GBP_logS', 'GBP_x'
corr = np.array([
[ 1.0, 0.0, 0.5, -0.5, 0.0, 0.0, -0.5, -0.5 ], # EUR_x_0
[ 0.0, 1.0, 0.0, 0.0, -0.5, 0.0, -0.5, 0.0 ], # EUR_x_1
[ 0.5, 0.0, 1.0, -0.5, -0.5, -0.5, 0.0, 0.0 ], # USD_logS
[ -0.5, 0.0, -0.5, 1.0, 0.0, 0.0, 0.0, 0.0 ], # USD_x_0
[ 0.0, -0.5, -0.5, 0.0, 1.0, 0.0, 0.0, 0.0 ], # USD_x_1
[ 0.0, 0.0, -0.5, 0.0, 0.0, 1.0, 0.0, 0.0 ], # USD_x_2
[ -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.5 ], # GBP_logS
[ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 1.0 ], # GBP_x
])
#
# corr = np.identity(2 + 1 + 3 + 1 + 1 ) # overwrite
#
self.model = HybridModel(domAlias,eurRatesModel,forAliases,forAssetModels,[usdRatesModel,gbpRatesModel],corr)
### Gaussian model
# domestic rates
domAlias = 'EUR'
eurCurve = YieldCurve(0.03)
d = 2
times = np.array([ 10.0 ])
sigma = np.array([ [ 0.0060 ],
[ 0.0040 ] ])
slope = np.array([ [ 0.00 ],
[ 0.00 ] ])
curve = np.array([ [ 0.00 ],
[ 0.00 ] ])
delta = np.array([ 1.0, 10.0 ])
chi = np.array([ 0.01, 0.15 ])
Gamma = np.array([ [1.0, 0.6],
[0.6, 1.0] ])
eurRatesModel = QuasiGaussianModel(eurCurve,d,times,sigma,slope,curve,delta,chi,Gamma)
# assets
forAliases = [ 'USD', 'GBP' ]
spotS0 = [ 1.0, 2.0 ]
spotVol = [ 0.3, 0.2 ]
forAssetModels = [
AssetModel(S0, vol) for S0, vol in zip(spotS0,spotVol) ]
# USD rates
usdCurve = YieldCurve(0.02)
d = 3
times = np.array([ 10.0 ])
sigma = np.array([ [ 0.0060 ],
[ 0.0050 ],
[ 0.0040 ] ])
slope = np.array([ [ 0.10 ],
[ 0.20 ],
[ 0.30 ] ])
curve = np.array([ [ 0.05 ],
[ 0.10 ],
[ 0.20 ] ])
delta = np.array([ 1.0, 5.0, 20.0 ])
chi = np.array([ 0.01, 0.05, 0.15 ])
Gamma = np.array([ [1.0, 0.8, 0.6],
[0.8, 1.0, 0.8],
[0.6, 0.8, 1.0] ])
self.gaussianModel = HybridModel(domAlias,eurRatesModel,forAliases,forAssetModels,[usdRatesModel,gbpRatesModel],corr)
def test_ModelSetup(self):
self.assertListEqual(self.model.stateAliases(),
['EUR_x_0', 'EUR_x_1',
'EUR_y_0_0', 'EUR_y_0_1',
'EUR_y_1_0', 'EUR_y_1_1',
'EUR_s',
'USD_logS', 'USD_x_0', 'USD_x_1', 'USD_x_2',
'USD_y_0_0', 'USD_y_0_1', 'USD_y_0_2',
'USD_y_1_0', 'USD_y_1_1', 'USD_y_1_2',
'USD_y_2_0', 'USD_y_2_1', 'USD_y_2_2',
'USD_s',
'GBP_logS', 'GBP_x', 'GBP_s'])
self.assertListEqual(self.model.factorAliases(),
['EUR_x_0', 'EUR_x_1',
'USD_logS', 'USD_x_0', 'USD_x_1', 'USD_x_2',
'GBP_logS', 'GBP_x'])
# @unittest.skip('Too time consuming')
def test_HybridSimulation(self):
times = np.concatenate([ np.linspace(0.0, 10.0, 11), [10.5] ])
nPaths = 2**13
seed = 314159265359
# risk-neutral simulation
print('')
mcSim = McSimulation(self.model,times,nPaths,seed,False)
#
T = 10.0
P = Pay(Fixed(1.0),T)
fw, err = fwd(mcSim,P)
# domestic numeraire
print('1.0 @ %4.1lfy %8.6lf - mc_err = %8.6lf' % (T,fw,err))
# foreign assets
for k, alias in enumerate(self.model.forAliases):
p = Asset(T,alias)
xT = self.model.forAssetModels[k].X0 * \
self.model.forRatesModels[k].yieldCurve.discount(T) / \
self.model.domRatesModel.yieldCurve.discount(T)
fw, err = fwd(mcSim,p)
print(alias + ' @ %4.1lfy %8.6lf vs %8.6lf (curve) - mc_err = %8.6lf' % (T,fw,xT,err))
# domestic Libor rate
Tstart = 10.0
Tend = 10.5
L = Pay(LiborRate(T,Tstart,Tend,alias='EUR'),Tend)
fw, err = fwd(mcSim,L)
Lref = (mcSim.model.domRatesModel.yieldCurve.discount(Tstart) / \
mcSim.model.domRatesModel.yieldCurve.discount(Tend) - 1) / \
(Tend - Tstart)
print('L_EUR @ %4.1lfy %8.6lf vs %8.6lf (curve) - mc_err = %8.6lf' % (T,fw,Lref,err))
        # foreign Libor rates
for k, alias in enumerate(self.model.forAliases):
L = Pay(LiborRate(T,Tstart,Tend,alias=alias)*Asset(Tend,alias),Tend)
fw, err = fwd(mcSim,L)
fw *= mcSim.model.domRatesModel.yieldCurve.discount(Tend) / \
mcSim.model.forRatesModels[k].yieldCurve.discount(Tend) / \
mcSim.model.forAssetModels[k].X0
err *= mcSim.model.domRatesModel.yieldCurve.discount(Tend) / \
mcSim.model.forRatesModels[k].yieldCurve.discount(Tend) / \
mcSim.model.forAssetModels[k].X0
Lref = (mcSim.model.forRatesModels[k].yieldCurve.discount(Tstart) / \
mcSim.model.forRatesModels[k].yieldCurve.discount(Tend) - 1) / \
(Tend - Tstart)
print('L_%s @ %4.1lfy %8.6lf vs %8.6lf (curve) - mc_err = %8.6lf' % (alias,T,fw,Lref,err))
def test_HybridVolAdjusterCalculation(self):
model = copy.deepcopy(self.model)
# model = copy.deepcopy(self.gaussianModel)
hybVolAdjTimes = np.linspace(0.0, 20.0, 21)
model.recalculateHybridVolAdjuster(hybVolAdjTimes)
plt.plot(model.hybAdjTimes,model.hybVolAdj[0], 'r*', label='USD')
plt.plot(model.hybAdjTimes,model.hybVolAdj[1], 'b*', label='GBP')
plt.legend()
#
times = np.linspace(0.0,20.0,101)
plt.plot(times,[ model.hybridVolAdjuster(0,t) for t in times ] , 'r-')
plt.plot(times,[ model.hybridVolAdjuster(1,t) for t in times ] , 'b-')
plt.show()
#
# return
times = np.linspace(0.0, 10.0, 11)
nPaths = 2**13
seed = 314159265359
# risk-neutral simulation
print('')
mcSim = McSimulation(model,times,nPaths,seed,False)
#
T = 10.0
for k, alias in enumerate(model.forAliases):
# ATM forward
xT = model.forAssetModels[k].X0 * \
model.forRatesModels[k].yieldCurve.discount(T) / \
model.domRatesModel.yieldCurve.discount(T)
K = Fixed(xT)
Z = Fixed(0.0)
C = Pay(Max(Asset(T,alias)-K,Z),T)
fw, err = fwd(mcSim,C)
vol = BlackImpliedVol(fw,xT,xT,T,1.0)
vega = BlackVega(xT,xT,vol,T)
err /= vega
volRef = model.forAssetModels[k].sigma
print('C_%s @ %4.1lfy %8.6lf vs %8.6lf (curve) - mc_err = %8.6lf' % (alias,T,vol,volRef,err))
P = Pay(Max(K-Asset(T,alias),Z),T)
fw, err = fwd(mcSim,P)
vol = BlackImpliedVol(fw,xT,xT,T,-1.0)
vega = BlackVega(xT,xT,vol,T)
err /= vega
volRef = model.forAssetModels[k].sigma
print('P_%s @ %4.1lfy %8.6lf vs %8.6lf (curve) - mc_err = %8.6lf' % (alias,T,vol,volRef,err))
if __name__ == '__main__':
unittest.main()
| 41.579545 | 125 | 0.484376 | 9,746 | 0.887856 | 0 | 0 | 0 | 0 | 0 | 0 | 1,376 | 0.125353 |
27939e55222086ece981ba3a6efa56f956601617 | 143 | py | Python | model/__init__.py | Blind-Aid/sentiment-discovery | 081c7c855e00864b52e97cac0b0e097cc86d9731 | [
"BSD-3-Clause"
]
| 1,093 | 2017-12-05T20:35:45.000Z | 2022-02-26T17:48:30.000Z | model/__init__.py | Blind-Aid/sentiment-discovery | 081c7c855e00864b52e97cac0b0e097cc86d9731 | [
"BSD-3-Clause"
]
| 67 | 2017-12-05T22:02:55.000Z | 2021-05-25T09:06:41.000Z | model/__init__.py | Blind-Aid/sentiment-discovery | 081c7c855e00864b52e97cac0b0e097cc86d9731 | [
"BSD-3-Clause"
]
| 207 | 2017-12-05T20:59:45.000Z | 2022-03-08T09:32:39.000Z | from .distributed import *
from .model import *
from .sentiment_classifier import *
from .transformer import *
from .transformer_utils import * | 28.6 | 35 | 0.797203 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2793c0f51d8fcdcbec81fb953d41e2dc42b499c2 | 2,927 | py | Python | SIP/src/peer/peer.py | trishantpahwa/Session-Initiation-Protocol | 5b770dbb9533fbe3a8ff31fc583576cc107e5ba8 | [
"MIT"
]
| 3 | 2019-06-18T18:21:05.000Z | 2021-07-15T06:28:25.000Z | SIP/src/peer/peer.py | trishantpahwa/Session-Initiation-Protocol | 5b770dbb9533fbe3a8ff31fc583576cc107e5ba8 | [
"MIT"
]
| 4 | 2019-01-30T11:31:13.000Z | 2019-03-06T12:36:54.000Z | SIP/src/peer/peer.py | trishantpahwa/Session-Initiation-Protocol | 5b770dbb9533fbe3a8ff31fc583576cc107e5ba8 | [
"MIT"
]
| 1 | 2019-08-12T11:31:23.000Z | 2019-08-12T11:31:23.000Z | import socket
class peer:
__db = None
__s = socket
__protocol = ''
__port = 5060
__s_address = ()
__buff_size = 4096
def __init__(self, protocol='TCP', port=5060, buff_size=4096):
self.__set_protocol(protocol)
self.__set_port(port)
self.__set_buff_size(buff_size)
self.__initialize_socket()
self.__set_s_address()
def __initialize_socket(self):
if self._get_protocol() == 'TCP':
self.__s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
if self._get_protocol() == 'UDP':
self.__s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
else:
print('Unable to initialize socket')
exit(0)
def socket_bind(self):
self.__s.bind(self._get_s_address())
def socket_listen(self, backlog):
self.__s.listen(backlog)
def socket_accept(self):
(client_socket, addr) = self.__s.accept()
return (client_socket, addr)
def socket_connect(self, server_address):
self.__s.connect(server_address)
def socket_close(self):
self.__s.close()
def client_send_message(self, message, address=None):
message = message.encode('UTF-8')
if address:
self.__s.sendto(message, address)
else:
self.__s.send(message)
def client_receive_message(self, protocol):
if protocol == 'TCP':
message = self.__s.recv(self._get_buff_size())
return message.decode('UTF-8')
if protocol == 'UDP':
message, addr = self.__s.recvfrom(self._get_buff_size())
return (message.decode('UTF-8'), addr)
def server_send_message(self, message, client_socket=None,
address=None):
message = message.encode('UTF-8')
if client_socket:
client_socket.send(message)
else:
self.__s.sendto(message, address)
def server_receive_message(self, client_socket=None):
if client_socket:
message = client_socket.recv(self._get_buff_size())
return message.decode('UTF-8')
else:
message, addr = self.__s.recvfrom(self._get_buff_size())
return (message.decode('UTF-8'), addr)
def __set_protocol(self, protocol):
self.__protocol = protocol
def _get_protocol(self):
return self.__protocol
def __set_port(self, port):
self.__port = port
def _get_port(self):
return self.__port
def __set_buff_size(self, buff_size):
self.__buff_size = buff_size
def _get_buff_size(self):
return self.__buff_size
def __set_s_address(self):
self.__s_address = (socket.gethostbyname(socket.gethostname()),
self._get_port())
def _get_s_address(self):
return self.__s_address
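# A minimal usage sketch (hedged; not part of the original module, and the
# hostname-based bind address chosen in __set_s_address may differ per machine).
# Server side:
# >>> srv = peer(protocol='TCP', port=5060)
# >>> srv.socket_bind()
# >>> srv.socket_listen(1)
# >>> client_socket, addr = srv.socket_accept()
# >>> print(srv.server_receive_message(client_socket))
# Client side (separate process):
# >>> cli = peer(protocol='TCP', port=5060)
# >>> cli.socket_connect(('<server-ip>', 5060))
# >>> cli.client_send_message('REGISTER sip:[email protected] SIP/2.0')
# >>> cli.socket_close()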
| 28.696078 | 75 | 0.60574 | 2,910 | 0.994192 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.033481 |
279582d504d9da0d858f00c0d357db9ba41aecb7 | 3,387 | py | Python | champ_bringup/scripts/joint_calibrator_relay.py | lubitz99/champ | 2e4c8606db9a365866726ea84e8107c14ee9446d | [
"BSD-3-Clause"
]
| 923 | 2020-04-06T15:09:24.000Z | 2022-03-30T15:34:08.000Z | champ_bringup/scripts/joint_calibrator_relay.py | lubitz99/champ | 2e4c8606db9a365866726ea84e8107c14ee9446d | [
"BSD-3-Clause"
]
| 73 | 2020-05-12T09:23:12.000Z | 2022-03-28T06:22:16.000Z | champ_bringup/scripts/joint_calibrator_relay.py | lubitz99/champ | 2e4c8606db9a365866726ea84e8107c14ee9446d | [
"BSD-3-Clause"
]
| 229 | 2020-04-26T06:32:28.000Z | 2022-03-29T08:07:28.000Z | #!/usr/bin/env python
'''
Copyright (c) 2019-2020, Juan Miguel Jimeno
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import rospy
from champ_msgs.msg import Joints
from sensor_msgs.msg import JointState
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
import rosparam
import os, sys
class JointsCalibratorRelay:
def __init__(self):
rospy.Subscriber("joints_calibrator", JointState, self.joints_cmd_callback)
joint_controller_topic = rospy.get_param('champ_controller/joint_controller_topic')
self.joint_minimal_pub = rospy.Publisher('cmd_joints', Joints, queue_size = 100)
self.joint_trajectory_pub = rospy.Publisher(joint_controller_topic, JointTrajectory, queue_size = 100)
joints_map = [None,None,None,None]
joints_map[3] = rospy.get_param('/joints_map/left_front')
joints_map[2] = rospy.get_param('/joints_map/right_front')
joints_map[1] = rospy.get_param('/joints_map/left_hind')
joints_map[0] = rospy.get_param('/joints_map/right_hind')
self.joint_names = []
for leg in reversed(joints_map):
for joint in leg:
self.joint_names.append(joint)
def joints_cmd_callback(self, joints):
joint_minimal_msg = Joints()
for i in range(12):
joint_minimal_msg.position.append(joints.position[i])
self.joint_minimal_pub.publish(joint_minimal_msg)
joint_trajectory_msg = JointTrajectory()
joint_trajectory_msg.joint_names = self.joint_names
point = JointTrajectoryPoint()
point.time_from_start = rospy.Duration(1.0 / 60.0)
point.positions = joint_minimal_msg.position
joint_trajectory_msg.points.append(point)
self.joint_trajectory_pub.publish(joint_trajectory_msg)
if __name__ == "__main__":
rospy.init_node('joints_calibrator_relay', anonymous=True)
j = JointsCalibratorRelay()
rospy.spin() | 45.16 | 110 | 0.749631 | 1,502 | 0.44346 | 0 | 0 | 0 | 0 | 0 | 0 | 1,755 | 0.518158 |
279641443118aebc70b220bf9dae1dc53a9d2fc4 | 3,909 | py | Python | touchdown/aws/vpc/vpc.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
]
| 14 | 2015-01-05T18:18:04.000Z | 2022-02-07T19:35:12.000Z | touchdown/aws/vpc/vpc.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
]
| 106 | 2015-01-06T00:17:13.000Z | 2019-09-07T00:35:32.000Z | touchdown/aws/vpc/vpc.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
]
| 5 | 2015-01-30T10:18:24.000Z | 2022-02-07T19:35:13.000Z | # Copyright 2014 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import argument, serializers
from touchdown.core.plan import Plan
from touchdown.core.resource import Resource
from ..account import BaseAccount
from ..common import SimpleApply, SimpleDescribe, SimpleDestroy, TagsMixin
class VPC(Resource):
resource_name = "vpc"
name = argument.String(field="Name", group="tags")
cidr_block = argument.IPNetwork(field="CidrBlock")
tenancy = argument.String(
default="default", choices=["default", "dedicated"], field="InstanceTenancy"
)
tags = argument.Dict()
account = argument.Resource(BaseAccount)
enable_dns_support = argument.Boolean(
default=True,
field="EnableDnsSupport",
serializer=serializers.Dict(Value=serializers.Identity()),
group="dns_support_attribute",
)
enable_dns_hostnames = argument.Boolean(
default=True,
field="EnableDnsHostnames",
serializer=serializers.Dict(Value=serializers.Identity()),
group="dns_hostnames_attribute",
)
class Describe(SimpleDescribe, Plan):
resource = VPC
service_name = "ec2"
api_version = "2015-10-01"
describe_action = "describe_vpcs"
describe_envelope = "Vpcs"
key = "VpcId"
def get_describe_filters(self):
return {"Filters": [{"Name": "tag:Name", "Values": [self.resource.name]}]}
def annotate_object(self, obj):
obj["EnableDnsSupport"] = self.client.describe_vpc_attribute(
Attribute="enableDnsSupport", VpcId=obj["VpcId"]
)["EnableDnsSupport"]
obj["EnableDnsHostnames"] = self.client.describe_vpc_attribute(
Attribute="enableDnsHostnames", VpcId=obj["VpcId"]
)["EnableDnsHostnames"]
return obj
class Apply(TagsMixin, SimpleApply, Describe):
create_action = "create_vpc"
waiter = "vpc_available"
def update_dnssupport_attribute(self):
diff = self.resource.diff(
self.runner,
self.object.get("EnableDnsSupport", {}),
group="dns_support_attribute",
)
if not diff.matches():
yield self.generic_action(
["Configure DNS Support Setting"] + list(diff.lines()),
self.client.modify_vpc_attribute,
VpcId=serializers.Identifier(),
EnableDnsSupport=serializers.Argument("enable_dns_support"),
)
def update_dnshostnames_attribute(self):
diff = self.resource.diff(
self.runner,
self.object.get("EnableDnsHostnames", {}),
group="dns_hostnames_attribute",
)
if not diff.matches():
yield self.generic_action(
["Configure DNS Hostnames Setting"] + list(diff.lines()),
self.client.modify_vpc_attribute,
VpcId=serializers.Identifier(),
EnableDnsHostnames=serializers.Argument("enable_dns_hostnames"),
)
def update_object(self):
for action in super(Apply, self).update_object():
yield action
for action in self.update_dnssupport_attribute():
yield action
for action in self.update_dnshostnames_attribute():
yield action
class Destroy(SimpleDestroy, Describe):
destroy_action = "delete_vpc"
# waiter = 'vpc_terminated'
| 32.575 | 84 | 0.660783 | 3,078 | 0.787414 | 1,357 | 0.347148 | 0 | 0 | 0 | 0 | 1,188 | 0.303914 |
2796c1a0814e47f59cba47b456a1373f8929dd6d | 77 | py | Python | backend_django/type/__init__.py | mehranagh20/Typonent | 702ae3e018dcffb952c870b0a680f2475845b744 | [
"MIT"
]
| 4 | 2017-03-24T19:14:29.000Z | 2017-04-02T18:23:25.000Z | backend_django/type/__init__.py | mehranagh20/typing_site | 702ae3e018dcffb952c870b0a680f2475845b744 | [
"MIT"
]
| 4 | 2017-03-25T20:25:04.000Z | 2017-03-31T17:18:23.000Z | backend_django/type/__init__.py | mehranagh20/typing_site | 702ae3e018dcffb952c870b0a680f2475845b744 | [
"MIT"
]
| null | null | null |
# from type.models import User, Requirement, Involvement, Competition, Text | 25.666667 | 75 | 0.792208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.974026 |
2797b6dfc818a3de2bc52aaf5906014401475627 | 793 | py | Python | estructuras de control secuenciales/ejercicio10.py | svcuellar/algoritmos_programacion | 0813ee6a2ccb605557a7920bf82440b7388b49e8 | [
"MIT"
]
| null | null | null | estructuras de control secuenciales/ejercicio10.py | svcuellar/algoritmos_programacion | 0813ee6a2ccb605557a7920bf82440b7388b49e8 | [
"MIT"
]
| null | null | null | estructuras de control secuenciales/ejercicio10.py | svcuellar/algoritmos_programacion | 0813ee6a2ccb605557a7920bf82440b7388b49e8 | [
"MIT"
]
| null | null | null | """
inputs
amount_of_austrian_schillings-->c-->float
amount_of_greek_drachmas-->dg-->float
amount_of_pesetas-->p-->float
outputs
schillings_to_pesetas-->c_p-->float
drachmas_to_french_francs-->dg_ff-->float
pesetas_to_dollars-->p_d-->float
pesetas_to_italian_liras-->p_li-->float
"""
# inputs
c=float(input("Ingrese la cantidad de chelines austriacos "))
dg=float(input("Ingrese la cantidad de dragmas griegos "))
p=float(input("Ingrese la cantidad de pesetas "))
# black box (currency conversions)
c_p=round((c*9.57), 2)
dg_ff=round(((c*0.957)/20.110), 2)
p_d=round((p/122.499), 2)
p_li=round((p/0.092289), 2)
# outputs
print(c, " chelines equivalen a", c_p, " pesetas")
print(dg, " dragmas griegos equivalen a", dg_ff, " francos franceses")
print(p, " pesetas equivalen a", p_d, " dolares y ", p_li, " liras italianas") | 28.321429 | 78 | 0.726356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 558 | 0.703657 |
27981338330ee315b120f4f29b8d0163c165b34b | 4,453 | py | Python | st_model.py | saras108/Sentiment_Analysis | 7e4e84637161cd005ebbcd303f68417726b5f098 | [
"MIT"
]
| null | null | null | st_model.py | saras108/Sentiment_Analysis | 7e4e84637161cd005ebbcd303f68417726b5f098 | [
"MIT"
]
| null | null | null | st_model.py | saras108/Sentiment_Analysis | 7e4e84637161cd005ebbcd303f68417726b5f098 | [
"MIT"
]
| null | null | null | #importing necessary libraries
import numpy as np
import pandas as pd
import string
import streamlit as st
header = st.container()
dataset = st.container()
fearure = st.container()
model_training = st.container()
def get_data(file_name):
df = pd.read_csv(file_name , header = None)
return df
with header:
st.title("Emotion detection using Text")
with dataset:
st.header("Emotion Detection Datasets")
df = get_data("1-P-3-ISEAR.csv")
df.columns = ['sn','Target','Sentence']
df.drop('sn',inplace=True,axis =1)
df.head()
df.duplicated().sum()
df.drop_duplicates(inplace = True)
st.subheader("Lets check if the dataset is fairly distrributed.")
col1 , col2 = st.columns(2)
target_count = df['Target'].value_counts()
col1.table(target_count)
col2.text("Line Chart of the total output counts")
col2.line_chart(target_count )
st.markdown("From the above data, we can easily say the data iss fairly distributed.")
with fearure:
st.header("Learning about Feature and converting them")
def lowercase(text):
text = text.lower()
return text
# df['Sentence'] = df['Sentence'].apply(lowercase)
def remove_punc(text):
text = "".join([char for char in text if char not in string.punctuation and not char.isdigit()])
return text
df['Sentence'] = df['Sentence'].apply(lowercase).apply(remove_punc)
#Removing the stop words
import nltk
nltk.download('omw-1.4')
nltk.download('stopwords')
from nltk.corpus import stopwords
def remove_stopwords(text):
text = [w for w in text.split() if w not in stopwords.words('english')]
return ' '.join(text)
df['Sentence'] = df['Sentence'].apply(remove_stopwords)
    # Lemmatization, i.e. changing words into their root form
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
from nltk.corpus import wordnet
lemmatizer = WordNetLemmatizer()
def lemmatize(text):
text = [lemmatizer.lemmatize(word,'v') for word in text.split()]
return ' '.join(text)
df['Sentence'] = df['Sentence'].apply(lemmatize)
    st.markdown('As part of data pre-processing, we have done the following things:')
    st.text(" - Converting the sentences to lowercase ")
    st.text(" - Removing the punctuation and digits ")
    st.text(" - Removing the stop words ")
    st.text(" - Lemmatization, i.e. changing words into their root form ")
st.markdown("After all these our data looks like-")
st.dataframe(df.head())
st.markdown("After doing Train Test split we will apply TGIF, It is technique to transform text into a meaningful vector of numbers. TFIDF penalizes words that come up too often and dont really have much use. So it rescales the frequency of words that are common which makes scoring more balanced")
with model_training:
from sklearn.model_selection import train_test_split
X = df['Sentence']
y = df['Target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2,random_state=10)
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(min_df=2, max_df=0.5, ngram_range=(1, 2))
train_tfidf = tfidf.fit_transform(X_train)
test_tfidf = tfidf.transform(X_test)
from sklearn.linear_model import LogisticRegression
logistic = LogisticRegression(max_iter=1000)
logistic.fit(train_tfidf,y_train)
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()
nb.fit(train_tfidf,y_train)
    st.header('Checking the accuracy using different models.')
import joblib
joblib.dump(logistic, './mymodel/logistic_model.joblib')
joblib.dump(nb, './mymodel/naive_bayes_model.joblib')
joblib.dump(tfidf, './mymodel/tfidf_model.joblib')
sel_col , disp_col = st.columns(2)
with sel_col:
sel_col.subheader("Logistic Regression")
sel_col.markdown("Logistic Regression Train Error")
sel_col.write(logistic.score(train_tfidf, y_train))
sel_col.markdown("Logistic Regression Test Error")
sel_col.write( logistic.score(test_tfidf, y_test))
with disp_col:
disp_col.subheader("Naive Bias")
disp_col.markdown("Naive Bias Train Error")
disp_col.write(nb.score(train_tfidf, y_train))
disp_col.markdown("Naive Bias Test Error")
disp_col.write(nb.score(test_tfidf, y_test))
| 29.885906 | 302 | 0.688974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,439 | 0.323153 |
279b274296a91748dfbdabae0134ce96287057e9 | 540 | py | Python | config.py | sheepmen/SpiderManage | 850d6357fd1117c16684dabb5d1e79de1854e61c | [
"MIT"
]
| 1 | 2018-06-13T00:38:53.000Z | 2018-06-13T00:38:53.000Z | config.py | sheepmen/SpiderManage | 850d6357fd1117c16684dabb5d1e79de1854e61c | [
"MIT"
]
| null | null | null | config.py | sheepmen/SpiderManage | 850d6357fd1117c16684dabb5d1e79de1854e61c | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
SECRET_KEY = 'some secret key'
TEMPLATES_AUTO_RELOAD = True
PROJECT_NAME = 'SpiderManage'
# Redis Config
REDIS_HOST = '120.25.227.8'
REDIS_PORT = 6379
REDIS_PASSWORD = 'xuxinredis'
# SQLite
# SQLALCHEMY_DATABASE_URI = 'sqlite:///C:/Users/sheep3/workplace/SpiderManage/data.db'
# SQLALCHEMY_TRACK_MODIFICATIONS = True
# MYSQL
SQLALCHEMY_DATABASE_URI = 'mysql://root:[email protected]:3306/spider_db?charset=utf8'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = True | 30 | 93 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 321 | 0.594444 |
279b776bfdce89147881347913d489e839a74293 | 3,989 | py | Python | PyPrometheus.py | RusDavies/PyPrometheus | 8c0bb9489f42423942982829024d7359a374d7b1 | [
"MIT"
]
| null | null | null | PyPrometheus.py | RusDavies/PyPrometheus | 8c0bb9489f42423942982829024d7359a374d7b1 | [
"MIT"
]
| null | null | null | PyPrometheus.py | RusDavies/PyPrometheus | 8c0bb9489f42423942982829024d7359a374d7b1 | [
"MIT"
]
| null | null | null | from PyPrometheusQueryClient import PrometheusQueryClient
import json
from pathlib import Path
from datetime import datetime
class Prometheus:
def __init__(self, url, metrics_config_file=None, cache_path=None, cache_ttl=3600, ssl_verify=True, starttime=None, endtime=None):
self._metrics_config_file = metrics_config_file
self._starttime = starttime
self._endtime = endtime
self.pqc = PrometheusQueryClient(url=url, cache_path=cache_path,
cache_ttl=cache_ttl, ssl_verify=ssl_verify)
self._load_metrics_config()
self.prometheus_data = {}
#---
def _load_metrics_config(self, metrics_config_file=None):
if (metrics_config_file):
self._metrics_config_file = metrics_config_file
if (not self._metrics_config_file):
raise ValueError('No metrics config file set. Cannot continue.')
path = Path(self._metrics_config_file)
if(not path.exists()):
raise ValueError("The configuration file '{}' does not exist".format(self._metrics_config_file))
with open(path, 'r') as f:
self._metrics_config = json.loads( f.read() )
return
def get_metrics(self, report_progress):
for (metric, metadata) in self._metrics_config.items():
if metadata['active'] == False:
continue
if (not metric in self.pqc.metrics):
raise ValueError("Metric '{}' is unknown".format(metric))
if (report_progress):
print("Getting results for metric '{}'{}".format(metric, ' ' * 40), end='\r')
_ = self.get_metric(metric, metadata)
def get_metric(self, metric, metadata=None, starttime:datetime=None, endtime:datetime=None):
        # Order of precedence: start and end times passed as params first; otherwise those set on the class.
if(not starttime):
starttime = self._starttime
if(not endtime):
endtime = self._endtime
# Make sure we have actual start and end times
if(not starttime or not endtime):
raise ValueError('Both starttime and endtime must be set')
        # Convert str objects to the expected datetime formats
# if( isinstance(starttime, str) ):
# starttime = datetime.strptime(starttime, '%Y-%m-%dT%H:%M:%SZ')
# if( isinstance(endtime, str) ):
# endtime = datetime.strptime(endtime, '%Y-%m-%dT%H:%M:%SZ')
        # Make sure we're given an actual metric name
if (not metric or len(metric) == 0):
raise ValueError("Metric '{}' cannot be None")
        # Make sure the metric is present in the list retrieved from the server
if (not metric in self.pqc.metrics):
raise ValueError("Metric '{}' is not available on the server".format(metric))
        # If we're not passed the metadata, try to recover it from our metrics config.
if (not metadata):
metadata = self._metrics_config.get(metric, {})
#
# Now do the real work
#
# Set up the stub of the result
self.prometheus_data[metric] = {}
self.prometheus_data[metric]['metadata'] = metadata
self.prometheus_data[metric]['title'] = metric
        # Pull the data via the PrometheusQueryClient, depending on the 'deltas' setting in the metadata
deltas = metadata.get('deltas', None)
if (deltas == None):
(data, df) = self.pqc.get_metric(metric, start=starttime, end=endtime)
elif (deltas == True):
(data, df) = self.pqc.get_with_deltas(metric, start=starttime, end=endtime)
else:
(data, df) = self.pqc.get_without_deltas(metric, start=starttime, end=endtime)
self.prometheus_data[metric]['data'] = data
self.prometheus_data[metric]['df'] = df
return self.prometheus_data[metric] | 37.990476 | 134 | 0.613688 | 3,862 | 0.968162 | 0 | 0 | 0 | 0 | 0 | 0 | 1,028 | 0.257709 |
279d9301e8e9b967d31f0c36c000b8b79e8eab38 | 5,557 | py | Python | tests/validate_schema_guide.py | dieghernan/citation-file-format | cfad34b82aa882d8169a0bcb6a21ad19cb4ff401 | [
"CC-BY-4.0"
]
| 257 | 2017-12-18T14:09:32.000Z | 2022-03-28T17:58:19.000Z | tests/validate_schema_guide.py | Seanpm2001-DIGITAL-Command-Language/citation-file-format | 52647a247e9b1a5b04154934f39615b5ee8c4d65 | [
"CC-BY-4.0"
]
| 307 | 2017-10-16T12:17:40.000Z | 2022-03-18T11:18:49.000Z | tests/validate_schema_guide.py | Seanpm2001-DIGITAL-Command-Language/citation-file-format | 52647a247e9b1a5b04154934f39615b5ee8c4d65 | [
"CC-BY-4.0"
]
| 344 | 2018-09-19T03:00:26.000Z | 2022-03-31T01:39:11.000Z | import pytest
import os
import json
import jsonschema
from ruamel.yaml import YAML
def test():
def extract_snippets():
start = 0
end = len(markdown)
while start < end:
snippet_start = markdown.find("```yaml\n", start, end)
if snippet_start == -1:
break
snippet_end = markdown.find("```\n", snippet_start + 8, end)
text = markdown[snippet_start:snippet_end + 4]
indent_size = 0
while text[8:][indent_size] == " ":
indent_size += 1
unindented = "\n"
for line in text[8:-4].split("\n"):
unindented += line[indent_size:]
unindented += "\n"
snippets.append(dict(start=snippet_start, end=snippet_end + 4, text=unindented))
start = snippet_end + 4
return snippets
with open("schema-guide.md", "r") as f:
markdown = f.read()
snippets = list()
snippets = extract_snippets()
yaml = YAML(typ='safe')
yaml.constructor.yaml_constructors[u'tag:yaml.org,2002:timestamp'] = yaml.constructor.yaml_constructors[u'tag:yaml.org,2002:str']
schema_path = os.path.join(os.path.dirname(__file__), "..", "schema.json")
with open(schema_path, "r") as sf:
schema_data = json.loads(sf.read())
for i_snippet, snippet in enumerate(snippets):
if "# incorrect" in snippet["text"]:
continue
instance = yaml.load(snippet["text"])
passes = False
while not passes:
try:
jsonschema.validate(instance=instance, schema=schema_data, format_checker=jsonschema.FormatChecker())
passes = True
print("snippet {0}/{1} (chars {2}-{3}): OK".format(i_snippet + 1, len(snippets), snippet["start"], snippet["end"]))
except jsonschema.ValidationError as e:
path = "" if len(e.relative_path) == 0 else "/".join([str(p) for p in e.relative_path]) + "/"
if path == "":
if e.message.startswith("'authors' is a required property"):
instance["authors"] = []
elif e.message.startswith("'cff-version' is a required property"):
instance["cff-version"] = "1.2.0"
elif e.message.startswith("'message' is a required property"):
instance["message"] = "testmessage"
elif e.message.startswith("'title' is a required property"):
instance["title"] = "testtitle"
else:
raise Exception("undefined behavior: " + e.message)
elif path.startswith("authors"):
if e.message.startswith("[] is too short"):
instance["authors"].append({"name": "testname"})
else:
raise Exception("undefined behavior: " + e.message)
elif path.startswith("references"):
index = int(path.split("/")[1])
if e.message.startswith("'authors' is a required property"):
instance["references"][index]["authors"] = []
elif e.message.startswith("'title' is a required property"):
instance["references"][index]["title"] = "testtitle"
elif e.message.startswith("'type' is a required property"):
instance["references"][index]["type"] = "generic"
elif e.message.startswith("[] is too short"):
instance["references"][index]["authors"].append({"name": "testname"})
elif path.startswith("references/{0}/conference".format(index)):
if e.message.startswith("'name' is a required property"):
instance["references"][index]["conference"]["name"] = "testname"
else:
raise Exception("undefined behavior: " + e.message)
elif path.startswith("preferred-citation"):
if e.message.startswith("'authors' is a required property"):
instance["preferred-citation"]["authors"] = []
elif e.message.startswith("'title' is a required property"):
instance["preferred-citation"]["title"] = "testtitle"
elif e.message.startswith("'type' is a required property"):
instance["preferred-citation"]["type"] = "generic"
elif e.message.startswith("[] is too short"):
instance["preferred-citation"]["authors"].append({"name": "testname"})
else:
raise Exception("undefined behavior: " + e.message)
else:
print("Found a problem with snippet at char position {0}-{1}:\n {2}\n{3}".format(snippet["start"], snippet["end"], snippet["text"], e.message))
raise e
| 57.28866 | 171 | 0.479935 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,271 | 0.228721 |
279ec732b8ac9028c087cef84952b665f1b41600 | 188 | py | Python | vkrpg/scripts/hello.py | Augmeneco/VKRPG | b071a490ae45a574a028af1eb831fff96782c06c | [
"Apache-2.0"
]
| null | null | null | vkrpg/scripts/hello.py | Augmeneco/VKRPG | b071a490ae45a574a028af1eb831fff96782c06c | [
"Apache-2.0"
]
| null | null | null | vkrpg/scripts/hello.py | Augmeneco/VKRPG | b071a490ae45a574a028af1eb831fff96782c06c | [
"Apache-2.0"
]
| null | null | null | import vkrpg
class MainContext(vkrpg.contexts.BaseContext):
def on_message(self, msg):
vkrpg.chat.send(msg['peer_id'], text='Привет, VKID#{}!'.format(msg['from_id']))
| 31.333333 | 87 | 0.659574 | 171 | 0.881443 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.216495 |
27a5750fd3834a5dd24fb63cbde3fd11a0fdfdd0 | 4,613 | py | Python | flaskprediction/routes.py | killswitchh/flask-prediction-app | a8bdff96fa2dc05544991a705970d1550ac9a034 | [
"MIT"
]
| null | null | null | flaskprediction/routes.py | killswitchh/flask-prediction-app | a8bdff96fa2dc05544991a705970d1550ac9a034 | [
"MIT"
]
| 1 | 2020-08-29T18:39:05.000Z | 2020-08-30T09:43:47.000Z | flaskprediction/routes.py | killswitchh/flask-prediction-app | a8bdff96fa2dc05544991a705970d1550ac9a034 | [
"MIT"
]
| null | null | null | import secrets
from flask import Flask , render_template , url_for , send_from_directory
from flaskprediction import app
from flaskprediction.utils.predict import Predictor
from flaskprediction.forms import CarDetailsForm , TitanicDetailsForm , BostonDetailsForm , HeightDetailsForm, CatImageForm
from PIL import Image
import os
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'), 'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route("/")
@app.route("/home")
def home():
return render_template('home.html')
@app.route("/classifier", methods=['GET' , 'POST'])
def classifier():
return render_template('classification.html')
@app.route("/regressor", methods=['GET' , 'POST'])
def regressor():
return render_template('regression.html')
@app.route("/classifier/titanic", methods=['GET' , 'POST'])
def titanic():
message = ""
form = TitanicDetailsForm()
if form.validate_on_submit():
parameter_list = [form.p_id.data , form.p_class.data, form.sex.data ,form.age.data,form.sibsp.data,form.parch.data,form.fare.data,form.embarked.data]
predictor = Predictor()
print(parameter_list)
answer = predictor.calculate_probability_titanic(parameter_list)
message = ""
return render_template('titanic.html' , title='Titanic Classifier' , form = form , message= message,answer = answer)
else:
message = "Enter Passenger Details"
return render_template('titanic.html' , title='Titanic Classifier' , form = form , message= message)
@app.route("/classifier/car" , methods=['GET' , 'POST'])
def car():
message = ""
form = CarDetailsForm()
if form.validate_on_submit():
parameter_list = list(map(int,[form.price.data , form.maintenance.data,form.no_of_doors.data, form.capacity.data ,form.size_of_luggage_boot.data,form.safety.data]))
predictor = Predictor()
answer = predictor.calculate_probability_car(parameter_list)
message = ""
return render_template('car.html' , title='Car Classifier' , form = form , message= message,answer = answer)
else:
message = "Select All Values"
return render_template('car.html' , title='Car Classifier' , form = form , message= message)
@app.route("/regressor/boston" , methods=['GET' , 'POST'])
def boston():
message = ""
form = BostonDetailsForm()
if form.validate_on_submit():
parameter_list = [form.crim.data , form.zn.data, form.chas.data ,form.nox.data,form.rm.data,form.age.data,form.dis.data,form.ptratio.data , form.black.data , form.lstat.data]
predictor = Predictor()
answer = predictor.calculate_price_boston(parameter_list)
message = ""
return render_template('boston.html' , title='Boston Regressor' , form = form , message= message,answer = answer)
else:
message = "Select All Values"
        return render_template('boston.html' , title='Boston Regressor' , form = form , message= message)
@app.route("/regressor/height" , methods=['GET' , 'POST'])
def height():
message = ""
form = HeightDetailsForm()
if form.validate_on_submit():
parameter_list = [form.sex.data , form.height.data]
predictor = Predictor()
answer = predictor.calculate_weight(parameter_list)
message = ""
return render_template('height.html' , title='Weight Prediction' , form = form , message= message,answer = answer)
else:
message = "Select All Values"
return render_template('height.html' , title='Weight Prediction' , form = form , message= message)
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(app.root_path, 'static/pics', picture_fn)
output_size = (64, 64)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_path
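# Note (hedged): the 64x64 output_size above is assumed to match the input size the
# cat classifier in Predictor.find_cat() expects; it is the only place to change if
# the underlying model changes. The cat() view below uses the helper like this:
# >>> path = save_picture(form.cat_picture.data)
# >>> answer = Predictor().find_cat(path)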
@app.route("/classifier/cat" , methods=['GET' , 'POST'])
def cat():
message = ""
form = CatImageForm()
if form.validate_on_submit():
picture_file = form.cat_picture.data
image_file = save_picture(picture_file)
predictor = Predictor()
answer = predictor.find_cat(image_file)
message = ""
return render_template('cat.html' , title='Cat Prediction' , form = form , message= message,answer = answer)
else:
message = "Upload A Picture"
return render_template('cat.html' , title='Cat Prediction' , form = form , message= message) | 39.767241 | 182 | 0.681986 | 0 | 0 | 0 | 0 | 3,884 | 0.841968 | 0 | 0 | 746 | 0.161717 |
27a61a2b957091652c5a6b1dfcf40f524e7bd75a | 14,349 | py | Python | air/gather.py | krisbukovi/AutomatedIngestReport | 87cc65c9028382a0860069d86b69b8517d93f59c | [
"MIT"
]
| null | null | null | air/gather.py | krisbukovi/AutomatedIngestReport | 87cc65c9028382a0860069d86b69b8517d93f59c | [
"MIT"
]
| null | null | null | air/gather.py | krisbukovi/AutomatedIngestReport | 87cc65c9028382a0860069d86b69b8517d93f59c | [
"MIT"
]
| null | null | null |
import requests
import re
from time import sleep
from datetime import datetime
import shutil
import elasticsearch2
from elasticsearch_dsl import Search, Q
from collections import OrderedDict
from sqlalchemy import create_engine
from subprocess import Popen, PIPE, STDOUT
import shlex
import glob
import csv
# from apiclient.discovery import build
from utils import Filename, FileType, Date, conf, logger, sort
class Gather:
"""gather data from various sources (canonical list, solr, etc.)
    ads files are placed in a well-known directory with a name based on the date and their contents"""
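    # A minimal usage sketch (hedged; assumes conf entries such as CANONICAL_FILE,
    # SOLR_URL and ELASTICSEARCH_URL are configured for the environment):
    # >>> g = Gather(Date.TODAY)
    # >>> g.all()   # canonical, solr, elasticsearch, postgres, classic and fulltext checks
    # >>> g.values['solr_cumulative_errors'], g.values['failed_tests']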
def __init__(self, date=Date.TODAY):
"""use passed date as prefix in filenames"""
self.date = date
self.values = {}
self.values['passed_tests'] = []
self.values['failed_tests'] = []
def all(self):
self.solr_admin()
jobid = self.solr_bibcodes_start()
self.canonical()
self.elasticsearch()
self.postgres()
self.classic()
self.solr_bibcodes_finish(jobid)
self.fulltext()
def canonical(self):
"""create local copy of canonical bibcodes"""
c = conf['CANONICAL_FILE']
air = Filename.get(self.date, FileType.CANONICAL)
logger.info('making local copy of canonical bibcodes file, from %s to %s', c, air)
shutil.copy(c, air)
sort(air)
def solr(self):
self.solr_admin()
self.solr_bibcodes()
def solr_admin(self):
"""obtain admin oriented data from solr instance """
url = conf.get('SOLR_URL', 'http://localhost:9983/solr/collection1/')
query = 'admin/mbeans?stats=true&cat=UPDATEHANDLER&wt=json'
rQuery = requests.get(url + query)
if rQuery.status_code != 200:
logger.error('failed to obtain stats on update handler, status code = %s', rQuery.status_code)
else:
j = rQuery.json()
self.values['solr_cumulative_adds'] = j['solr-mbeans'][1]['updateHandler']['stats']['cumulative_adds']
self.values['solr_cumulative_errors'] = j['solr-mbeans'][1]['updateHandler']['stats']['cumulative_errors']
self.values['solr_errors'] = j['solr-mbeans'][1]['updateHandler']['stats']['errors']
def solr_bibcodes(self):
jobid = self.solr_bibcodes_start()
self.solr_bibcodes_finish(jobid)
def solr_bibcodes_start(self):
"""use solr batch api to get list of all bibcode it has
based on http://labs.adsabs.harvard.edu/trac/adsabs/wiki/SearchEngineBatch#Example4:Dumpdocumetsbyquery"""
url = conf.get('SOLR_URL', 'http://localhost:9983/solr/collection1/')
query = 'batch?command=dump-docs-by-query&q=*:*&fl=bibcode&wt=json'
# use for testing
# query = 'batch?command=dump-docs-by-query&q=bibcode:2003ASPC..295..361M&fl=bibcode&wt=json'
start = 'batch?command=start&wt=json'
logger.info('sending initial batch query to solr at %s', url)
rQuery = requests.get(url + query)
if rQuery.status_code != 200:
logger.error('initial batch solr query failed, status: %s, text: %s',
rQuery.status_code, rQuery.text)
return False
j = rQuery.json()
jobid = j['jobid']
logger.info('sending solr start batch command')
rStart = requests.get(url + start)
if rStart.status_code != 200:
logger.error('solr start batch processing failed, status %s, text: %s',
rStart.status_code, rStart.text)
return False
return jobid
def solr_bibcodes_finish(self, jobid):
"""get results from earlier submitted job"""
url = conf.get('SOLR_URL', 'http://localhost:9983/solr/collection1/')
status = 'batch?command=status&wt=json&jobid='
get_results = 'batch?command=get-results&wt=json&jobid='
# now we wait for solr to process batch query
finished = False
startTime = datetime.now()
while not finished:
rStatus = requests.get(url + status + jobid)
if rStatus.status_code != 200:
logger.error('batch status check failed, status: %s, text: %s',
rStatus.status_code, rStatus.text)
return False
j = rStatus.json()
if j['job-status'] == 'finished':
finished = True
else:
sleep(10)
if (datetime.now() - startTime).total_seconds() > 3600 * 2:
logger.error('solr batch process taking too long, seconds: %s;',
(datetime.now() - startTime).total_seconds())
return False
        logger.info('solr batch completed in %s seconds, now fetching bibcodes',
(datetime.now() - startTime).total_seconds())
rResults = requests.get(url + get_results + jobid)
if rResults.status_code != 200:
logger.error('failed to obtain bibcodes from solr batch query, status: %s, text: %s,',
rResults.status_code, rResults.text)
return False
# finally save bibcodes to file
bibs = rResults.text # all 12 million bibcodes are in this one text field
        # convert the json-ish text to a simple string; the response includes newlines between bibcodes
bibs = re.sub(r'{"bibcode":"|,|"}', '', bibs)
filename = Filename.get(self.date, FileType.SOLR)
with open(filename, 'w') as f:
f.write(bibs)
sort(filename)
return True
def elasticsearch(self):
"""obtain error counts from elasticsearch """
u = conf['ELASTICSEARCH_URL']
es = elasticsearch2.Elasticsearch(u)
# first get total errors for last 24 hours
s = Search(using=es, index='_all') \
.query('match', **{'@message': 'error'}) \
.filter('range', **{'@timestamp': {'gte': 'now-24h', 'lt': 'now'}}) \
.count()
errors = OrderedDict() # using ordered dict to control order in report
errors['total'] = s
# now get errors individually for each pipeline
pipelines = ('backoffice-master_pipeline',
'backoffice-import_pipeline',
'backoffice-data_pipeline',
'backoffice-fulltext_pipeline',
'backoffice-orcid_pipeline',
'backoffice-citation_capture_pipeline')
for pipeline in pipelines:
s = Search(using=es, index='_all') \
.filter('range', **{'@timestamp': {'gte': 'now-24h', 'lt': 'now'}}) \
.query('match', **{'@message': 'error'}) \
.filter('match', **{'_type': pipeline}) \
.count()
self.values[pipeline] = s
# next, check on specific errors that should have been fixed
# message must be in double quotes to force exact phrase match
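        # (an unquoted query_string would typically match documents containing
        # any of the individual words rather than the exact phrase)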
tests = (('backoffice-master_pipeline', '"too many records to add to db"'),
('backoffice-fulltext_pipeline', '"is linked to a non-existent file"'),
('backoffice-nonbib_pipeline', '"Unbalanced Parentheses"'))
passed_tests = []
failed_tests = []
for pipeline, message in tests:
count = Search(using=es, index='_all') \
.filter('range', **{'@timestamp': {'gte': 'now-24h', 'lt': 'now'}}) \
.query('query_string', query=message) \
.filter('match', **{'_type': pipeline}) \
.count()
if count == 0:
passed_tests.append('{}, message {}\n'.format(pipeline, message))
else:
                failed_tests.append('Unexpected error in {}: {} occurred {} times'
                                    .format(pipeline, message, count))
if len(failed_tests):
errors['failed_tests'] = failed_tests
if len(passed_tests):
errors['passed_tests'] = passed_tests
print errors
self.values['failed_tests'].extend(failed_tests)
self.values['passed_tests'].extend(passed_tests)
def classic(self):
"""are there errors from the classic pipeline"""
files = ('/proj/ads/abstracts/sources/ArXiv/log/update.log',
'/proj/ads/abstracts/sources/ArXiv/log/usage.log')
for f in files:
self.classic_file_check(f)
def classic_file_check(self, f):
x = Popen(['grep', '-i', 'error', f], stdout=PIPE, stderr=STDOUT)
resp = x.communicate()[0]
if x.returncode == 1:
# no errors found in log files
msg = 'passed arxiv check: file {}'.format(f)
print msg
            self.values['passed_tests'].append(msg)
else:
            # return code = 0 if grep matched
            # return code = 2 if grep encountered an error
            msg = 'failed arxiv check: file {}, error = \n{}'.format(f, resp)
            print msg
            self.values['failed_tests'].append(msg)
def postgres(self):
# consider building on ADSPipelineUtils
engine = create_engine(conf['SQLALCHEMY_URL_NONBIB'], echo=False)
connection = engine.connect()
self.values['nonbib_ned_row_count'] = self.exec_sql(connection, "select count(*) from nonbib.ned;")
print 'from nonbib database, ned table has {} rows'.format(self.values['nonbib_ned_row_count'])
connection.close()
engine = create_engine(conf['SQLALCHEMY_URL_MASTER'], echo=False)
connection = engine.connect()
self.values['metrics_updated_count'] = self.exec_sql(connection,
"select count(*) from records where metrics_updated>now() - interval ' 1 day';")
self.values['metrics_null_count'] = self.exec_sql(connection,
"select count(*) from records where metrics is null;")
self.values['master_total_changed'] = self.exec_sql(connection,
"select count(*) from records where processed >= NOW() - '1 day'::INTERVAL;")
self.values['master_solr_changed'] = self.exec_sql(connection,
"select count(*) from records where solr_processed >= NOW() - '1 day'::INTERVAL;")
self.values['master_bib_changed'] = self.exec_sql(connection,
"select count(*) from records where bib_data_updated >= NOW() - '1 day'::INTERVAL;")
self.values['master_fulltext_changed'] = self.exec_sql(connection,
"select count(*) from records where fulltext_updated >= NOW() - '1 day'::INTERVAL;")
self.values['master_orcid_changed'] = self.exec_sql(connection,
"select count(*) from records where orcid_claims_updated >= NOW() - '1 day'::INTERVAL;")
self.values['master_nonbib_changed'] = self.exec_sql(connection,
"select count(*) from records where nonbib_data_updated >= NOW() - '1 day'::INTERVAL;")
connection.close()
print 'from metrics database, null count = {}, 1 day updated count = {}'.format(self.values['metrics_null_count'], self.values['metrics_updated_count'])
def exec_sql(self, connection, query):
result = connection.execute(query)
count = result.first()[0]
return str(count)
def fulltext(self):
"""Get errors from todays fulltext logs and generate a list for each
type of error of corresponding bibcodes and source directories. These
lists are written to files that are further processed in compute.py"""
# types of errors with corresponding file names
errors = conf['FULLTEXT_ERRORS']
        # get today's date
        now = datetime.strftime(datetime.now(), "%Y-%m-%d")
        # loop through the types of error messages
for err_msg in errors.keys():
bibs = []
dirs = []
# location of bibcode and directory in message field
"""example log:
{"asctime": "2019-08-26T11:38:34.201Z", "msecs": 201.6739845275879,
"levelname": "ERROR", "process": 13411, "threadName": "MainThread",
"filename": "checker.py", "lineno": 238, "message": "Bibcode '2019arXiv190105463B'
is linked to a non-existent file '/some/directory/filename.xml'",
"timestamp": "2019-08-26T11:38:34.201Z", "hostname": "adsvm05"}"""
loc_bib = 1
loc_dir = 3
if (err_msg == "No such file or directory"):
loc_bib = 3
loc_dir = 11
elif (err_msg == "format not currently supported for extraction"):
loc_bib = 7
loc_dir = 23
# loop through files
for name in glob.glob(errors[err_msg]):
command = "awk -F\: '/" + err_msg + "/ && /" + now + "/ && /ERROR/ {print $0}' " + name
args = shlex.split(command)
x = Popen(args, stdout=PIPE, stderr=STDOUT)
                # get bibcodes/directories from today's errors
resp = x.communicate()[0].split("\n")
for r in resp:
if r:
r = r.split("'")
bibs.append(r[loc_bib])
dirs.append(r[loc_dir])
# create filename based on error message and date
fname = Filename.get(self.date, FileType.FULLTEXT, adjective=None, msg="_" + ("_".join(err_msg.split())).replace('-', '_') + "_")
# write bibcodes and directories for each error type to file
with open(fname, 'w') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerows(zip(bibs, dirs))
sort(fname)
| 45.122642 | 160 | 0.566451 | 13,933 | 0.971008 | 0 | 0 | 0 | 0 | 0 | 0 | 5,868 | 0.408948 |
27a686fae11eeb59ce17fe8f4cf6412be0900891 | 8,180 | py | Python | python_aternos/atconf.py | DarkCat09/python-aternos | a75d729e938a181449f304e849762dd9bb0e51f3 | [
"Apache-2.0"
]
| 11 | 2021-10-01T13:04:44.000Z | 2022-03-31T19:19:48.000Z | python_aternos/atconf.py | DarkCat09/python-aternos | a75d729e938a181449f304e849762dd9bb0e51f3 | [
"Apache-2.0"
]
| 7 | 2021-10-01T14:00:20.000Z | 2022-03-21T12:29:24.000Z | python_aternos/atconf.py | DarkCat09/python-aternos | a75d729e938a181449f304e849762dd9bb0e51f3 | [
"Apache-2.0"
]
| 4 | 2022-01-07T13:47:39.000Z | 2022-02-22T21:51:28.000Z | import enum
import re
import lxml.html
from typing import Any, Dict, List, Union, Optional
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .atserver import AternosServer
class ServerOpts(enum.Enum):
"""server.options file"""
players = 'max-players'
gm = 'gamemode'
difficulty = 'difficulty'
whl = 'white-list'
online = 'online-mode'
pvp = 'pvp'
cmdblock = 'enable-command-block'
flight = 'allow-flight'
animals = 'spawn-animals'
monsters = 'spawn-monsters'
villagers = 'spawn-npcs'
nether = 'allow-nether'
forcegm = 'force-gamemode'
spawnlock = 'spawn-protection'
cmds = 'allow-cheats'
packreq = 'require-resource-pack'
pack = 'resource-pack'
DAT_PREFIX = 'Data:'
DAT_GR_PREFIX = 'Data:GameRules:'
class WorldOpts(enum.Enum):
"""level.dat file"""
seed12 = 'randomseed'
seed = 'seed'
hardcore = 'hardcore'
difficulty = 'Difficulty'
class WorldRules(enum.Enum):
"""/gamerule list"""
advs = 'announceAdvancements'
univanger = 'universalAnger'
cmdout = 'commandBlockOutput'
elytra = 'disableElytraMovementCheck'
raids = 'disableRaids'
daynight = 'doDaylightCycle'
entdrop = 'doEntityDrops'
fire = 'doFireTick'
phantoms = 'doInsomnia'
immrespawn = 'doImmediateRespawn'
limitcraft = 'doLimitedCrafting'
mobloot = 'doMobLoot'
mobs = 'doMobSpawning'
patrols = 'doPatrolSpawning'
blockdrop = 'doTileDrops'
traders = 'doTraderSpawning'
weather = 'doWeatherCycle'
drowndmg = 'drowningDamage'
falldmg = 'fallDamage'
firedmg = 'fireDamage'
snowdmg = 'freezeDamage'
forgive = 'forgiveDeadPlayers'
keepinv = 'keepInventory'
deathmsg = 'showDeathMessages'
admincmdlog = 'logAdminCommands'
cmdlen = 'maxCommandChainLength'
entcram = 'maxEntityCramming'
mobgrief = 'mobGriefing'
regen = 'naturalRegeneration'
sleeppct = 'playersSleepingPercentage'
rndtick = 'randomTickSpeed'
spawnradius = 'spawnRadius'
reducedf3 = 'reducedDebugInfo'
spectchunkgen = 'spectatorsGenerateChunks'
cmdfb = 'sendCommandFeedback'
class Gamemode(enum.IntEnum):
"""/gamemode numeric list"""
survival = 0
creative = 1
adventure = 2
spectator = 3
class Difficulty(enum.IntEnum):
"""/difficulty numeric list"""
peaceful = 0
easy = 1
normal = 2
hard = 3
# checking timezone format
tzcheck = re.compile(r'(^[A-Z]\w+\/[A-Z]\w+$)|^UTC$')
# options types converting
convert = {
'config-option-number': int,
'config-option-select': int,
'config-option-toggle': bool
}
class AternosConfig:
"""Class for editing server settings
:param atserv: :class:`python_aternos.atserver.AternosServer` object
:type atserv: python_aternos.atserver.AternosServer
"""
def __init__(self, atserv:'AternosServer') -> None:
self.atserv = atserv
def get_timezone(self) -> str:
"""Parses timezone from options page
:return: Area/Location
:rtype: str
"""
optreq = self.atserv.atserver_request(
'https://aternos.org/options', 'GET'
)
        opttree = lxml.html.fromstring(optreq.content)
tzopt = opttree.xpath('//div[@class="options-other-input timezone-switch"]')[0]
tztext = tzopt.xpath('.//div[@class="option current"]')[0].text
return tztext.strip()
def set_timezone(self, value:str) -> None:
"""Sets new timezone
:param value: New timezone
:type value: str
:raises ValueError: If given string
doesn't match Area/Location format
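
        Example (illustrative sketch; assumes ``atserv`` is an authenticated
        :class:`AternosServer` object and the zone below is only a sample)::

            config = AternosConfig(atserv)
            config.set_timezone('Europe/Berlin')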
"""
matches_tz = tzcheck.search(value)
if not matches_tz:
raise ValueError('Timezone must match zoneinfo format: Area/Location')
self.atserv.atserver_request(
'https://aternos.org/panel/ajax/timezone.php',
'POST', data={'timezone': value},
sendtoken=True
)
def get_java(self) -> int:
"""Parses Java version from options page
:return: Java image version
:rtype: int
"""
optreq = self.atserv.atserver_request(
'https://aternos.org/options', 'GET'
)
        opttree = lxml.html.fromstring(optreq.content)
imgopt = opttree.xpath('//div[@class="options-other-input image-switch"]')[0]
imgver = imgopt.xpath('.//div[@class="option current"]/@data-value')[0]
jdkver = str(imgver or '').removeprefix('openjdk:')
return int(jdkver)
def set_java(self, value:int) -> None:
"""Sets new Java version
:param value: New Java image version
:type value: int
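
        Example (illustrative sketch; the version number is only a sample)::

            config.set_java(17)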
"""
self.atserv.atserver_request(
'https://aternos.org/panel/ajax/image.php',
'POST', data={'image': f'openjdk:{value}'},
sendtoken=True
)
#
# server.properties
#
def set_server_prop(self, option:str, value:Any) -> None:
"""Sets server.properties option
:param option: Option name
:type option: str
:param value: New value
:type value: Any
"""
self.__set_prop(
'/server.properties',
option, value
)
def get_server_props(self, proptyping:bool=True) -> Dict[str,Any]:
"""Parses all server.properties from options page
:param proptyping: If the returned dict should contain value
that matches property type (e.g. max-players will be int)
instead of string, defaults to True
:type proptyping: bool, optional
:return: Server.properties dict
:rtype: Dict[str,Any]
"""
return self.__get_all_props('https://aternos.org/options', proptyping)
def set_server_props(self, props:Dict[str,Any]) -> None:
"""Updates server.properties options with the given dict
:param props: Dict with properties `{key:value}`
:type props: Dict[str,Any]
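
        Example (illustrative sketch; the keys follow the ``ServerOpts``
        values defined above)::

            config.set_server_props({
                ServerOpts.players.value: 10,
                ServerOpts.pvp.value: True,
            })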
"""
for key in props:
self.set_server_prop(key, props[key])
#
# level.dat
#
def set_world_prop(
self, option:Union[WorldOpts,WorldRules],
value:Any, gamerule:bool=False,
world:str='world') -> None:
"""Sets level.dat option for specified world
:param option: Option name
:type option: Union[WorldOpts,WorldRules]
:param value: New value
:type value: Any
:param gamerule: If the option
is a gamerule, defaults to False
:type gamerule: bool, optional
:param world: Name of the world which
level.dat must be edited, defaults to 'world'
:type world: str, optional
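
        Example (illustrative sketch; enables keepInventory for the
        default world)::

            config.set_world_prop(WorldRules.keepinv, True, gamerule=True)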
"""
prefix = DAT_PREFIX
if gamerule:
prefix = DAT_GR_PREFIX
self.__set_prop(
f'/{world}/level.dat',
f'{prefix}{option}',
value
)
def get_world_props(
self, world:str='world',
proptyping:bool=True) -> Dict[str,Any]:
"""Parses level.dat from specified world's options page
:param world: Name of the world, defaults to 'world'
:type world: str, optional
:param proptyping: If the returned dict should contain the value
that matches property type (e.g. randomTickSpeed will be bool)
instead of string, defaults to True
:type proptyping: bool, optional
:return: Level.dat dict
:rtype: Dict[str,Any]
"""
        return self.__get_all_props(
            f'https://aternos.org/files/{world}/level.dat',
            proptyping, [DAT_PREFIX, DAT_GR_PREFIX]
        )
def set_world_props(self, props:Dict[str,Any]) -> None:
for key in props:
self.set_world_prop(key, props[key])
#
# helpers
#
def __set_prop(self, file:str, option:str, value:Any) -> None:
self.atserv.atserver_request(
'https://aternos.org/panel/ajax/config.php',
'POST', data={
'file': file,
'option': option,
'value': value
}, sendtoken=True
)
def __get_all_props(
self, url:str, proptyping:bool=True,
prefixes:Optional[List[str]]=None) -> Dict[str,Any]:
optreq = self.atserv.atserver_request(url, 'GET')
opttree = lxml.html.fromstring(optreq.content)
        configs = opttree.xpath('//div[@class="config-options"]')
        result = {}
        for i, conf in enumerate(configs):
            opts = conf.xpath('.//div[contains(@class,"config-option ")]')
            for opt in opts:
                key = opt.xpath('.//span[@class="config-option-output-key"]')[0].text
                value = opt.xpath('.//span[@class="config-option-output-value"]')[0].text
                if prefixes is not None:
                    key = f'{prefixes[i]}{key}'
                opttype = opt.get('class').split(' ')[1]
                if proptyping and opttype in convert:
                    value = convert[opttype](value)
                result[key] = value
        return result
| 24.638554 | 82 | 0.667604 | 7,687 | 0.939731 | 0 | 0 | 0 | 0 | 0 | 0 | 4,142 | 0.506357 |
27a6c1cdc477a10a4c9b691137650bb8e9980229 | 11,859 | py | Python | examples/cadre_dymos.py | johnjasa/CADRE | a4ffd61582b8474953fc309aa540838a14f29dcf | [
"Apache-2.0"
]
| null | null | null | examples/cadre_dymos.py | johnjasa/CADRE | a4ffd61582b8474953fc309aa540838a14f29dcf | [
"Apache-2.0"
]
| null | null | null | examples/cadre_dymos.py | johnjasa/CADRE | a4ffd61582b8474953fc309aa540838a14f29dcf | [
"Apache-2.0"
]
| null | null | null | from __future__ import print_function
import unittest
import numpy as np
from openmdao.api import Problem, Group, pyOptSparseDriver, DirectSolver, SqliteRecorder
from dymos import Phase
from dymos.utils.indexing import get_src_indices_by_row
from dymos.phases.components import ControlInterpComp
from CADRE.odes_dymos.cadre_orbit_ode import CadreOrbitODE
from CADRE.attitude_dymos.angular_velocity_comp import AngularVelocityComp
from CADRE.odes_dymos.cadre_systems_ode import CadreSystemsODE
GM = 398600.44
rmag = 7000.0
period = 2 * np.pi * np.sqrt(rmag ** 3 / GM)
vcirc = np.sqrt(GM / rmag)
duration = period
duration = 6 * 3600.0
p = Problem(model=Group())
p.driver = pyOptSparseDriver()
p.driver.options['optimizer'] = 'SNOPT'
p.driver.options['dynamic_simul_derivs'] = True
p.driver.opt_settings['Major iterations limit'] = 1000
p.driver.opt_settings['Major feasibility tolerance'] = 1.0E-4
p.driver.opt_settings['Major optimality tolerance'] = 1.0E-4
p.driver.opt_settings['Major step limit'] = 0.1
p.driver.opt_settings['iSumm'] = 6
p.driver.recording_options['includes'] = ['*']
p.driver.recording_options['record_objectives'] = True
p.driver.recording_options['record_constraints'] = True
p.driver.recording_options['record_desvars'] = True
recorder = SqliteRecorder("cases.sql")
p.driver.add_recorder(recorder)
NUM_SEG = 30
TRANSCRIPTION_ORDER = 3
orbit_phase = Phase('radau-ps',
ode_class=CadreOrbitODE,
num_segments=NUM_SEG,
transcription_order=TRANSCRIPTION_ORDER,
compressed=False)
p.model.add_subsystem('orbit_phase', orbit_phase)
orbit_phase.set_time_options(fix_initial=True, fix_duration=True, duration_ref=duration)
orbit_phase.set_state_options('r_e2b_I', defect_scaler=1000, fix_initial=True, units='km')
orbit_phase.set_state_options('v_e2b_I', defect_scaler=1000, fix_initial=True, units='km/s')
# orbit_phase.set_state_options('SOC', defect_scaler=1, fix_initial=True, units=None)
# orbit_phase.add_design_parameter('P_bat', opt=False, units='W')
orbit_phase.add_control('Gamma', opt=True, lower=-90, upper=90, units='deg', ref0=-90, ref=90,
continuity=True, rate_continuity=True)
# Add a control interp comp to interpolate the rates of O_BI from the orbit phase.
faux_control_options = {'O_BI': {'units': None, 'shape': (3, 3)}}
p.model.add_subsystem('obi_rate_interp_comp',
ControlInterpComp(control_options=faux_control_options,
time_units='s',
grid_data=orbit_phase.grid_data),
promotes_outputs=[('control_rates:O_BI_rate', 'Odot_BI')])
control_input_nodes_idxs = orbit_phase.grid_data.subset_node_indices['control_input']
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3, 3))
p.model.connect('orbit_phase.rhs_all.O_BI', 'obi_rate_interp_comp.controls:O_BI',
src_indices=src_idxs, flat_src_indices=True)
p.model.connect('orbit_phase.time.dt_dstau',
('obi_rate_interp_comp.dt_dstau', 'w_B_rate_interp_comp.dt_dstau'))
# Use O_BI and Odot_BI to compute the angular velocity vector
p.model.add_subsystem('angular_velocity_comp',
AngularVelocityComp(num_nodes=orbit_phase.grid_data.num_nodes))
p.model.connect('orbit_phase.rhs_all.O_BI', 'angular_velocity_comp.O_BI')
p.model.connect('Odot_BI', 'angular_velocity_comp.Odot_BI')
# Add another interpolation comp to compute the rate of w_B
faux_control_options = {'w_B': {'units': '1/s', 'shape': (3,)}}
p.model.add_subsystem('w_B_rate_interp_comp',
ControlInterpComp(control_options=faux_control_options,
time_units='s',
grid_data=orbit_phase.grid_data),
promotes_outputs=[('control_rates:w_B_rate', 'wdot_B')])
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3,))
p.model.connect('angular_velocity_comp.w_B', 'w_B_rate_interp_comp.controls:w_B',
src_indices=src_idxs, flat_src_indices=True)
# Now add the systems phase
systems_phase = Phase('radau-ps',
ode_class=CadreSystemsODE,
num_segments=NUM_SEG,
transcription_order=TRANSCRIPTION_ORDER,
compressed=False)
p.model.add_subsystem('systems_phase', systems_phase)
systems_phase.set_time_options(fix_initial=True, fix_duration=True, duration_ref=duration)
systems_phase.set_state_options('SOC', defect_ref=10, lower=0.2, fix_initial=True, units=None)
systems_phase.set_state_options('w_RW', defect_ref=10000, fix_initial=True, units='1/s')
systems_phase.set_state_options('data', defect_ref=10, fix_initial=True, units='Gibyte')
systems_phase.set_state_options('temperature', ref0=273, ref=373, defect_ref=1000,
fix_initial=True, units='degK')
systems_phase.add_design_parameter('LD', opt=False, units='d')
systems_phase.add_design_parameter('fin_angle', opt=True, lower=0., upper=np.pi / 2.)
systems_phase.add_design_parameter('antAngle', opt=True, lower=-np.pi / 4, upper=np.pi / 4)
systems_phase.add_design_parameter('cellInstd', opt=True, lower=0.0, upper=1.0, ref=1.0)
# Add r_e2b_I and O_BI as non-optimized controls, allowing them to be connected to external sources
systems_phase.add_control('r_e2b_I', opt=False, units='km')
systems_phase.add_control('O_BI', opt=False)
systems_phase.add_control('w_B', opt=False)
systems_phase.add_control('wdot_B', opt=False)
systems_phase.add_control('P_comm', opt=True, lower=0.0, upper=30.0, units='W')
systems_phase.add_control('Isetpt', opt=True, lower=1.0E-4, upper=0.4, units='A')
systems_phase.add_objective('data', loc='final', ref=-1.0)
# Connect r_e2b_I and O_BI values from all nodes in the orbit phase to the input values
# in the attitude phase.
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3,))
p.model.connect('orbit_phase.states:r_e2b_I', 'systems_phase.controls:r_e2b_I',
src_indices=src_idxs, flat_src_indices=True)
p.model.connect('angular_velocity_comp.w_B', 'systems_phase.controls:w_B',
src_indices=src_idxs, flat_src_indices=True)
p.model.connect('wdot_B', 'systems_phase.controls:wdot_B',
src_indices=src_idxs, flat_src_indices=True)
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3, 3))
p.model.connect('orbit_phase.rhs_all.O_BI', 'systems_phase.controls:O_BI',
src_indices=src_idxs, flat_src_indices=True)
p.model.options['assembled_jac_type'] = 'csc'
p.model.linear_solver = DirectSolver(assemble_jac=True)
p.setup(check=True)
# from openmdao.api import view_model
# view_model(p.model)
# Initialize values in the orbit phase
p['orbit_phase.t_initial'] = 0.0
p['orbit_phase.t_duration'] = duration
# p['systems_phase.states:w_RW'][:, 0] = 0.0
# p['systems_phase.states:w_RW'][:, 1] = 0.0
# p['systems_phase.states:w_RW'][:, 2] = 0.0
# Default starting orbit
# [ 2.89078958e+03 5.69493134e+03 -2.55340189e+03 2.56640460e-01
# 3.00387409e+00 6.99018448e+00]
p['orbit_phase.states:r_e2b_I'][:, 0] = 2.89078958e+03
p['orbit_phase.states:r_e2b_I'][:, 1] = 5.69493134e+03
p['orbit_phase.states:r_e2b_I'][:, 2] = -2.55340189e+03
p['orbit_phase.states:v_e2b_I'][:, 0] = 2.56640460e-01
p['orbit_phase.states:v_e2b_I'][:, 1] = 3.00387409e+00
p['orbit_phase.states:v_e2b_I'][:, 2] = 6.99018448e+00
# Initialize values in the systems phase
p['systems_phase.t_initial'] = 0.0
p['systems_phase.t_duration'] = duration
# p['systems_phase.states:w_RW'][:, 0] = 0.0
# p['systems_phase.states:w_RW'][:, 1] = 0.0
# p['systems_phase.states:w_RW'][:, 2] = 0.0
p['systems_phase.states:SOC'] = systems_phase.interpolate(ys=[1, .5], nodes='state_input')
p['systems_phase.states:w_RW'] = 100.0
p['systems_phase.states:data'] = systems_phase.interpolate(ys=[0, 10], nodes='state_input')
p['systems_phase.states:temperature'] = 273.0
# p['systems_phase.states:v_e2b_I'][:, 0] = 0.0
# p['systems_phase.states:v_e2b_I'][:, 1] = vcirc
# p['systems_phase.states:v_e2b_I'][:, 2] = 0.0
p['systems_phase.controls:P_comm'] = 0.01
p['systems_phase.controls:Isetpt'] = 0.1
p['systems_phase.design_parameters:LD'] = 5233.5
p['systems_phase.design_parameters:fin_angle'] = np.radians(70.0)
p['systems_phase.design_parameters:cellInstd'] = 0.0
p.run_model()
# Simulate the orbit phase to get a (exact) guess to the orbit history solution.
exp_out = orbit_phase.simulate()
# import matplotlib.pyplot as plt
# from mpl_toolkits import mplot3d
#
# plt.figure()
# ax = plt.axes(projection='3d')
# # plt.plot(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], 'b-')
# ax.plot3D(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], exp_out.get_values('r_e2b_I')[:, 2], 'b-')
# plt.show()
p['orbit_phase.states:r_e2b_I'] = orbit_phase.interpolate(ys=exp_out.get_values('r_e2b_I'), xs=exp_out.get_values('time'), nodes='state_input')
p['orbit_phase.states:v_e2b_I'] = orbit_phase.interpolate(ys=exp_out.get_values('v_e2b_I'), xs=exp_out.get_values('time'), nodes='state_input')
p.run_driver()
r_e2b_I = p.model.orbit_phase.get_values('r_e2b_I')
v_e2b_I = p.model.orbit_phase.get_values('v_e2b_I')
rmag_e2b = p.model.orbit_phase.get_values('rmag_e2b_I')
# exp_out = systems_phase.simulate(times=500)
import matplotlib.pyplot as plt
plt.figure()
plt.plot(orbit_phase.get_values('r_e2b_I')[:, 0], orbit_phase.get_values('r_e2b_I')[:, 1], 'ro')
plt.figure()
# plt.plot(exp_out.get_values('time')[:, 0], exp_out.get_values('data')[:, 1], 'b-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('data'), 'ro')
plt.figure()
# plt.plot(exp_out.get_values('time')[:, 0], exp_out.get_values('data')[:, 1], 'b-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_comm'), 'r-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_sol'), 'b-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_RW'), 'g-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_bat'), 'k-')
plt.figure()
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('SOC'), 'r-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('dXdt:SOC'), 'r--')
plt.show()
# plt.figure()
# plt.plot(exp_out.get_values('time'), exp_out.get_values('SOC'), 'b-')
# plt.plot(phase.get_values('time'), phase.get_values('SOC'), 'ro')
# assert_rel_error(self, rmag_e2b, rmag * np.ones_like(rmag_e2b), tolerance=1.0E-9)
# delta_trua = 2 * np.pi * (duration / period)
# assert_rel_error(self, r_e2b_I[-1, :],
# rmag * np.array([np.cos(delta_trua), np.sin(delta_trua), 0]),
# tolerance=1.0E-9)
# assert_rel_error(self, v_e2b_I[-1, :],
# vcirc * np.array([-np.sin(delta_trua), np.cos(delta_trua), 0]),
# tolerance=1.0E-9)
# def test_partials(self):
# np.set_printoptions(linewidth=10000, edgeitems=1024)
# cpd = self.p.check_partials(compact_print=True, out_stream=None)
# assert_check_partials(cpd, atol=1.0E-4, rtol=1.0)
#
# def test_simulate(self):
# phase = self.p.model.orbit_phase
# exp_out = phase.simulate(times=500)
#
# import matplotlib.pyplot as plt
#
# plt.figure()
# plt.plot(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], 'b-')
# plt.plot(phase.get_values('r_e2b_I')[:, 0], phase.get_values('r_e2b_I')[:, 1], 'ro')
#
# # plt.figure()
# # plt.plot(exp_out.get_values('time'), exp_out.get_values('SOC'), 'b-')
# # plt.plot(phase.get_values('time'), phase.get_values('SOC'), 'ro')
#
# plt.show()
| 42.812274 | 143 | 0.70436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,328 | 0.449279 |
27a730a5c6d3019f232b6aef55d357908663ff70 | 959 | py | Python | deso/Media.py | AdityaChaudhary0005/DeSo.py | 5cb3c757fb21bad472da921c0148675c8957eb17 | [
"MIT"
]
| 11 | 2021-11-12T18:20:22.000Z | 2022-03-16T02:12:06.000Z | deso/Media.py | AdityaChaudhary0005/DeSo.py | 5cb3c757fb21bad472da921c0148675c8957eb17 | [
"MIT"
]
| 6 | 2021-11-25T04:30:44.000Z | 2021-12-15T12:33:24.000Z | deso/Media.py | AdityaChaudhary0005/DeSo.py | 5cb3c757fb21bad472da921c0148675c8957eb17 | [
"MIT"
]
| 8 | 2021-11-19T19:14:50.000Z | 2022-01-31T21:27:32.000Z | from deso.utils import getUserJWT
import requests
class Media:
def __init__(self, publicKey=None, seedHex=None, nodeURL="https://node.deso.org/api/v0/"):
self.SEED_HEX = seedHex
self.PUBLIC_KEY = publicKey
self.NODE_URL = nodeURL
def uploadImage(self, fileList):
#uploads image to images.deso.org
try:
            if isinstance(fileList, str):
fileList = [
('file', (fileList, open(
fileList, "rb"), 'image/png'))
]
jwt_token = getUserJWT(self.SEED_HEX)
# print(encoded_jwt)
endpointURL = self.NODE_URL + "upload-image"
payload = {'UserPublicKeyBase58Check': self.PUBLIC_KEY,
'JWT': jwt_token}
response = requests.post(endpointURL, data=payload, files=fileList)
return response
except Exception as e:
return e
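# Minimal usage sketch (illustrative only; the key material below is a
# placeholder, not a real DeSo account):
if __name__ == "__main__":
    media = Media(publicKey="<public key>", seedHex="<seed hex>")
    response = media.uploadImage("example.png")
    print(response)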
| 31.966667 | 95 | 0.554745 | 906 | 0.944734 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.161627 |
27a8998af1db32b395a9af2dbb6c8a21bc35a70c | 169 | py | Python | workSpace/boot.py | khutson/macequilt | a4a090ddf296fcea763825fda4243bc84b4d5f0d | [
"MIT"
]
| null | null | null | workSpace/boot.py | khutson/macequilt | a4a090ddf296fcea763825fda4243bc84b4d5f0d | [
"MIT"
]
| null | null | null | workSpace/boot.py | khutson/macequilt | a4a090ddf296fcea763825fda4243bc84b4d5f0d | [
"MIT"
]
| null | null | null | # This file is executed on every boot (including wake-boot from deepsleep)
import esp
esp.osdebug(None)
import wifi
wifi.connect(repl=False)
import gc
gc.collect()
| 13 | 74 | 0.763314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.43787 |
27a8cc8eee02c003f65618c441f8c80b6ada0052 | 1,790 | py | Python | s3-scan-tar/tests/test_models.py | omBratteng/mottak | b7d2e1d063b31c2ad89c66e5414297612f91ebe9 | [
"Apache-2.0"
]
| 4 | 2021-03-05T15:39:24.000Z | 2021-09-15T06:11:45.000Z | s3-scan-tar/tests/test_models.py | omBratteng/mottak | b7d2e1d063b31c2ad89c66e5414297612f91ebe9 | [
"Apache-2.0"
]
| 631 | 2020-04-27T10:39:18.000Z | 2022-03-31T14:51:38.000Z | s3-scan-tar/tests/test_models.py | omBratteng/mottak | b7d2e1d063b31c2ad89c66e5414297612f91ebe9 | [
"Apache-2.0"
]
| 3 | 2020-02-20T15:48:03.000Z | 2021-12-16T22:50:40.000Z | import pytest
from app.models import AVScanResult
@pytest.fixture
def _scan_result() -> AVScanResult:
clean = 10
virus = 0
skipped = 0
return AVScanResult(clean, virus, skipped)
def test_avscanresult_init(_scan_result):
result = AVScanResult(10, 0, 0)
assert result == _scan_result
def test_status_has_correct_values():
scan_found_virus = AVScanResult(9, 1, 0)
scan_found_nothing = AVScanResult(10, 0, 0)
assert scan_found_virus.get_status() == "Ikke ok"
assert scan_found_nothing.get_status() == "ok"
def test_correct_message_when_no_virus_found(_scan_result):
expected_message = (
"Status etter virus scan: ok\n\n"
"Antall filer kontrollert: 10 av 10\n"
" - Filer uten virus: 10\n"
" - Filer med virus: 0\n"
" - Filer ikke kontrollert pga. filstørrelse: 0"
)
assert expected_message == _scan_result.generate_message()
# assert _scan_result.get_message() == expected_message
def test_correct_message_when_virus_found():
expected_message = (
"Status etter virus scan: Ikke ok\n\n"
"Antall filer kontrollert: 10 av 10\n"
" - Filer uten virus: 8\n"
" - Filer med virus: 2\n"
" - Filer ikke kontrollert pga. filstørrelse: 0"
)
actual = AVScanResult(8, 2, 0)
assert expected_message == actual.generate_message()
def test_correct_message_when_skipped_files():
expected_message = (
"Status etter virus scan: Ikke ok\n\n"
"Antall filer kontrollert: 10 av 15\n"
" - Filer uten virus: 8\n"
" - Filer med virus: 2\n"
" - Filer ikke kontrollert pga. filstørrelse: 5"
)
actual = AVScanResult(8, 2, 5)
assert expected_message == actual.generate_message()
| 29.833333 | 62 | 0.655307 | 0 | 0 | 0 | 0 | 143 | 0.079755 | 0 | 0 | 619 | 0.345231 |
27a97aed4e6639ade2261db847e3a6e16989a40c | 1,424 | py | Python | autoload/activate_this.py | BonaBeavis/vim-venom | a4ed892bd844de51c92e7b59dbc975db02c939b9 | [
"Vim"
]
| 24 | 2020-04-26T11:50:40.000Z | 2022-02-22T08:05:36.000Z | autoload/activate_this.py | BonaBeavis/vim-venom | a4ed892bd844de51c92e7b59dbc975db02c939b9 | [
"Vim"
]
| 5 | 2021-01-26T12:41:12.000Z | 2022-01-11T15:40:43.000Z | autoload/activate_this.py | BonaBeavis/vim-venom | a4ed892bd844de51c92e7b59dbc975db02c939b9 | [
"Vim"
]
| 4 | 2020-05-02T21:45:36.000Z | 2022-03-25T13:51:00.000Z | # -*- coding: utf-8 -*-
"""Activate virtualenv for current interpreter:
Source: https://github.com/pypa/virtualenv
Use exec(open(this_file).read(), {'__file__': this_file}).
"""
import os
import site
import sys
try:
abs_file = os.path.abspath(__file__)
except NameError:
raise AssertionError(
"You must use exec(open(this_file).read(), {'__file__': this_file}))")
# Prepend bin to PATH (this file is inside the bin directory)
bin_dir = os.path.dirname(abs_file)
os.environ["PATH"] = os.pathsep.join(
[bin_dir] + os.environ.get("PATH", "").split(os.pathsep))
# Virtual env is right above bin directory
base = os.path.dirname(bin_dir)
os.environ["VIRTUAL_ENV"] = base
# Concat site-packages library path
IS_WIN = sys.platform == "win32"
IS_PYPY = hasattr(sys, "pypy_version_info")
IS_JYTHON = sys.platform.startswith("java")
if IS_JYTHON or IS_WIN:
site_packages = os.path.join(base, "Lib", "site-packages")
elif IS_PYPY:
site_packages = os.path.join(base, "site-packages")
else:
python_lib = "python{}.{}".format(*sys.version_info)
site_packages = os.path.join(base, "lib", python_lib, "site-packages")
# Add the virtual environment libraries to the host python import mechanism
prev_length = len(sys.path)
site.addsitedir(site_packages)
sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]
sys.real_prefix = sys.prefix
sys.prefix = base
# vim: set ts=4 sw=4 tw=80 et :
| 30.297872 | 78 | 0.714185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 618 | 0.433989 |
27abc06bb50512111945d911b3687183e05cd80c | 2,731 | py | Python | tattrdb/models.py | gmjosack/tattrdb | 88d46eb049d05a1f0531531c49c2209c2bbbf562 | [
"MIT"
]
| 1 | 2018-11-24T02:33:15.000Z | 2018-11-24T02:33:15.000Z | tattrdb/models.py | gmjosack/tattrdb | 88d46eb049d05a1f0531531c49c2209c2bbbf562 | [
"MIT"
]
| null | null | null | tattrdb/models.py | gmjosack/tattrdb | 88d46eb049d05a1f0531531c49c2209c2bbbf562 | [
"MIT"
]
| null | null | null | from sqlalchemy import create_engine
from sqlalchemy import (
Table, Column, Integer, String, Text, Boolean,
ForeignKey, Enum, DateTime
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref, sessionmaker
Session = sessionmaker()
Model = declarative_base()
def connect(uri):
engine = create_engine(uri)
Session.configure(bind=engine)
return Session()
def _sync(connection):
""" This will build the database for whatever connection you pass."""
Model.metadata.create_all(connection.bind)
host_tags = Table("host_tags", Model.metadata,
Column("host_id", Integer, ForeignKey("hosts.id"), primary_key=True),
Column("tag_id", Integer, ForeignKey("tags.id"), primary_key=True)
)
class Tag(Model):
__tablename__ = 'tags'
id = Column(Integer(), primary_key=True, nullable=False)
tagname = Column(String(length=255), unique=True)
def as_dict(self):
return {
"id": self.id,
"tagname": self.tagname,
"hosts": [host.hostname for host in self.hosts],
}
class HostAttributes(Model):
__tablename__ = "host_attributes"
host_id = Column(Integer, ForeignKey("hosts.id"), primary_key=True)
attribute_id = Column(Integer, ForeignKey("attributes.id"), primary_key=True)
value = Column(String(length=255), nullable=False)
attribute = relationship("Attribute", lazy="joined", backref="host_assocs")
class Attribute(Model):
__tablename__ = 'attributes'
id = Column(Integer(), primary_key=True, nullable=False)
attrname = Column(String(length=255), unique=True)
hosts = relationship("Host", secondary="host_attributes", lazy="joined", backref="real_attributes")
def as_dict(self):
values = {}
for host_assoc in self.host_assocs:
if host_assoc.value not in values:
values[host_assoc.value] = []
values[host_assoc.value].append(host_assoc.host.hostname)
return {
"id": self.id,
"attrname": self.attrname,
"values": values,
}
class Host(Model):
__tablename__ = 'hosts'
id = Column(Integer(), primary_key=True, nullable=False)
hostname = Column(String(length=255), unique=True)
tags = relationship(
"Tag", secondary=host_tags, lazy="joined", backref="hosts")
attributes = relationship("HostAttributes", lazy="joined", backref="host")
def as_dict(self):
return {
"id": self.id,
"hostname": self.hostname,
"tags": [tag.tagname for tag in self.tags],
"attributes": {attr.attribute.attrname: attr.value for attr in self.attributes}
}
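# Minimal usage sketch (illustrative): build the schema in an in-memory
# SQLite database and store one tagged host.
if __name__ == "__main__":
    session = connect("sqlite://")
    _sync(session)
    host = Host(hostname="web01", tags=[Tag(tagname="prod")])
    session.add(host)
    session.commit()
    print(host.as_dict())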
| 28.154639 | 103 | 0.656902 | 1,949 | 0.713658 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.141706 |
27abe035638fda37c09ec1990dca44e2161d8667 | 30 | py | Python | onepassword/__init__.py | jemmyw/1pass | 8dbfa5e062ce08e26c5619dbdb2b27323e5b3dc9 | [
"MIT"
]
| 1 | 2016-11-14T22:16:48.000Z | 2016-11-14T22:16:48.000Z | onepassword/__init__.py | elliotchance/1pass | 4bd45a52476c410c6e5b51f90fd46cbdd436807f | [
"MIT"
]
| null | null | null | onepassword/__init__.py | elliotchance/1pass | 4bd45a52476c410c6e5b51f90fd46cbdd436807f | [
"MIT"
]
| null | null | null | from keychain import Keychain
| 15 | 29 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
27ae7ed160d61ff6977fb0ea0dc61ee80279d33b | 152,955 | py | Python | modules/cockatoo/_knitnetwork.py | fstwn/Cockatoo | 0c5f9c515053bfc31e62d20fddc4ae9bece09d88 | [
"MIT"
]
| 9 | 2020-09-26T03:41:21.000Z | 2021-11-29T06:52:35.000Z | modules/cockatoo/_knitnetwork.py | fstwn/Cockatoo | 0c5f9c515053bfc31e62d20fddc4ae9bece09d88 | [
"MIT"
]
| 9 | 2020-08-10T19:38:03.000Z | 2022-02-24T08:41:32.000Z | modules/cockatoo/_knitnetwork.py | fstwn/Cockatoo | 0c5f9c515053bfc31e62d20fddc4ae9bece09d88 | [
"MIT"
]
| 3 | 2020-12-26T08:43:56.000Z | 2021-10-17T19:37:52.000Z | # PYTHON STANDARD LIBRARY IMPORTS ---------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from collections import OrderedDict
from math import radians
from math import pi
from operator import itemgetter
# DUNDER ----------------------------------------------------------------------
__all__ = [
"KnitNetwork"
]
# THIRD PARTY MODULE IMPORTS --------------------------------------------------
import networkx as nx
# LOCAL MODULE IMPORTS --------------------------------------------------------
from cockatoo._knitnetworkbase import KnitNetworkBase
from cockatoo._knitmappingnetwork import KnitMappingNetwork
from cockatoo._knitdinetwork import KnitDiNetwork
from cockatoo.environment import RHINOINSIDE
from cockatoo.exception import KnitNetworkError
from cockatoo.exception import KnitNetworkGeometryError
from cockatoo.exception import NoEndNodesError
from cockatoo.exception import NoWeftEdgesError
from cockatoo.exception import MappingNetworkError
from cockatoo.utilities import pairwise
# RHINO IMPORTS ---------------------------------------------------------------
if RHINOINSIDE:
import rhinoinside
rhinoinside.load()
from Rhino.Geometry import Brep as RhinoBrep
from Rhino.Geometry import Curve as RhinoCurve
from Rhino.Geometry import Line as RhinoLine
from Rhino.Geometry import Interval as RhinoInterval
from Rhino.Geometry import Mesh as RhinoMesh
from Rhino.Geometry import NurbsSurface as RhinoNurbsSurface
from Rhino.Geometry import Point3d as RhinoPoint3d
from Rhino.Geometry import Polyline as RhinoPolyline
from Rhino.Geometry import Surface as RhinoSurface
from Rhino.Geometry import Vector3d as RhinoVector3d
else:
from Rhino.Geometry import Brep as RhinoBrep
from Rhino.Geometry import Curve as RhinoCurve
from Rhino.Geometry import Line as RhinoLine
from Rhino.Geometry import Interval as RhinoInterval
from Rhino.Geometry import Mesh as RhinoMesh
from Rhino.Geometry import NurbsSurface as RhinoNurbsSurface
from Rhino.Geometry import Point3d as RhinoPoint3d
from Rhino.Geometry import Polyline as RhinoPolyline
from Rhino.Geometry import Surface as RhinoSurface
from Rhino.Geometry import Vector3d as RhinoVector3d
# CLASS DECLARATION -----------------------------------------------------------
class KnitNetwork(KnitNetworkBase):
"""
Datastructure for representing a network (graph) consisting of nodes with
special attributes aswell as 'warp' edges, 'weft' edges and contour edges
which are neither 'warp' nor 'weft'.
Used for the automatic generation of knitting patterns based on mesh or
NURBS surface geometry.
Inherits from :class:`KnitNetworkBase`.
Notes
-----
The implemented algorithms are strongly based on the paper
*Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_.
Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
The implementation was further influenced by concepts and ideas presented
in the papers *Automatic Machine Knitting of 3D Meshes* [3]_,
*Visual Knitting Machine Programming* [4]_ and
*A Compiler for 3D Machine Knitting* [5]_.
References
----------
.. [1] Popescu, Mariana et al. *Automated Generation of Knit Patterns
for Non-developable Surfaces*
See: `Automated Generation of Knit Patterns for Non-developable
Surfaces <https://block.arch.ethz.ch/brg/files/
POPESCU_DMSP-2017_automated-generation-knit-patterns_1505737906.
pdf>`_
.. [2] Popescu, Mariana *KnitCrete - Stay-in-place knitted formworks for
complex concrete structures*
See: `KnitCrete - Stay-in-place knitted formworks for complex
concrete structures <https://block.arch.ethz.ch/brg/files/
POPESCU_2019_ETHZ_PhD_KnitCrete-Stay-in-place-knitted-fabric-
formwork-for-complex-concrete-structures_small_1586266206.pdf>`_
.. [3] Narayanan, Vidya; Albaugh, Lea; Hodgins, Jessica; Coros, Stelian;
McCann, James *Automatic Machine Knitting of 3D Meshes*
See: `Automatic Machine Knitting of 3D Meshes
<https://textiles-lab.github.io/publications/2018-autoknit/>`_
.. [4] Narayanan, Vidya; Wu, Kui et al. *Visual Knitting Machine
Programming*
See: `Visual Knitting Machine Programming
<https://textiles-lab.github.io/publications/2019-visualknit/>`_
.. [5] McCann, James; Albaugh, Lea; Narayanan, Vidya; Grow, April;
Matusik, Wojciech; Mankoff, Jen; Hodgins, Jessica
*A Compiler for 3D Machine Knitting*
See: `A Compiler for 3D Machine Knitting
<https://la.disneyresearch.com/publication/machine-knitting-
compiler/>`_
"""
# INITIALIZATION ----------------------------------------------------------
def __init__(self, data=None, **attr):
"""
Initialize a KnitNetwork (inherits NetworkX graph) with edges, name,
graph attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
network is created. The data can be an edge list, any
KnitNetworkBase or NetworkX graph object.
name : string, optional (default='')
An optional name for the graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
"""
# initialize using original init method
super(KnitNetwork, self).__init__(data=data, **attr)
# also copy the mapping_network attribute if it is already available
if data and isinstance(data, KnitNetwork) and data.mapping_network:
self.mapping_network = data.mapping_network
else:
self.mapping_network = None
@classmethod
def create_from_contours(cls, contours, course_height,
reference_geometry=None):
"""
Create and initialize a KnitNetwork based on a set of contours, a
given course height and an optional reference geometry.
The reference geometry is a mesh or surface which should be described
by the network. While it is optional, it is **HIGHLY** recommended to
provide it!
Parameters
----------
contours : :obj:`list` of :class:`Rhino.Geometry.Polyline`
or :class:`Rhino.Geometry.Curve`
Ordered contours (i.e. isocurves, isolines) to initialize the
KnitNetwork with.
course_height : float
The course height for sampling the contours.
reference_geometry : :class:`Rhino.Geometry.Mesh`
or :class:`Rhino.Geometry.Surface`
Optional underlying geometry that this network is based on.
Returns
-------
KnitNetwork : KnitNetwork
A new, initialized KnitNetwork instance.
Notes
-----
This method will automatically call initialize_position_contour_edges()
on the newly created network!
Raises
------
KnitNetworkGeometryError
If a supplied contour is not a valid instance of
:obj:`Rhino.Geometry.Polyline` or :obj:`Rhino.Geometry.Curve`.
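
        Examples
        --------
        Minimal sketch (illustrative only; ``isocurves`` is assumed to be an
        ordered list of Rhino polylines lying on ``mesh``, with the course
        height given in model units)::

            network = KnitNetwork.create_from_contours(
                isocurves, course_height=2.0, reference_geometry=mesh)
            network.initialize_leaf_connections()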
"""
# create network
network = cls(reference_geometry=reference_geometry)
# assign reference_geometry if present and valid
if reference_geometry:
if isinstance(reference_geometry, RhinoMesh):
network.graph["reference_geometry"] = reference_geometry
elif isinstance(reference_geometry, RhinoBrep):
if reference_geometry.IsSurface:
network.graph["reference_geometry"] = RhinoNurbsSurface(
reference_geometry.Surfaces[0])
elif isinstance(reference_geometry, RhinoSurface):
network.graph["reference_geometry"] = reference_geometry
else:
network.graph["reference_geometry"] = None
# divide the contours and fill network with nodes
nodenum = 0
for i, crv in enumerate(contours):
# check input
if not isinstance(crv, RhinoCurve):
if isinstance(crv, RhinoPolyline):
crv = crv.ToPolylineCurve()
else:
errMsg = ("Contour at index {} is not ".format(i) +
"a valid Curve or Polyline!")
raise KnitNetworkGeometryError(errMsg)
# compute divisioncount and divide contour
dc = round(crv.GetLength() / course_height)
tcrv = crv.DivideByCount(dc, True)
if not tcrv:
dpts = [crv.PointAtStart, crv.PointAtEnd]
else:
dpts = [crv.PointAt(t) for t in tcrv]
# loop over all nodes on the current contour
for j, point in enumerate(dpts):
# declare node attributes
vpos = i
vnum = j
if j == 0 or j == len(dpts) - 1:
vleaf = True
else:
vleaf = False
# create network node from rhino point
network.node_from_point3d(nodenum,
point,
position=vpos,
num=vnum,
leaf=vleaf,
start=False,
end=False,
segment=None,
increase=False,
decrease=False,
color=None)
# increment counter
nodenum += 1
# call position contour initialization
network.initialize_position_contour_edges()
return network
# TEXTUAL REPRESENTATION OF NETWORK ---------------------------------------
def __repr__(self):
"""
Return a textual description of the network.
Returns
-------
description : str
A textual description of the network.
"""
if self.name != "":
name = self.name
else:
name = "KnitNetwork"
nn = len(self.nodes())
ce = len(self.contour_edges)
wee = len(self.weft_edges)
wae = len(self.warp_edges)
data = ("({} Nodes, {} Position Contours, {} Weft, {} Warp)")
data = data.format(nn, ce, wee, wae)
return name + data
def ToString(self):
"""
Return a textual description of the network.
Returns
-------
description : str
A textual description of the network.
Notes
-----
Used for overloading the Grasshopper display in data parameters.
"""
return repr(self)
# INITIALIZATION OF POSITION CONTOUR EDGES --------------------------------
def initialize_position_contour_edges(self):
"""
Creates all initial position contour edges as neither 'warp' nor 'weft'
by iterating over all nodes in the network and grouping them based on
their 'position' attribute.
Notes
-----
This method is automatically called when creating a KnitNetwork using
the create_from_contours method!
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
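
        Examples
        --------
        Conceptually, this method is equivalent to the following sketch
        (using the module's ``pairwise`` helper)::

            for contour in network.all_nodes_by_position(data=True):
                for u, v in pairwise(contour):
                    network.create_contour_edge(u, v)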
"""
# get all nodes by position
posList = self.all_nodes_by_position(data=True)
for i, pos in enumerate(posList):
for j, node in enumerate(pos):
k = j + 1
if k < len(pos):
self.create_contour_edge(node, pos[k])
# INITIALIZATION OF 'WEFT' EDGES BETWEEN 'LEAF' NODES ---------------------
def initialize_leaf_connections(self):
"""
Create all initial connections of the 'leaf' nodes by iterating over
all position contours and creating 'weft' edges between the 'leaf'
nodes of the position contours.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# get all leaves
leafNodes = self.all_leaves_by_position(True)
# loop through all the positions leaves
for i, lpos in enumerate(leafNodes):
j = i + 1
# loop through pairs of leaves
if j < len(leafNodes):
startLeaf = lpos[0]
endLeaf = lpos[1]
nextStart = leafNodes[j][0]
nextEnd = leafNodes[j][1]
# add edges to the network
self.create_weft_edge(startLeaf, nextStart)
self.create_weft_edge(endLeaf, nextEnd)
# INITIALIZATION OF PRELIMINARY 'WEFT' EDGES ------------------------------
def attempt_weft_connection(self, node, candidate, source_nodes,
max_connections=4, verbose=False):
"""
Method for attempting a 'weft' connection to a candidate
node based on certain parameters.
Parameters
----------
node : :obj:`tuple`
2-tuple representing the source node for the possible 'weft' edge.
        candidate : :obj:`tuple`
            2-tuple representing the target node for the possible 'weft' edge.
source_nodes : :obj:`list`
List of nodes on the position contour of node. Used to check if
the candidate node already has a connection.
max_connections : int, optional
            The new 'weft' connection will only be made if the candidate node's
            number of connected neighbors is below this value.
Defaults to ``4``.
verbose : bool, optional
If ``True``, this routine and all its subroutines will print
messages about what is happening to the console.
Defaults to ``False``.
Returns
-------
bool
``True`` if the connection has been made,
``False`` otherwise.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
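
        Examples
        --------
        Illustrative sketch: nodes are passed as 2-tuples of
        (identifier, attribute dict), e.g. as returned by
        ``network.nodes(data=True)``; the indices below are made up::

            nodes = list(network.nodes(data=True))
            # returns True if the 'weft' edge was created
            network.attempt_weft_connection(
                nodes[0], nodes[12], nodes[0:10], max_connections=4)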
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
# get connected neighbors
connecting_neighbors = self[candidate[0]]
# only do something if the maximum is not reached
if len(connecting_neighbors) < max_connections:
# determine if the node is already connected to a node from
# the input source nodes
isConnected = False
for cn in connecting_neighbors:
if cn in [v[0] for v in source_nodes]:
isConnected = True
# print info on verbose setting
v_print("Candidate node {} is ".format(candidate[0]) +
"already connected! " +
"Skipping to next " +
"node...")
break
# check the flag and act accordingly
if not isConnected:
# print info on verbose setting
v_print("Connecting node {} to best ".format(node[0]) +
"candidate {}.".format(candidate[0]))
# if all conditions are met, make the 'weft' connection
if node[1]["position"] < candidate[1]["position"]:
self.create_weft_edge(node, candidate)
else:
self.create_weft_edge(candidate, node)
return True
else:
return False
else:
return False
def _create_initial_weft_connections(self,
contour_set,
force_continuous_start=False,
force_continuous_end=False,
max_connections=4,
precise=False,
verbose=False):
"""
Private method for creating initial 'weft' connections for the supplied
set of contours, starting from the first contour in the set and
propagating to the last contour in the set.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
if len(contour_set) < 2:
v_print("Not enough contours in contour set!")
return
# print info on verbose output
v_print("Creating initial 'weft' connections for contour set...")
# loop over all nodes of positions (list of lists of tuples)
for i, pos in enumerate(contour_set):
# pos is a list of tuples (nodes)
if i < len(contour_set):
j = i + 1
if j == len(contour_set):
break
# get initial and target nodes without 'leaf' nodes
initial_nodes = contour_set[i][1:-1]
target_nodes = contour_set[j][1:-1]
# options for continuous start and end
if force_continuous_start:
initial_nodes = initial_nodes[1:]
target_nodes = target_nodes[1:]
if force_continuous_end:
initial_nodes = initial_nodes[:-1]
target_nodes = target_nodes[:-1]
# skip if one of the contours has no nodes
if len(initial_nodes) == 0 or len(target_nodes) == 0:
continue
# define forbidden node index
forbidden_node = -1
# loop through all nodes on the current position
for k, node in enumerate(initial_nodes):
# print info on verbose setting
v_print("Processing node {} on position {}:".format(
node[0], node[1]["position"]))
# get the geometry for the current node
thisPt = node[1]["geo"]
# filtering according to forbidden nodes
target_nodes = [tn for tn in target_nodes
if tn[0] >= forbidden_node]
if len(target_nodes) == 0:
continue
# get four closest nodes on adjacent contour
if precise:
allDists = [thisPt.DistanceTo(tv[1]["geo"])
for tv in target_nodes]
else:
allDists = [thisPt.DistanceToSquared(tv[1]["geo"])
for tv in target_nodes]
# sort the target nodes by distance to current node
allDists, sorted_target_nodes = zip(
*sorted(zip(allDists,
target_nodes),
key=itemgetter(0)))
# the four closest nodes are the possible connections
possible_connections = sorted_target_nodes[:4]
# print info on verbose setting
v_print("Possible connections: {}".format(
[pc[0] for pc in possible_connections]))
# handle edge case where there is no possible
# connection or just one
if len(possible_connections) == 0:
# skip if there are no possible connections
continue
elif len(possible_connections) == 1:
# attempt to connect to only possible candidate
fCand = possible_connections[0]
res = self.attempt_weft_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node
if res:
forbidden_node = fCand[0]
continue
# get the contours current direction
if k < len(initial_nodes)-1:
contourDir = RhinoLine(
thisPt,
initial_nodes[k+1][1]["geo"]).Direction
elif k == len(initial_nodes)-1:
contourDir = RhinoLine(
initial_nodes[k-1][1]["geo"], thisPt).Direction
contourDir.Unitize()
# get the directions of the possible connections
candidatePoints = [pc[1]["geo"]
for pc in possible_connections]
candidateDirections = [RhinoLine(
thisPt, cp).Direction for cp in candidatePoints]
[cd.Unitize() for cd in candidateDirections]
# get the angles between contour dir and possible conn dir
normals = [RhinoVector3d.CrossProduct(
contourDir, cd) for cd in candidateDirections]
angles = [RhinoVector3d.VectorAngle(
contourDir, cd, n) for cd, n in zip(
candidateDirections, normals)]
                    # compute deltas as a measure of perpendicularity
deltas = [abs(a - (0.5 * pi)) for a in angles]
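                    # e.g. an angle of 100 degrees (~1.75 rad) gives a delta of
                    # ~0.17, while 45 degrees gives ~0.79, so a smaller delta
                    # means the candidate direction is closer to perpendicular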
# sort possible connections by distance, then by delta
allDists, deltas, angles, most_perpendicular = zip(
*sorted(zip(
allDists,
deltas,
angles,
possible_connections[:]),
key=itemgetter(0, 1)))
# get node neighbors
nNeighbors = self[node[0]]
# compute angle difference
aDelta = angles[0] - angles[1]
# CONNECTION FOR LEAST ANGLE CHANGE -----------------------
if len(nNeighbors) > 2 and aDelta < radians(6.0):
# print info on verbose setting
v_print("Using procedure for least angle " +
"change connection...")
# get previous connected edge and its direction
prevEdges = self.node_weft_edges(node[0], data=True)
if len(prevEdges) > 1:
raise KnitNetworkError(
"More than one previous 'weft' connection! " +
"This was unexpeced...")
prevDir = prevEdges[0][2]["geo"].Direction
else:
prevDir = prevEdges[0][2]["geo"].Direction
prevDir.Unitize()
# get directions for the best two candidates
mpA = most_perpendicular[0]
mpB = most_perpendicular[1]
dirA = RhinoLine(thisPt, mpA[1]["geo"]).Direction
dirB = RhinoLine(thisPt, mpB[1]["geo"]).Direction
dirA.Unitize()
dirB.Unitize()
# get normals for angle measurement
normalA = RhinoVector3d.CrossProduct(prevDir, dirA)
normalB = RhinoVector3d.CrossProduct(prevDir, dirB)
# measure the angles
angleA = RhinoVector3d.VectorAngle(
prevDir,
dirA,
normalA)
angleB = RhinoVector3d.VectorAngle(
prevDir,
dirB,
normalB)
# select final candidate for connection by angle
if angleA < angleB:
fCand = mpA
else:
fCand = mpB
# attempt to connect to final candidate
res = self.attempt_weft_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node for next pass
if res:
forbidden_node = fCand[0]
# CONNECTION FOR MOST PERPENDICULAR --------------------
else:
# print info on verbose setting
v_print("Using procedure for most " +
"perpendicular connection...")
# define final candidate
fCand = most_perpendicular[0]
# attempt to connect to final candidate node
res = self.attempt_weft_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node if connection has been made
if res:
forbidden_node = fCand[0]
def _create_second_pass_weft_connections(self,
contour_set,
include_leaves=False,
least_connected=False,
precise=False,
verbose=False):
"""
Private method for creating second pass 'weft' connections for the
given set of contours.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
v_print = print if verbose else lambda *a, **k: None
# get attributes only once
position_attributes = nx.get_node_attributes(self, "position")
num_attributes = nx.get_node_attributes(self, "num")
if len(contour_set) < 2:
v_print("Not enough contours in contour set!")
return
# print info on verbose output
v_print("Creating second pass 'weft' connections for contour set...")
# loop over all nodes of positions (list of lists of tuples)
for i, pos in enumerate(contour_set):
# get initial nodes
initial_nodes = contour_set[i]
# get target position candidates
            if 0 < i < len(contour_set) - 1:
target_positionA = contour_set[i-1][0][1]["position"]
target_positionB = contour_set[i+1][0][1]["position"]
elif i == 0:
target_positionA = None
target_positionB = contour_set[i+1][0][1]["position"]
elif i == len(contour_set)-1:
target_positionA = contour_set[i-1][0][1]["position"]
target_positionB = None
# loop through all nodes on current position
for k, node in enumerate(initial_nodes):
# print info on verbose setting
v_print(
"Processing node {} on position {}:".format(
node[0], node[1]["position"]))
# get connecting edges on target position
conWeftEdges = self.node_weft_edges(node[0], data=True)
conPos = []
if len(conWeftEdges) == 0 and verbose:
# print info on verbose setting
v_print("No previously connected weft edges...")
for weftEdge in conWeftEdges:
weftEdgeFrom = weftEdge[0]
weftEdgeTo = weftEdge[1]
if weftEdgeFrom != node[0]:
posEdgeTarget = position_attributes[weftEdgeFrom]
elif weftEdgeTo != node[0]:
posEdgeTarget = position_attributes[weftEdgeTo]
if posEdgeTarget not in conPos:
conPos.append(posEdgeTarget)
# select target position and continue in edge case scenarios
target_positions = []
if target_positionA == None:
if target_positionB in conPos:
v_print("Node is connected. Skipping...")
continue
target_positions.append(target_positionB)
elif target_positionB == None:
if target_positionA in conPos:
v_print("Node is connected. Skipping...")
continue
target_positions.append(target_positionA)
elif ((target_positionA in conPos) and
(target_positionB in conPos)):
v_print("Node is connected. Skipping...")
continue
elif ((target_positionB in conPos) and
(target_positionA not in conPos)):
target_positions.append(target_positionA)
elif ((target_positionA in conPos) and
(target_positionB not in conPos)):
target_positions.append(target_positionB)
elif (target_positionA != None and
target_positionB != None and len(conPos) == 0):
target_positions = [target_positionA, target_positionB]
# print info on verbose setting
if verbose and len(target_positions) > 1:
v_print("Two target positions: {}, {}".format(
*target_positions))
elif verbose and len(target_positions) == 1:
v_print("Target position: {}".format(target_positions[0]))
# skip if there are no target positions
if len(target_positions) == 0:
v_print("No target position! Skipping...")
continue
# only proceed if there is a target position
for target_position in target_positions:
# get target nodes
target_nodes = self.nodes_on_position(
target_position, True)
# get the point geo of this node
thisPt = node[1]["geo"]
# get a window of possible connections on the target
                    # position by looking for the previous node on this contour
# connected to target position, then propagating along
# the target position to the next node that is connected
# to this position. these two nodes will define the window
# NOTE: the current node should never have a connection
# to target position (theoretically!), otherwise it should
# have fallen through the checks by now
# print info on verbose setting
v_print("Target position is {}. ".format(target_position) +
"Computing window...")
# get the previous node on this contour
prevNode = initial_nodes[k-1]
# assume that the previous node has a connection
prevCon = self.node_weft_edges(prevNode[0], data=True)
# get possible connections from previous connection
possible_connections = []
for edge in prevCon:
edgeFrom = edge[0]
edgeTo = edge[1]
if edgeFrom != prevNode[0]:
prevNodeTargetPos = position_attributes[edgeFrom]
prevNodeTargetIndex = num_attributes[edgeFrom]
elif edgeTo != prevNode[0]:
prevNodeTargetPos = position_attributes[edgeTo]
prevNodeTargetIndex = num_attributes[edgeTo]
if prevNodeTargetPos == target_position:
possible_connections.append(
target_nodes[prevNodeTargetIndex])
# the farthest connection of the previous node is the first
# point for our window
if len(possible_connections) > 1:
possible_connections.sort(key=lambda x: x[1]["num"])
possible_connections.reverse()
start_of_window = possible_connections[0]
elif len(possible_connections) == 1:
start_of_window = possible_connections[0]
elif len(possible_connections) == 0:
# print info on verbose setting
v_print("No possible connection, skipping...")
continue
# get the next node on this pos that is
# connected to target position
if k < len(initial_nodes)-1:
future_nodes = initial_nodes[k+1:]
for futurenode in future_nodes:
filteredWeftEdges = []
futureWeftEdges = self.node_weft_edges(
futurenode[0], data=True)
for futureweft in futureWeftEdges:
fwn = (futureweft[1], self.node[futureweft[1]])
fwn_pos = fwn[1]["position"]
fwn_num = fwn[1]["num"]
if (fwn_pos == target_position and
fwn_num == start_of_window[1]["num"]):
# if the start of the window is found,
# it is the only possible connection
filteredWeftEdges = [futureweft]
break
if (fwn_pos == target_position and
fwn_num > start_of_window[1]["num"]):
filteredWeftEdges.append(futureweft)
else:
continue
                            if not filteredWeftEdges:
end_of_window = None
continue
# sort the filtered weft edges based on the 'num'
# attribute of their target node
filteredWeftEdges.sort(
key=lambda x: self.node[x[1]]["num"])
# get the end of the window from the first edge on
# the target position
end_of_window = (
filteredWeftEdges[0][1],
self.node[filteredWeftEdges[0][1]])
break
else:
end_of_window = None
# define the window
if end_of_window == None:
window = [start_of_window]
elif end_of_window == start_of_window:
window = [start_of_window]
else:
window = [(n, d) for n, d
in self.nodes_iter(data=True)
if n >= start_of_window[0]
and n <= end_of_window[0]]
if len(window) == 0:
# print info on verbose setting
v_print("Length of window is 0, skipping...")
elif len(window) == 1:
# print info on verbose setting
v_print("Window has only one node.")
v_print("Connecting to node {}".format(window[0][0]) +
" on position {}...".format(
window[0][1]["position"]))
# connect weft edge
if node[1]["position"] < window[0][1]["position"]:
self.create_weft_edge(node, window[0])
else:
self.create_weft_edge(window[0], node)
else:
# print info on verbose setting
v_print("Processing window nodes: {}".format(
[w[0] for w in window]))
# sort nodes in window by distance
if precise:
allDists = [thisPt.DistanceTo(pc[1]["geo"])
for pc in window]
else:
allDists = [thisPt.DistanceToSquared(pc[1]["geo"])
for pc in window]
allDists, window = zip(*sorted(zip(allDists, window),
key=itemgetter(0)))
if least_connected:
wn_count = [len(self[n[0]]) for n in window]
                            allDists, wn_count, window = zip(
                                *sorted(zip(allDists, wn_count, window),
                                        key=itemgetter(0, 1)))
# set final candidate node
fCand = window[0]
else:
# get the contours current direction
if k < len(initial_nodes)-1:
contourDir = RhinoLine(
thisPt,
initial_nodes[k+1][1]["geo"]).Direction
elif k == len(initial_nodes)-1:
contourDir = RhinoLine(
initial_nodes[k-1][1]["geo"],
thisPt).Direction
contourDir.Unitize()
# get the directions of the possible connections
candidatePoints = [pc[1]["geo"]
for pc in window]
candidateDirections = [
RhinoLine(thisPt, cp).Direction
for cp in candidatePoints]
[cd.Unitize() for cd in candidateDirections]
# get the angles between contour dir and window dir
normals = [RhinoVector3d.CrossProduct(
contourDir, cd)
for cd in candidateDirections]
angles = [RhinoVector3d.VectorAngle(
contourDir, cd, n) for cd, n in zip(
candidateDirections, normals)]
                            # compute deltas as a measure of perpendicularity
deltas = [abs(a - (0.5 * pi)) for a in angles]
# sort window by distance, then by delta
allDists, deltas, most_perpendicular = zip(*sorted(
zip(allDists,
deltas,
window),
key=itemgetter(0, 1)))
# set final candidate node for connection
fCand = most_perpendicular[0]
# print info on verbose setting
v_print("Connecting to node " +
"{} on position {}...".format(
fCand[0],
fCand[1]["position"]))
# connect weft edge to best target
if node[1]["position"] < fCand[1]["position"]:
self.create_weft_edge(node, fCand)
else:
self.create_weft_edge(fCand, node)
def initialize_weft_edges(self,
start_index=None,
propagate_from_center=False,
force_continuous_start=False,
force_continuous_end=False,
angle_threshold=radians(6.0),
max_connections=4,
least_connected=False,
precise=False,
verbose=False):
"""
Attempts to create all the preliminary 'weft' connections for the
network.
Parameters
----------
start_index : int, optional
This value defines at which index the list of contours is split.
If no index is supplied, will split the list at the longest
contour.
Defaults to ``None``.
propagate_from_center : bool, optional
            If ``True``, will propagate the left and right sets of contours
            from the center contour defined by start_index or the longest
            contour ( < | > ). Otherwise, the propagation of the contours
            left of the center will start at the left boundary ( > | > ).
Defaults to ``False``
force_continuous_start : bool, optional
If ``True``, forces the first row of stitches to be continuous.
Defaults to ``False``.
force_continuous_end : bool, optional
If ``True``, forces the last row of stitches to be continuous.
            Defaults to ``False``.
        angle_threshold : float, optional
            Angle threshold in radians used by the connection routines when
            discerning between the 'least angle change' and the 'most
            perpendicular' connection procedure.
            Defaults to ``radians(6.0)``.
max_connections : int, optional
            The maximum number of connections a node is allowed to have to be
            considered for an additional 'weft' connection.
Defaults to ``4``.
least_connected : bool, optional
If ``True``, uses the least connected node from the found
candidates.
Defaults to ``False``
precise : bool, optional
If ``True``, the distance between nodes will be calculated using
the Rhino.Geometry.Point3d.DistanceTo method, otherwise the much
faster Rhino.Geometry.Point3d.DistanceToSquared method is used.
Defaults to ``False``.
verbose : bool, optional
If ``True``, this routine and all its subroutines will print
messages about what is happening to the console. Great for
debugging and analysis.
Defaults to ``False``.
Raises
------
KnitNetworkError
If the supplied splitting index is too high.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
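        Examples
        --------
        A minimal usage sketch; ``network`` is assumed to be a KnitNetwork
        instance that already contains its contour nodes (its construction is
        not shown here)::

            network.initialize_weft_edges(
                start_index=None,
                max_connections=4,
                precise=False,
                verbose=False)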
"""
# get all the positions / contours
AllPositions = self.all_nodes_by_position(data=True)
if start_index == None:
# get index of longest contour
start_index = self.longest_position_contour()[0]
elif start_index >= len(AllPositions):
raise KnitNetworkError("Supplied splitting index is too high!")
# if continuous start is True, connect the whole first row
if force_continuous_start:
chain = [pos[1] for pos in AllPositions]
for pair in pairwise(chain):
self.create_weft_edge(pair[0], pair[1])
# if continuous end is True, connect the whole last row
if force_continuous_end:
chain = [pos[-2] for pos in AllPositions]
for pair in pairwise(chain):
self.create_weft_edge(pair[0], pair[1])
# split position list into two sets based on start index
leftContours = AllPositions[0:start_index+1]
# optional propagation from center
# NOTE: this has shown problems / weird stitch geometries
if propagate_from_center:
leftContours.reverse()
rightContours = AllPositions[start_index:]
# create the initial weft connections
self._create_initial_weft_connections(
leftContours,
force_continuous_start=force_continuous_start,
force_continuous_end=force_continuous_end,
max_connections=max_connections,
precise=precise,
verbose=verbose)
self._create_initial_weft_connections(
rightContours,
force_continuous_start=force_continuous_start,
force_continuous_end=force_continuous_end,
max_connections=max_connections,
precise=precise,
verbose=verbose)
# create second pass weft connections
        self._create_second_pass_weft_connections(
            leftContours,
            least_connected=least_connected,
            precise=precise,
            verbose=verbose)
        self._create_second_pass_weft_connections(
            rightContours,
            least_connected=least_connected,
            precise=precise,
            verbose=verbose)
return True
# INITIALIZATION OF PRELIMINARY 'WARP' EDGES ------------------------------
def initialize_warp_edges(self, contour_set=None, verbose=False):
"""
Method for initializing first 'warp' connections once all preliminary
'weft' connections are made.
Parameters
----------
contour_set : :obj:`list`, optional
            List of lists of nodes to initialize 'warp' edges from. If none
            are supplied, all nodes ordered by their 'position' attributes
            are used.
Defaults to ``None``.
verbose : bool, optional
If ``True``, will print verbose output to the console.
Defaults to ``False``.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
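        Examples
        --------
        A sketch of the usual call order; ``network`` is an assumed
        KnitNetwork instance whose preliminary 'weft' edges have already been
        created::

            network.initialize_weft_edges()
            network.initialize_warp_edges()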
"""
# if no contour set is provided, use all contours of this network
if contour_set == None:
contour_set = self.all_nodes_by_position(data=True)
# loop through all positions in the set of contours
for i, pos in enumerate(contour_set):
# get all nodes on current contour
initial_nodes = contour_set[i]
# loop through all nodes on this contour
for k, node in enumerate(initial_nodes):
connected_edges = self.edges(node[0], data=True)
numweft = len(self.node_weft_edges(node[0]))
if (len(connected_edges) > 4 or numweft > 2
or i == 0 or i == len(contour_set)-1):
# set 'end' attribute for this node
self.node[node[0]]["end"] = True
# loop through all candidate edges
for j, edge in enumerate(connected_edges):
# if it's not a 'weft' edge, assign attributes
if not edge[2]["weft"]:
connected_node = edge[1]
                        # set 'end' attribute to connected node
self.node[connected_node]["end"] = True
# set 'warp' attribute to current edge
self[edge[0]][edge[1]]["warp"] = True
# ASSIGNING OF 'SEGMENT' ATTRIBUTES FOR MAPPING NETWORK -------------------
def _traverse_weft_edge_until_end(self, start_end_node, start_node,
seen_segments, way_nodes=None,
way_edges=None, end_nodes=None):
"""
Private method for traversing a path of 'weft' edges until another
        'end' node is discovered.
"""
# initialize output lists
if way_nodes == None:
way_nodes = deque()
way_nodes.append(start_node[0])
if way_edges == None:
way_edges = deque()
if end_nodes == None:
end_nodes = deque()
# get the connected edges and filter them, sort out the ones that
# already have a 'segment' attribute assigned
connected_weft_edges = self.node_weft_edges(start_node[0], data=True)
filtered_weft_edges = []
for cwe in connected_weft_edges:
if cwe[2]["segment"] != None:
continue
if cwe in way_edges:
continue
elif (cwe[1], cwe[0], cwe[2]) in way_edges:
continue
filtered_weft_edges.append(cwe)
if len(filtered_weft_edges) > 1:
print(filtered_weft_edges)
print("More than one filtered candidate weft edge! " +
"Segment complete...?")
elif len(filtered_weft_edges) == 1:
fwec = filtered_weft_edges[0]
connected_node = (fwec[1], self.node[fwec[1]])
# if the connected node is an end node, the segment is finished
if connected_node[1]["end"]:
# find out which order to set segment attributes
if start_end_node > connected_node[0]:
segStart = connected_node[0]
segEnd = start_end_node
else:
segStart = start_end_node
segEnd = connected_node[0]
if (segStart, segEnd) in seen_segments:
segIndex = len([s for s in seen_segments
if s == (segStart, segEnd)])
else:
segIndex = 0
# append the relevant data to the lists
end_nodes.append(connected_node[0])
way_edges.append(fwec)
seen_segments.append((segStart, segEnd))
# set final 'segment' attributes to all the way nodes
for waynode in way_nodes:
self.node[waynode]["segment"] = (segStart,
segEnd,
segIndex)
# set final 'segment' attributes to all the way edges
for wayedge in way_edges:
self[wayedge[0]][wayedge[1]]["segment"] = (segStart,
segEnd,
segIndex)
# return the seen segments
return seen_segments
else:
# set the initial segment attribute to the node
self.node[connected_node[0]]["segment"] = (start_end_node,
None,
None)
# set the initial segment attribute to the edge
self[fwec[0]][fwec[1]]["segment"] = (start_end_node,
None,
None)
# append the relevant data to the lists
way_nodes.append(connected_node[0])
way_edges.append(fwec)
                # call this method recursively until an 'end' node is found
return self._traverse_weft_edge_until_end(
start_end_node,
connected_node,
seen_segments,
way_nodes,
way_edges,
end_nodes)
else:
return seen_segments
def traverse_weft_edges_and_set_attributes(self, start_end_node):
"""
Traverse a path of 'weft' edges starting from an 'end' node until
another 'end' node is discovered. Set 'segment' attributes to nodes
and edges along the way.
        Parameters
        ----------
        start_end_node : :obj:`tuple`
2-tuple representing the node to start the traversal.
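        Examples
        --------
        This method is normally invoked once per 'end' node by
        assign_segment_attributes; a direct call might look like this, where
        ``endnode`` is assumed to be a 2-tuple of (identifier, data) of an
        'end' node of ``network``::

            network.traverse_weft_edges_and_set_attributes(endnode)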
"""
# get connected weft edges and sort them by their connected node
weft_connections = self.node_weft_edges(start_end_node[0], data=True)
weft_connections.sort(key=lambda x: x[1])
# loop through all connected weft edges
seen_segments = []
for cwe in weft_connections:
# check if connected weft edge already has a segment attribute
if cwe[2]["segment"]:
continue
# get connected node
connected_node = (cwe[1], self.node[cwe[1]])
# check the connected node. if it is an end node, we are done
if connected_node[1]["end"]:
# get segment start and end
if start_end_node[0] > connected_node[0]:
segStart = connected_node[0]
segEnd = start_end_node[0]
else:
segStart = start_end_node[0]
segEnd = connected_node[0]
# get segment index
if (segStart, segEnd) in seen_segments:
segIndex = len([s for s in seen_segments
if s == (segStart, segEnd)])
else:
segIndex = 0
# set the final segment attribute to the edge
self[cwe[0]][cwe[1]]["segment"] = (segStart, segEnd, segIndex)
seen_segments.append((segStart, segEnd))
# if the connected node is not an end node, we need to travel
# until we find one
else:
seen_segments = self._traverse_weft_edge_until_end(
start_end_node[0],
connected_node,
seen_segments,
way_edges=[cwe])
def assign_segment_attributes(self):
"""
Get the segmentation for loop generation and assign 'segment'
attributes to 'weft' edges and nodes.
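        Examples
        --------
        A sketch of the intended call order; ``network`` is an assumed
        KnitNetwork instance with preliminary 'weft' and 'warp' edges::

            network.initialize_weft_edges()
            network.initialize_warp_edges()
            network.assign_segment_attributes()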
"""
if len(self.weft_edges) == 0:
errMsg = ("No 'weft' edges in KnitNetwork! Segmentation " +
"is impossible.")
raise NoWeftEdgesError(errMsg)
if len(self.end_nodes) == 0:
errMsg = ("No 'end' nodes in KnitNetwork! Segmentation " +
"is impossible.")
raise NoEndNodesError(errMsg)
# remove contour and 'warp' edges and store them
warp_storage = []
contour_storage = []
for edge in self.edges(data=True):
if not edge[2]["weft"]:
if edge[2]["warp"]:
warp_storage.append(edge)
else:
contour_storage.append(edge)
self.remove_edge(edge[0], edge[1])
# get all 'end' nodes ordered by their 'position' attribute
all_ends_by_position = self.all_ends_by_position(data=True)
# loop through all 'end' nodes
for position in all_ends_by_position:
for endnode in position:
self.traverse_weft_edges_and_set_attributes(endnode)
# add all previously removed edges back into the network
[self.add_edge(edge[0], edge[1], attr_dict=edge[2])
for edge in warp_storage + contour_storage]
# CREATION OF MAPPING NETWORK ---------------------------------------------
def create_mapping_network(self):
"""
Creates the corresponding mapping network for the final loop generation
from a KnitNetwork instance with fully assigned 'segment' attributes.
The created mapping network will be part of the KnitNetwork instance.
It can be accessed using the mapping_network property.
Notes
-----
All nodes without an 'end' attribute as well as all 'weft' edges are
removed by this step. Final nodes as well as final 'weft' and 'warp'
edges can only be created using the mapping network.
Returns
-------
success : bool
``True`` if the mapping network has been successfully created,
``False`` otherwise.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
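        Examples
        --------
        A minimal sketch; ``network`` is an assumed KnitNetwork instance with
        fully assigned 'segment' attributes::

            network.assign_segment_attributes()
            success = network.create_mapping_network()
            mapnet = network.mapping_network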
"""
# create a new KnitMappingNetwork instance
MappingNetwork = KnitMappingNetwork()
# get all edges of the current network by segment
weft_edges = sorted(self.weft_edges, key=lambda x: x[2]["segment"])
warp_edges = self.warp_edges
# initialize deque container for segment ids
segment_ids = deque()
# loop through all 'weft' edges and fill container with unique ids
for edge in weft_edges:
segment_id = edge[2]["segment"]
if segment_id not in segment_ids:
segment_ids.append(segment_id)
# error checking
if len(segment_ids) == 0:
errMsg = (
"The network contains no 'weft' edges with a 'segment' " +
"attribute assigned to them. A KnitMappingNetwork can " +
"only be created from a KnitNetwork with initialized " +
"'weft' edges for courses and corresponding 'warp' " +
"edges connecting their 'end' nodes.")
raise NoWeftEdgesError(errMsg)
# loop through all unique segment ids
for id in segment_ids:
# get the corresponding edges for this id and sort them
segment_edges = [e for e in weft_edges if e[2]["segment"] == id]
segment_edges.sort(key=lambda x: x[0])
# extract start and end nodes
start_node = (id[0], self.node[id[0]])
endNode = (id[1], self.node[id[1]])
# get all the geometry of the individual edges
segment_geo = [e[2]["geo"] for e in segment_edges]
# create a segment contour edge in the mapping network
res = MappingNetwork.create_segment_contour_edge(
start_node,
endNode,
id,
segment_geo)
if not res:
errMsg = ("SegmentContourEdge at segment id {} could not be " +
"created!")
raise KnitNetworkError(errMsg)
# add all warp edges to the mapping network to avoid lookup hassle
for warp_edge in warp_edges:
if warp_edge[0] > warp_edge[1]:
warp_from = warp_edge[1]
warp_to = warp_edge[0]
else:
warp_from = warp_edge[0]
warp_to = warp_edge[1]
MappingNetwork.add_edge(warp_from, warp_to, attr_dict=warp_edge[2])
# set mapping network property for this instance
self.mapping_network = MappingNetwork
# ditch all edges that are not 'warp' and nodes without 'end' attribute
[self.remove_node(n) for n, d in self.nodes_iter(data=True)
if not d["end"]]
[self.remove_edge(s, e) for s, e, d in self.edges_iter(data=True)
if not d["warp"]]
return True
# MAPPING NETWORK PROPERTY ------------------------------------------------
def _get_mapping_network(self):
"""
Gets the associated mapping network for this KnitNetwork instance.
"""
return self._mapping_network
def _set_mapping_network(self, mapping_network):
"""
Setter for this instance's associated mapping network.
"""
# set mapping network to instance
if (isinstance(mapping_network, KnitMappingNetwork)
or mapping_network == None):
self._mapping_network = mapping_network
else:
raise ValueError("Input is not of type KnitMappingNetwork!")
mapping_network = property(_get_mapping_network,
_set_mapping_network,
None,
"The associated mapping network of this " +
"KnitNetwork instance.")
# RETRIEVAL OF NODES AND EDGES FROM MAPPING NETWORK -----------------------
def all_nodes_by_segment(self, data=False, edges=False):
"""
Returns all nodes of the network ordered by 'segment' attribute.
Note: 'end' nodes are not included!
Parameters
----------
data : bool, optional
If ``True``, the nodes contained in the output will be represented
as 2-tuples in the form of (node_identifier, node_data).
Defaults to ``False``
edges : bool, optional
If ``True``, the returned output list will contain 3-tuples in the
form of (segment_value, segment_nodes, segment_edge).
Defaults to ``False``.
Returns
-------
nodes_by_segment : :obj:`list` of :obj:`tuple`
List of 2-tuples in the form of (segment_value, segment_nodes) or
3-tuples in the form of (segment_value, segment_nodes,
segment_edge) depending on the ``edges`` argument.
Raises
------
MappingNetworkError
If the mapping network is not available for this instance.
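        Examples
        --------
        A sketch of retrieving the nodes together with their segment contour
        edges; ``network`` is an assumed KnitNetwork instance whose mapping
        network has already been created::

            for segval, segnodes, segedge in network.all_nodes_by_segment(
                    data=True, edges=True):
                print(segval, len(segnodes))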
"""
# retrieve mappingnetwork
mapnet = self.mapping_network
if not mapnet:
errMsg = ("Mapping network has not been built for this instance!")
raise MappingNetworkError(errMsg)
allSegments = mapnet.segment_contour_edges
allSegmentNodes = [(n, d) for n, d
in self.nodes_iter(data=True) if d["segment"]]
segdict = {}
for n in allSegmentNodes:
if n[1]["segment"] not in segdict:
segdict[n[1]["segment"]] = [n]
else:
segdict[n[1]["segment"]].append(n)
anbs = []
if data and edges:
for segment in allSegments:
segval = segment[2]["segment"]
try:
segnodes = sorted(segdict[segval])
except KeyError:
segnodes = []
anbs.append((segval, segnodes, segment))
elif data and not edges:
for segment in allSegments:
segval = segment[2]["segment"]
try:
segnodes = sorted(segdict[segval])
except KeyError:
segnodes = []
anbs.append((segval, segnodes))
elif not data and edges:
for segment in allSegments:
segval = segment[2]["segment"]
try:
segnodes = sorted(segdict[segval])
except KeyError:
segnodes = []
anbs.append((segval, [sn[0] for sn in segnodes], segment))
elif not data and not edges:
for segment in allSegments:
segval = segment[2]["segment"]
try:
segnodes = sorted(segdict[segval])
except KeyError:
segnodes = []
anbs.append((segval, [sn[0] for sn in segnodes]))
return anbs
# STITCH WIDTH SAMPLING ---------------------------------------------------
def sample_segment_contours(self, stitch_width):
"""
Samples the segment contours of the mapping network with the given
stitch width. The resulting points are added to the network as nodes
and a 'segment' attribute is assigned to them based on their origin
segment contour edge.
Parameters
----------
stitch_width : float
The width of a single stitch inside the knit.
Raises
------
MappingNetworkError
If the mapping network is not available for this instance.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
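        Examples
        --------
        A minimal sketch; ``network`` is an assumed KnitNetwork instance with
        an existing mapping network, and the stitch width is given in model
        units::

            network.create_mapping_network()
            network.sample_segment_contours(stitch_width=2.5)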
"""
# retrieve mapping network
mapnet = self.mapping_network
if not mapnet:
errMsg = ("Mapping network has not been built for this " +
"instance, sampling segment contours is impossible!")
raise MappingNetworkError(errMsg)
# get the highest index of all the nodes in the network
maxNode = max(self.nodes())
# get all the segment geometry ordered by segment number
segment_contours = mapnet.segment_contour_edges
# sample all segments with the stitch width
nodeindex = maxNode + 1
for i, seg in enumerate(segment_contours):
            # get the geometry of the contour and reparameterize its domain
geo = seg[2]["geo"]
geo = geo.ToPolylineCurve()
geo.Domain = RhinoInterval(0.0, 1.0)
# compute the division points
crvlen = geo.GetLength()
density = int(round(crvlen / stitch_width))
if density == 0:
continue
divT = geo.DivideByCount(density, False)
divPts = [geo.PointAt(t) for t in divT]
# set leaf attribute
# TODO: better leaf strategy - this works but assigns false
# leaf nodes. usually not a problem but it should be fixed anyway
if self.node[seg[0]]["leaf"] and self.node[seg[1]]["leaf"]:
nodeLeaf = True
else:
nodeLeaf = False
# add all the nodes to the network
for j, pt in enumerate(divPts):
# add node to network
self.node_from_point3d(
nodeindex,
pt,
position=None,
num=j,
leaf=nodeLeaf,
start=False,
end=False,
segment=seg[2]["segment"],
increase=False,
decrease=False,
color=None)
# increment node index
nodeindex += 1
# CREATION OF FINAL 'WEFT' CONNECTIONS ------------------------------------
def create_final_weft_connections(self):
"""
Loop through all the segment contour edges and create all 'weft'
connections for this network.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
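        Examples
        --------
        A sketch of the usual sequence; ``network`` is an assumed KnitNetwork
        instance with a mapping network and sampled segment contours::

            network.sample_segment_contours(stitch_width=2.5)
            network.create_final_weft_connections()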
"""
# get all nodes by segment contour
SegmentValues, AllNodesBySegment = zip(*self.all_nodes_by_segment(
data=True))
# loop through all the segment contours
for i, segment in enumerate(AllNodesBySegment):
segval = SegmentValues[i]
firstNode = (segval[0], self.node[segval[0]])
lastNode = (segval[1], self.node[segval[1]])
if len(segment) == 0:
self.create_weft_edge(firstNode, lastNode, segval)
elif len(segment) == 1:
self.create_weft_edge(firstNode, segment[0], segval)
self.create_weft_edge(segment[0], lastNode, segval)
else:
# loop through all nodes on the current segment and create
# the final 'weft' edges
for j, node in enumerate(segment):
if j == 0:
self.create_weft_edge(firstNode, node, segval)
self.create_weft_edge(node, segment[j+1], segval)
elif j < len(segment)-1:
self.create_weft_edge(node, segment[j+1], segval)
elif j == len(segment)-1:
self.create_weft_edge(node, lastNode, segval)
# CREATION OF FINAL 'WARP' CONNECTIONS ------------------------------------
def attempt_warp_connection(self, node, candidate, source_nodes,
max_connections=4, verbose=False):
"""
Method for attempting a 'warp' connection to a candidate
node based on certain parameters.
Parameters
----------
        node : node
            The starting node for the possible 'warp' edge.
        candidate : node
            The target node for the possible 'warp' edge.
        source_nodes : :obj:`list`
            List of nodes on the source chain of the node. Used to check if
            the candidate node already has a connection to it.
        max_connections : int, optional
            The new 'warp' connection will only be made if the candidate
            node's number of connected neighbors is below this value.
Defaults to ``4``.
verbose : bool, optional
If ``True``, this routine and all its subroutines will print
messages about what is happening to the console.
Defaults to ``False``.
Returns
-------
result : bool
            ``True`` if the connection has been made, ``False`` otherwise.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
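        Examples
        --------
        This method is normally called by the private 'warp' connection
        routines; a direct call might look like this, where ``node`` and
        ``candidate`` are assumed 2-tuples of (identifier, data) and
        ``chain_nodes`` is a list of such tuples, all taken from ``network``::

            made = network.attempt_warp_connection(
                node, candidate, chain_nodes, max_connections=4)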
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
connecting_neighbors = self[candidate[0]]
if len(connecting_neighbors) < max_connections:
isConnected = False
for cn in connecting_neighbors:
if cn in [v[0] for v in source_nodes]:
isConnected = True
# print info on verbose setting
v_print("Candidate node {} is ".format(candidate[0]) +
"already connected! Skipping to next node...")
break
if not isConnected:
# print info on verbose setting
v_print("Connecting node {} to best candidate {}.".format(
node[0],
candidate[0]))
# finally create the warp edge for good
self.create_warp_edge(node, candidate)
return True
else:
return False
else:
return False
def _create_initial_warp_connections(self, segment_pair, max_connections=4,
precise=False, verbose=False):
"""
Private method for creating first pass 'warp' connections for the
supplied pair of segment chains.
The pair is only defined as a list of nodes, the nodes have to be
supplied with their attribute data!
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
if len(segment_pair) < 2:
v_print("Not enough contour segments in supplied set!")
return
# print info on verbose output
v_print("Creating initial 'warp' connections for contour set...")
# get initial and target nodes without 'end' nodes
initial_nodes = segment_pair[0]
target_nodes = segment_pair[1]
# define forbidden node index
forbidden_node = -1
# do nothing if one of the sets is empty
if len(initial_nodes) == 0 or len(target_nodes) == 0:
return
# loop through all nodes on the current segment
for k, node in enumerate(initial_nodes):
# get geometry from current node
thisPt = node[1]["geo"]
# print info on verbose setting
v_print("Processing node {} on segment {}:".format(
node[0],
node[1]["segment"]))
# filtering according to forbidden nodes
if forbidden_node != -1:
target_nodes = [tnode for tx, tnode in enumerate(target_nodes)
if tx >= target_nodes.index(forbidden_node)]
if len(target_nodes) == 0:
continue
# compute distances to target nodes
if precise:
allDists = [thisPt.DistanceTo(tn[1]["geo"])
for tn in target_nodes]
else:
allDists = [thisPt.DistanceToSquared(tn[1]["geo"])
for tn in target_nodes]
# sort nodes after distances
allDists, sorted_target_nodes = zip(*sorted(
zip(allDists, target_nodes),
key=itemgetter(0)))
# the four nearest nodes are the possible connections
possible_connections = sorted_target_nodes[:4]
# print info on verbose setting
v_print("Possible connections: {}".format([pc[0] for pc in
possible_connections]))
# handle edge case where there is no possible connection or just
# one
if len(possible_connections) == 0:
continue
elif len(possible_connections) == 1:
# attempt to connect to only possible candidate
fCand = possible_connections[0]
res = self.attempt_warp_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node
if res:
forbidden_node = fCand
continue
# get the segment contours current direction
if k < len(initial_nodes)-1:
contourDir = RhinoLine(thisPt,
initial_nodes[k+1][1]["geo"]).Direction
elif k == len(initial_nodes)-1:
contourDir = RhinoLine(
initial_nodes[k-1][1]["geo"], thisPt).Direction
contourDir.Unitize()
# get the directions of the possible connections
candidatePoints = [pc[1]["geo"] for pc in possible_connections]
candidateDirections = [RhinoLine(
thisPt, cp).Direction for cp in candidatePoints]
[cd.Unitize() for cd in candidateDirections]
# get the angles between segment contour dir and possible conn dir
normals = [RhinoVector3d.CrossProduct(
contourDir, cd) for cd in candidateDirections]
angles = [RhinoVector3d.VectorAngle(
contourDir, cd, n) for cd, n in zip(
candidateDirections, normals)]
# compute deltas as a measure of perpendicularity
deltas = [abs(a - (0.5 * pi)) for a in angles]
# sort possible connections first by distance, then by delta
(allDists,
deltas,
angles,
most_perpendicular) = zip(*sorted(zip(allDists,
deltas,
angles,
possible_connections[:]),
key=itemgetter(0, 1)))
# compute angle difference
aDelta = angles[0] - angles[1]
# get node neighbors
nNeighbors = self[node[0]]
# CONNECTION FOR LEAST ANGLE CHANGE -------------------------------
if len(nNeighbors) > 2 and aDelta < radians(6.0):
# print info on verbose setting
v_print("Using procedure for least angle " +
"change connection...")
# get previous connected edge and its direction
prevEdges = self.node_warp_edges(node[0], data=True)
if len(prevEdges) > 1:
print("More than one previous " +
"'warp' connection! This was unexpected..." +
"Taking the first one..?")
prevDir = prevEdges[0][2]["geo"].Direction
else:
prevDir = prevEdges[0][2]["geo"].Direction
prevDir.Unitize()
# get directions for the best two candidates
mpA = most_perpendicular[0]
mpB = most_perpendicular[1]
dirA = RhinoLine(thisPt, mpA[1]["geo"]).Direction
dirB = RhinoLine(thisPt, mpB[1]["geo"]).Direction
dirA.Unitize()
dirB.Unitize()
# get normals for angle measurement
normalA = RhinoVector3d.CrossProduct(prevDir, dirA)
normalB = RhinoVector3d.CrossProduct(prevDir, dirB)
# measure the angles
angleA = RhinoVector3d.VectorAngle(prevDir, dirA, normalA)
angleB = RhinoVector3d.VectorAngle(prevDir, dirB, normalB)
# select final candidate for connection
if angleA < angleB:
fCand = mpA
else:
fCand = mpB
# attempt connection to final candidate
res = self.attempt_warp_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node
if res:
forbidden_node = fCand
continue
# CONNECTION FOR MOST PERPENDICULAR -------------------------------
else:
# print info on verbose setting
v_print("Using procedure for most " +
"perpendicular connection...")
# define final candidate node
fCand = most_perpendicular[0]
# attempt connection to final candidate
res = self.attempt_warp_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node
if res:
forbidden_node = fCand
def _create_second_pass_warp_connection(self, source_nodes, source_index,
window, precise=False,
verbose=False, reverse=False):
"""
Private method for creating second pass 'warp' connections for the
given set of contours.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
if len(window) == 0:
# print info on verbose setting
v_print("Length of window is 0, skipping...")
elif len(window) == 1:
# print info on verbose setting
v_print("Window has only one node.")
v_print("Connecting to node {}.".format(window[0][0]))
# connect 'warp' edge
if reverse:
self.create_warp_edge(window[0], source_nodes[source_index])
else:
self.create_warp_edge(source_nodes[source_index], window[0])
else:
            # retrieve the point of the current source node
thisPt = source_nodes[source_index][1]["geo"]
# print info on verbose setting
v_print("Processing window nodes: {}".format(
[w[0] for w in window]))
# sort nodes in window by distance
if precise:
allDists = [thisPt.DistanceTo(pc[1]["geo"])
for pc in window]
else:
allDists = [thisPt.DistanceToSquared(pc[1]["geo"])
for pc in window]
allDists, window = zip(*sorted(zip(allDists, window),
key=itemgetter(0)))
# get the contours current direction
if source_index < len(source_nodes)-1:
sourceDir = RhinoLine(
thisPt,
source_nodes[source_index+1][1]["geo"]).Direction
elif source_index == len(source_nodes)-1:
sourceDir = RhinoLine(source_nodes[source_index-1][1]["geo"],
thisPt).Direction
sourceDir.Unitize()
# get the directions of the possible connections
candidatePoints = [pc[1]["geo"] for pc in window]
candidateDirections = [RhinoLine(thisPt, cp).Direction for cp
in candidatePoints]
[cd.Unitize() for cd in candidateDirections]
# get the angles between contour dir and window dir
normals = [RhinoVector3d.CrossProduct(sourceDir, cd)
for cd in candidateDirections]
angles = [RhinoVector3d.VectorAngle(sourceDir, cd, n) for cd, n
in zip(candidateDirections, normals)]
            # compute deltas as a measure of perpendicularity
deltas = [abs(a - (0.5 * pi)) for a in angles]
# sort window by distance, then by delta
allDists, deltas, most_perpendicular = zip(*sorted(
zip(allDists,
deltas,
window),
key=itemgetter(0, 1)))
# set final candidate node for connection
fCand = most_perpendicular[0]
# print info on verbose setting
v_print("Connecting to node " +
"{} on segment {}...".format(fCand[0],
fCand[1]["segment"]))
# connect warp edge to best target
if reverse:
self.create_warp_edge(fCand, source_nodes[source_index])
else:
self.create_warp_edge(source_nodes[source_index], fCand)
def create_final_warp_connections(self, max_connections=4,
include_end_nodes=True, precise=False,
verbose=False):
"""
Create the final 'warp' connections by building chains of segment
contour edges and connecting them.
For each source chain, a target chain is found using an
'educated guessing' strategy. This means that the possible target
chains are guessed by leveraging known topology facts about the network
and its special 'end' nodes.
Parameters
----------
max_connections : int, optional
            The maximum number of previous connections a candidate node for a
            'warp' connection is allowed to have.
Defaults to ``4``.
include_end_nodes : bool, optional
If ``True``, 'end' nodes between adjacent segment contours in a
source chain will be included in the first pass of connecting
'warp' edges.
Defaults to ``True``.
        precise : bool, optional
If ``True``, the distance between nodes will be calculated using
the Rhino.Geometry.Point3d.DistanceTo method, otherwise the much
faster Rhino.Geometry.Point3d.DistanceToSquared method is used.
Defaults to ``False``.
verbose : bool, optional
If ``True``, this routine and all its subroutines will print
messages about what is happening to the console. Great for
debugging and analysis.
Defaults to ``False``.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
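        Examples
        --------
        A sketch of the final connection step; ``network`` is an assumed
        KnitNetwork instance whose final 'weft' connections have already been
        created::

            network.create_final_weft_connections()
            network.create_final_warp_connections(
                max_connections=4,
                include_end_nodes=True,
                precise=False,
                verbose=False)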
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
# get all segment ids, nodes per segment and edges
SegmentValues, AllNodesBySegment, SegmentContourEdges = zip(
*self.all_nodes_by_segment(data=True, edges=True))
# build a dictionary of the segments by their index
SegmentDict = dict(zip(SegmentValues,
zip(SegmentContourEdges, AllNodesBySegment)))
# build source and target chains
source_chains, target_chain_dict = self.mapping_network.build_chains(
False,
True)
# initialize container dict for connected chains
connected_chains = dict()
# initialize segment mapping dictionaries
source_to_target = OrderedDict()
target_to_source = OrderedDict()
source_to_key = dict()
target_to_key = dict()
# ITERATE OVER SOURCE SEGMENT CHAINS ----------------------------------
# loop through all source chains and find targets in target chains
# using an 'educated guess strategy'
for i, source_chain in enumerate(source_chains):
# get the first and last node ('end' nodes)
firstNode = (source_chain[0][0][0],
self.node[source_chain[0][0][0]])
lastNode = (source_chain[0][-1][1],
self.node[source_chain[0][-1][1]])
# get the chain value of the current chain
chain_value = source_chain[1]
# extract the ids of the current chain
current_ids = tuple(source_chain[0])
# extract the current chains geometry
current_chain_geo_list = [SegmentDict[id][0][2]["geo"]
for id in current_ids]
current_chain_geo = RhinoCurve.JoinCurves(
[ccg.ToPolylineCurve() for ccg in current_chain_geo_list])[0]
current_chain_spt = current_chain_geo.PointAtNormalizedLength(0.5)
# retrieve the current segments from the segment dictionary by id
current_segment_nodes = [SegmentDict[id][1] for id in current_ids]
# retrieve the current nodes from the list of current segments
current_nodes = []
for j, csn in enumerate(current_segment_nodes):
if include_end_nodes and j > 0:
current_nodes.append((current_ids[j][0],
self.node[current_ids[j][0]]))
[current_nodes.append(n) for n in csn]
# reset the target key
target_key = None
# print info on verbose setting
v_print("--------------------------------------------------------")
v_print("Processing segment chain {} ...".format(source_chain))
# CASE 1 - ENCLOSED SHORT ROW <====> ALL CASES --------------------
# look for possible targets using a guess about the chain value
possible_target_keys = [key for key in target_chain_dict
if key[0] == chain_value[0]
and key[1] == chain_value[1]
and key not in connected_chains]
if len(possible_target_keys) > 0:
# find the correct chain by using geometric distance
possible_target_chains = [target_chain_dict[tk] for tk
in possible_target_keys]
# for every chain in the possible target chains, get the
# geometry and compute a sample distance
filtered_target_keys = []
possible_target_chain_dists = []
for j, ptc in enumerate(possible_target_chains):
# retrieve possible target geometry and join into one crv
ptc_geo_list = [SegmentDict[id][0][2]["geo"] for id in ptc]
if ptc_geo_list == current_chain_geo_list:
continue
ptc_geo = RhinoCurve.JoinCurves(
[ptcg.ToPolylineCurve() for ptcg in ptc_geo_list])[0]
# get a sample point and measure the distance to the
# source chain sample point
ptc_spt = ptc_geo.PointAtNormalizedLength(0.5)
if precise:
ptc_dist = current_chain_spt.DistanceTo(ptc_spt)
else:
ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt)
# append the filtered key to the key list
filtered_target_keys.append(possible_target_keys[j])
# append the measured distance to the distance list
possible_target_chain_dists.append(ptc_dist)
if len(filtered_target_keys) > 0:
# sort filtered target keys using the distances
possible_target_chain_dists, filtered_target_keys = zip(
*sorted(zip(
possible_target_chain_dists,
filtered_target_keys),
key=itemgetter(0)))
# set target key
target_key = filtered_target_keys[0]
else:
target_key = None
else:
target_key = None
# attempt warp connections if we have found a correct key
if target_key:
# get the guessed target chain from the chain dictionary
target_chain = target_chain_dict[target_key]
# extract the ids for node retrieval
target_ids = tuple([seg for seg in target_chain])
# retrieve the target nodes from the segment dictionary by id
target_segment_nodes = [SegmentDict[id][1]
for id in target_ids]
target_nodes = []
for j, tsn in enumerate(target_segment_nodes):
if include_end_nodes and j > 0:
target_nodes.append((
target_ids[j][0], self.node[target_ids[j][0]]))
[target_nodes.append(n) for n in tsn]
# print info on verbose setting
v_print("<=====> detected. Connecting to " +
"segment chain {}.".format(target_key))
# we have successfully verified our target segment and
# can create some warp edges!
segment_pair = [current_nodes, target_nodes]
# fill mapping dictionaries
if current_ids not in source_to_target:
source_to_target[current_ids] = target_ids
if current_ids not in source_to_key:
source_to_key[current_ids] = chain_value
if target_ids not in target_to_source:
target_to_source[target_ids] = current_ids
if target_ids not in target_to_key:
target_to_key[target_ids] = target_key
# create initial warp connections between the chains
connected_chains[target_key] = True
self._create_initial_warp_connections(
segment_pair,
max_connections=max_connections,
precise=precise,
verbose=verbose)
continue
# CASE 2 - SHORT ROW TO THE RIGHT <=====/ ALL CASES ---------------
# look for possible targets using a guess about the chain value
possible_target_keys = [key for key in target_chain_dict
if key[0] == chain_value[0]
and key[1] == chain_value[1]+1
and key not in connected_chains]
if len(possible_target_keys) == 1:
target_key = possible_target_keys[0]
elif len(possible_target_keys) > 1:
# find the correct chain by using geometric distance
possible_target_chains = [target_chain_dict[tk] for tk
in possible_target_keys]
# for every chain in the possible target chains, get the
# geometry and compute a sample distance
possible_target_chain_dists = []
for ptc in possible_target_chains:
# retrieve possible target geometry and join into one crv
ptc_geo = [SegmentDict[id][0][2]["geo"] for id in ptc]
ptc_geo = RhinoCurve.JoinCurves([pg.ToPolylineCurve()
for pg in ptc_geo])[0]
# get a sample point and measure the distance to the
# source chain sample point
ptc_spt = ptc_geo.PointAtNormalizedLength(0.5)
if precise:
ptc_dist = current_chain_spt.DistanceTo(ptc_spt)
else:
ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt)
# append the measured distance to the list
possible_target_chain_dists.append(ptc_dist)
# sort possible target keys using the distances
possible_target_chain_dists, possible_target_keys = zip(
*sorted(zip(possible_target_chain_dists,
possible_target_keys),
key=itemgetter(0)))
target_key = possible_target_keys[0]
else:
target_key = None
# attempt warp connections if we have found a correct key
if target_key:
# get the guessed target chain from the chain dictionary
target_chain = target_chain_dict[target_key]
# extract the ids for node retrieval
target_ids = tuple([seg for seg in target_chain])
# retrieve the target nodes from the segment dictionary by id
target_segment_nodes = [SegmentDict[id][1]
for id in target_ids]
target_nodes = []
for j, tsn in enumerate(target_segment_nodes):
if include_end_nodes and j > 0:
target_nodes.append((target_ids[j][0],
self.node[target_ids[j][0]]))
[target_nodes.append(n) for n in tsn]
targetFirstNode = target_ids[0][0]
targetLastNode = target_ids[-1][1]
# check if firstNode and targetFirstNode are connected via a
# 'warp' edge to verify
if (targetFirstNode == firstNode[0]
and targetLastNode in self[lastNode[0]]):
# print info on verbose setting
v_print("<=====/ detected. Connecting " +
"to segment {}.".format(target_key))
# we have successfully verified our target segment and
# can create some warp edges!
segment_pair = [current_nodes, target_nodes]
connected_chains[target_key] = True
# fill mapping dictionaries
if current_ids not in source_to_target:
source_to_target[current_ids] = target_ids
if current_ids not in source_to_key:
source_to_key[current_ids] = chain_value
if target_ids not in target_to_source:
target_to_source[target_ids] = current_ids
if target_ids not in target_to_key:
target_to_key[target_ids] = target_key
# create initial 'warp' connections between the chains
self._create_initial_warp_connections(
segment_pair,
max_connections=max_connections,
precise=precise,
verbose=verbose)
continue
else:
v_print("No real connection for <=====/. Next case...")
# CASE 3 - SHORT ROW TO THE LEFT /====> ALL CASES -----------------
# look for possible targets using a guess about the chain value
possible_target_keys = [key for key in target_chain_dict
if key[0] == chain_value[0]+1
and key[1] == chain_value[1]
and key not in connected_chains]
if len(possible_target_keys) == 1:
target_key = possible_target_keys[0]
elif len(possible_target_keys) > 1:
# find the correct chain by using geometric distance
possible_target_chains = [target_chain_dict[tk] for tk
in possible_target_keys]
# for every chain in the possible target chains, get the
# geometry and compute a sample distance
possible_target_chain_dists = []
for ptc in possible_target_chains:
# retrieve possible target geometry and join into one crv
ptc_geo = [SegmentDict[id][0][2]["geo"] for id in ptc]
ptc_geo = RhinoCurve.JoinCurves(
[pg.ToPolylineCurve() for pg in ptc_geo])[0]
# get a sample point and measure the distance to the
# source chain sample point
ptc_spt = ptc_geo.PointAtNormalizedLength(0.5)
if precise:
ptc_dist = current_chain_spt.DistanceTo(ptc_spt)
else:
ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt)
# append the measured distance to the list
possible_target_chain_dists.append(ptc_dist)
# sort possible target keys using the distances
possible_target_chain_dists, possible_target_keys = zip(
*sorted(zip(possible_target_chain_dists,
possible_target_keys),
key=itemgetter(0)))
target_key = possible_target_keys[0]
else:
target_key = None
# attempt warp connections if we have found a correct key
if target_key:
# get the guessed target chain from the chain dictionary
target_chain = target_chain_dict[target_key]
# extract the ids for node retrieval
target_ids = tuple([seg for seg in target_chain])
# retrieve the target nodes from the segment dictionary by id
target_segment_nodes = [SegmentDict[id][1]
for id in target_ids]
target_nodes = []
for j, tsn in enumerate(target_segment_nodes):
if include_end_nodes and j > 0:
target_nodes.append((target_ids[j][0],
self.node[target_ids[j][0]]))
[target_nodes.append(n) for n in tsn]
targetFirstNode = target_ids[0][0]
targetLastNode = target_ids[-1][1]
# check if firstNode and targetFirstNode are connected via a
# 'warp' edge to verify
if (targetFirstNode in self[firstNode[0]]
and targetLastNode == lastNode[0]):
# print info on verbose setting
v_print("/=====> detected. Connecting " +
"to segment {}.".format(target_key))
# we have successfully verified our target segment and
# can create some warp edges!
segment_pair = [current_nodes, target_nodes]
connected_chains[target_key] = True
# fill mapping dictionaries
if current_ids not in source_to_target:
source_to_target[current_ids] = target_ids
if current_ids not in source_to_key:
source_to_key[current_ids] = chain_value
if target_ids not in target_to_source:
target_to_source[target_ids] = current_ids
if target_ids not in target_to_key:
target_to_key[target_ids] = target_key
self._create_initial_warp_connections(
segment_pair,
max_connections=max_connections,
precise=precise,
verbose=verbose)
continue
else:
v_print("No real connection for /=====>. Next case...")
# CASE 4 - REGULAR ROW /=====/ ALL CASES --------------------------
# look for possible targets using a guess about the chain value
possible_target_keys = [key for key in target_chain_dict
if key[0] == chain_value[0]+1
and key[1] == chain_value[1]+1
and key not in connected_chains]
if len(possible_target_keys) == 1:
target_key = possible_target_keys[0]
elif len(possible_target_keys) > 1:
# find the correct chain by using geometric distance
possible_target_chains = [target_chain_dict[tk] for tk
in possible_target_keys]
# for every chain in the possible target chains, get the
# geometry and compute a sample distance
possible_target_chain_dists = []
for ptc in possible_target_chains:
# retrieve possible target geometry and join into one crv
ptc_geo = [SegmentDict[id][0][2]["geo"] for id in ptc]
ptc_geo = RhinoCurve.JoinCurves([pg.ToPolylineCurve()
for pg in ptc_geo])[0]
# get a sample point and measure the distance to the
# source chain sample point
ptc_spt = ptc_geo.PointAtNormalizedLength(0.5)
if precise:
ptc_dist = current_chain_spt.DistanceTo(ptc_spt)
else:
ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt)
# append the measured distance to the list
possible_target_chain_dists.append(ptc_dist)
# sort possible target keys using the distances
possible_target_chain_dists, possible_target_keys = zip(
*sorted(zip(possible_target_chain_dists,
possible_target_keys),
key=itemgetter(0)))
target_key = possible_target_keys[0]
else:
target_key = None
# attempt warp connections if we have found a correct key
if target_key:
# get the guessed target chain from the chain dictionary
target_chain = target_chain_dict[target_key]
# extract the ids for node retrieval
target_ids = tuple([seg for seg in target_chain])
# retrieve the target nodes from the segment dictionary by id
target_segment_nodes = [SegmentDict[id][1]
for id in target_ids]
target_nodes = []
for j, tsn in enumerate(target_segment_nodes):
if include_end_nodes and j > 0:
target_nodes.append((target_ids[j][0],
self.node[target_ids[j][0]]))
[target_nodes.append(n) for n in tsn]
# set target first and last node ('end' nodes)
targetFirstNode = target_ids[0][0]
targetLastNode = target_ids[-1][1]
# check if firstNode and targetFirstNode are connected via a
# 'warp' edge to verify
if (targetFirstNode in self[firstNode[0]]
and targetLastNode in self[lastNode[0]]):
# print info on verbose setting
v_print("/=====/ detected. Connecting " +
"to segment {}.".format(target_key))
# we have successfully verified our target segment and
# can create some warp edges!
segment_pair = [current_nodes, target_nodes]
connected_chains[target_key] = True
# fill mapping dictionaries
if current_ids not in source_to_target:
source_to_target[current_ids] = target_ids
if current_ids not in source_to_key:
source_to_key[current_ids] = chain_value
if target_ids not in target_to_source:
target_to_source[target_ids] = current_ids
if target_ids not in target_to_key:
target_to_key[target_ids] = target_key
self._create_initial_warp_connections(
segment_pair,
max_connections=max_connections,
precise=precise,
verbose=verbose)
continue
else:
v_print("No real connection for /=====/. No cases match.")
# INVOKE SECOND PASS FOR SOURCE ---> TARGET ---------------------------
for i, current_chain in enumerate(source_to_target):
v_print("--------------------------------------------------------")
v_print("S>T Current Chain: {}".format(current_chain))
# build a list of nodes containing all nodes in the current chain
# including all 'end' nodes
current_chain_nodes = []
for j, ccid in enumerate(current_chain):
current_chain_nodes.append((ccid[0], self.node[ccid[0]]))
[current_chain_nodes.append(n) for n in SegmentDict[ccid][1]]
current_chain_nodes.append((current_chain[-1][1],
self.node[current_chain[-1][1]]))
# retrieve target chain from the source to target mapping
target_chain = source_to_target[current_chain]
cckey = source_to_key[current_chain]
tckey = target_to_key[target_chain]
# build a list of nodes containing all nodes in the target chain
# including all 'end' nodes
target_chain_nodes = []
for j, tcid in enumerate(target_chain):
target_chain_nodes.append((tcid[0], self.node[tcid[0]]))
[target_chain_nodes.append(n) for n in SegmentDict[tcid][1]]
target_chain_nodes.append((target_chain[-1][1],
self.node[target_chain[-1][1]]))
# initialize start of window marker
start_of_window = -1
# loop through all nodes on the current chain
for k, node in enumerate(current_chain_nodes):
# find out if the current node is already principally connected
node_connected = False
# if the node is the first or the last node, it is defined as
                # connected per se
if k == 0 or k == len(current_chain_nodes)-1:
node_connected = True
# find out if the current node is already connected to the
# target chain, get node warp edges and their target nodes
node_warp_edges = self.node_warp_edges(node[0], data=False)
warp_edge_targets = [we[1] for we in node_warp_edges]
# loop over warp edge targets to get the start of the window
for wet in warp_edge_targets:
# loop over target chain nodes
for n, tcn in enumerate(target_chain_nodes):
                        # if a warp edge target is in the target chain,
                        # the node is connected and the start of the window
                        # for the next node is defined
if wet == tcn[0]:
if n > start_of_window or start_of_window == -1:
start_of_window = n
node_connected = True
# if the node is not connected to the target chain, we
# need to find the end of the window
if not node_connected:
v_print("Node: {}".format(node[0]))
v_print("Start of window: {}".format(start_of_window))
# re-check start of window for <.====/ case
if len(target_chain_nodes) >= 2 and start_of_window == -1:
if target_chain_nodes[0] == current_chain_nodes[0]:
start_of_window = 1
else:
start_of_window = 0
end_of_window = None
# loop over target chain nodes
for n, tcn in enumerate(target_chain_nodes):
if n >= start_of_window:
if tcn[0] == current_chain_nodes[-1][0]:
end_of_window = n
# get all warp edges of the current target node
# and their targets
tcn_warp_edges = self.node_warp_edges(tcn[0],
data=False)
tcn_warp_edge_targets = [we[1] for we
in tcn_warp_edges]
# loop over warp edge targets
for twet in tcn_warp_edge_targets:
if (twet in [cn[0] for cn
in current_chain_nodes]):
end_of_window = n
break
if end_of_window and end_of_window > start_of_window:
break
# re-check end of window for /====.> case
if end_of_window:
tcn_we = target_chain_nodes[end_of_window]
ccn_end = current_chain_nodes[-1]
ccn_len = len(current_chain_nodes)
if tcn_we == ccn_end and k == ccn_len-2:
end_of_window -= 1
if end_of_window < start_of_window:
start_of_window = -1
end_of_window = None
# if we have a valid window, set the target nodes
if start_of_window != -1 and end_of_window != None:
if end_of_window == len(target_chain_nodes)-1:
window = target_chain_nodes[start_of_window:]
else:
window = target_chain_nodes[start_of_window:
end_of_window+1]
v_print("End of window: {}".format(end_of_window))
# execute connection to target
if cckey <= tckey:
rev = False
else:
rev = True
v_print("Connecting chain {} to chain {}".format(
cckey,
tckey))
self._create_second_pass_warp_connection(
current_chain_nodes,
k,
window,
precise=precise,
verbose=verbose,
reverse=rev)
else:
# print info on verbose setting
v_print("No valid window for current chain!")
# INVOKE SECOND PASS FOR TARGET ---> SOURCE ---------------------------
for i, current_chain in enumerate(target_to_source):
v_print("--------------------------------------------------------")
v_print("T>S Current Chain: {}".format(current_chain))
# build a list of nodes containing all nodes in the current chain
# including all 'end' nodes
current_chain_nodes = []
for j, ccid in enumerate(current_chain):
current_chain_nodes.append((ccid[0], self.node[ccid[0]]))
[current_chain_nodes.append(n) for n in SegmentDict[ccid][1]]
current_chain_nodes.append((current_chain[-1][1],
self.node[current_chain[-1][1]]))
# retrieve target chain from the source to target mapping
target_chain = target_to_source[current_chain]
cckey = target_to_key[current_chain]
tckey = source_to_key[target_chain]
# build a list of nodes containing all nodes in the target chain
# including all 'end' nodes
target_chain_nodes = []
for j, tcid in enumerate(target_chain):
target_chain_nodes.append((tcid[0], self.node[tcid[0]]))
[target_chain_nodes.append(n) for n in SegmentDict[tcid][1]]
target_chain_nodes.append((target_chain[-1][1],
self.node[target_chain[-1][1]]))
# initialize start of window marker
start_of_window = -1
# loop through all nodes on the current chain
for k, node in enumerate(current_chain_nodes):
# find out if the current node is already principally connected
node_connected = False
if k == 0 or k == len(current_chain_nodes)-1:
node_connected = True
# find out if the current node is already connected to the
# target chain
node_warp_edges = self.node_warp_edges(node[0], data=False)
warp_edge_targets = [we[1] for we in node_warp_edges]
# loop over warp edge targets
for wet in warp_edge_targets:
# if a warp edge target is in the target chain nodes, the node
# is connected and the start of the window for the next
# node is defined
for n, tcn in enumerate(target_chain_nodes):
if wet == tcn[0]:
if n > start_of_window or start_of_window == -1:
start_of_window = n
node_connected = True
# if the node is not connected to the target chain, we
# need to find the end of the window
if not node_connected:
# print info on verbose output
v_print("Node: {}".format(node[0]))
v_print("Start of window: {}".format(start_of_window))
# re-check start of window for <.====/ case
if len(target_chain_nodes) >= 2 and start_of_window == -1:
if target_chain_nodes[0] == current_chain_nodes[0]:
start_of_window = 1
else:
start_of_window = 0
end_of_window = None
# loop over target chain nodes
for n, tcn in enumerate(target_chain_nodes):
if n >= start_of_window:
if tcn[0] == current_chain_nodes[-1][0]:
end_of_window = n
# get all warp edges of the current target node and
# their targets
tcn_warp_edges = self.node_warp_edges(tcn[0],
data=False)
tcn_warp_edge_targets = [we[1] for we
in tcn_warp_edges]
# loop over warp edge targets of current target
# node
for twet in tcn_warp_edge_targets:
# if warp edge target is in current chain,
# it is the end of the window
if (twet in [cn[0] for cn
in current_chain_nodes]):
end_of_window = n
break
if end_of_window and end_of_window > start_of_window:
break
# re-check end of window for /====.> case
if end_of_window:
tcn_we = target_chain_nodes[end_of_window]
ccn_end = current_chain_nodes[-1]
ccn_len = len(current_chain_nodes)
if tcn_we == ccn_end and k == ccn_len-2:
end_of_window -= 1
if end_of_window < start_of_window:
start_of_window = -1
end_of_window = None
# if there is a valid window, set the target chain nodes
if start_of_window != -1 and end_of_window != None:
if end_of_window == len(target_chain_nodes)-1:
window = target_chain_nodes[start_of_window:]
else:
window = target_chain_nodes[start_of_window:
end_of_window+1]
# print info on verbose output
v_print("End of window: {}".format(end_of_window))
# execute connection
if cckey < tckey:
rev = False
else:
rev = True
v_print("Connecting chain {} to chain {}.".format(
cckey,
tckey))
self._create_second_pass_warp_connection(
current_chain_nodes,
k,
window,
precise=precise,
verbose=verbose,
reverse=rev)
else:
v_print("No valid window for current chain!")
# FIND FACES OF NETWORK ---------------------------------------------------
def to_KnitDiNetwork(self):
"""
Constructs and returns a directed KnitDiNetwork based on this network
by duplicating all edges so that [u -> v] and [v -> u] exist for every
edge [u - v] in this undirected network.
Returns
-------
directed_network : :class:`KnitDiNetwork`
The directed representation of this network.
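Examples
--------
Minimal usage sketch; ``network`` is an illustrative name for an
already initialized and connected KnitNetwork instance::
    directed = network.to_KnitDiNetwork()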
"""
# create a directed network with duplicate edges in opposing directions
dirnet = KnitDiNetwork()
dirnet.name = self.name
dirnet.add_nodes_from(self)
dirnet.add_edges_from((u, v, data)
for u, nbrs in self.adjacency_iter()
for v, data in nbrs.items())
dirnet.graph = self.graph
dirnet.node = self.node
dirnet.mapping_network = self.mapping_network
return dirnet
def find_cycles(self, mode=-1):
"""
Finds the cycles (faces) of this network by utilizing a wall-follower
mechanism.
Parameters
----------
mode : int, optional
Determines how the neighbors of each node are sorted when finding
cycles for the network.
``-1`` corresponds to using the world XY plane.
``0`` corresponds to using a plane normal to the origin node's closest
point on the reference geometry.
``1`` corresponds to using a plane normal to the average of the origin
and neighbor nodes' closest points on the reference geometry.
``2`` corresponds to using an average plane between a plane fit to the
origin and its neighbor nodes and a plane normal to the origin node's
closest point on the reference geometry.
Defaults to ``-1``.
Warning
-------
Modes other than ``-1`` are only possible if this network has an
underlying reference geometry in the form of a Mesh or NurbsSurface. The
reference geometry should be assigned when initializing the network by
assigning the geometry to the "reference_geometry" attribute of the
network.
Notes
-----
Based on an implementation inside the COMPAS framework.
For more info see [16]_.
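Examples
--------
Minimal usage sketch; ``network`` is an illustrative name for an
already initialized and connected KnitNetwork instance. As consumed by
``create_dual``, the result maps cycle keys to the lists of node
identifiers that form each cycle::
    cycles = network.find_cycles(mode=-1)
    quads = [c for c in cycles.values() if len(c) == 4]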
"""
return self.to_KnitDiNetwork().find_cycles(mode=mode)
def create_mesh(self, mode=-1, max_valence=4):
"""
Constructs a mesh from this network by finding cycles and using them as
mesh faces.
Parameters
----------
mode : int, optional
Determines how the neighbors of each node are sorted when finding
cycles for the network.
``-1`` corresponds to using the world XY plane.
``0`` corresponds to using a plane normal to the origin node's closest
point on the reference geometry.
``1`` corresponds to using a plane normal to the average of the origin
and neighbor nodes' closest points on the reference geometry.
``2`` corresponds to using an average plane between a plane fit to the
origin and its neighbor nodes and a plane normal to the origin node's
closest point on the reference geometry.
Defaults to ``-1``.
max_valence : int, optional
Sets the maximum edge valence of the faces. If this is set to > 4,
n-gon faces (more than 4 edges) are allowed. Otherwise, their
cycles are treated as invalid and will be ignored.
Defaults to ``4``.
Warning
-------
Modes other than ``-1`` are only possible if this network has an
underlying reference geometry in the form of a Mesh or NurbsSurface. The
reference geometry should be assigned when initializing the network by
assigning the geometry to the "reference_geometry" attribute of the
network.
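Examples
--------
Minimal usage sketch; ``network`` is an illustrative name for an
already initialized and connected KnitNetwork instance::
    mesh = network.create_mesh(mode=-1, max_valence=4)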
"""
return self.to_KnitDiNetwork().create_mesh(mode=mode,
max_valence=max_valence)
# DUALITY -----------------------------------------------------------------
def create_dual(self, mode=-1, merge_adj_creases=False,
mend_trailing_rows=False):
"""
Creates the dual of this KnitNetwork while translating current edge
attributes to the edges of the dual network.
Parameters
----------
mode : int, optional
Determines how the neighbors of each node are sorted when finding
cycles for the network.
``-1`` corresponds to using the world XY plane.
``0`` corresponds to using a plane normal to the origin node's closest
point on the reference geometry.
``1`` corresponds to using a plane normal to the average of the origin
and neighbor nodes' closest points on the reference geometry.
``2`` corresponds to using an average plane between a plane fit to the
origin and its neighbor nodes and a plane normal to the origin node's
closest point on the reference geometry.
Defaults to ``-1``.
merge_adj_creases : bool, optional
If ``True``, will merge adjacent 'increase' and 'decrease' nodes
connected by a 'weft' edge into a single node. This effectively
simplifies the pattern, as a decrease is unnecessary to perform
if an increase is right beside it; both nodes can be replaced by a
single regular node (stitch).
Defaults to ``False``.
mend_trailing_rows : bool, optional
If ``True``, will attempt to mend trailing rows by reconnecting
nodes.
Defaults to ``False``.
Returns
-------
dual_network : :class:`KnitDiNetwork`
The dual network of this KnitNetwork.
Warning
-------
Modes other than -1 (default) are only possible if this network has an
underlying reference geometry in the form of a Mesh or NurbsSurface. The
reference geometry should be assigned when initializing the network by
assigning the geometry to the 'reference_geometry' attribute of the
network.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
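Examples
--------
Minimal usage sketch; ``network`` is an illustrative name for an
already initialized and connected KnitNetwork instance::
    dual = network.create_dual(mode=-1, merge_adj_creases=True)
    ends = [n for n, d in dual.nodes_iter(data=True) if d["end"]]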
"""
# first find the cycles of this network
cycles = self.find_cycles(mode=mode)
# get node data for all nodes once
node_data = {k: self.node[k] for k in self.nodes_iter()}
# create new directed KnitDiNetwork for dual network
DualNetwork = KnitDiNetwork(
reference_geometry=self.graph["reference_geometry"])
# create mapping dict for edges to adjacent cycles
edge_to_cycle = {(u, v): None for u, v in self.edges_iter()}
edge_to_cycle.update({(v, u): None for u, v in self.edges_iter()})
# CREATE NODES OF DUAL ------------------------------------------------
# for each cycle, find the centroid node
for ckey in sorted(cycles.keys()):
cycle = cycles[ckey]
clen = len(cycle)
# skip invalid cycles (ngons and self-loops)
if clen > 4 or clen < 3:
continue
# loop over cycle edges and fill mapping dicts
closed_cycle = cycle[:]
closed_cycle.append(cycle[0])
for u, v in pairwise(closed_cycle):
edge_to_cycle[(u, v)] = ckey
# get coords of cycle nodes
cycle_coords = [[node_data[k]["x"],
node_data[k]["y"],
node_data[k]["z"]] for k in cycle]
# compute centroid
cx, cy, cz = zip(*cycle_coords)
centroid = [sum(cx) / clen, sum(cy) / clen, sum(cz) / clen]
centroid_pt = RhinoPoint3d(*centroid)
# get node 'leaf' attributes
is_leaf = True in [node_data[k]["leaf"] for k in cycle]
# get node 'color' attributes. only if all colors of the cycle
# match, the color attribute will be set!
colors = [node_data[k]["color"] for k in cycle]
if all(x == colors[0] for x in colors):
cycle_color = colors[0]
else:
cycle_color = None
# add node to dual network
DualNetwork.node_from_point3d(ckey,
centroid_pt,
position=None,
num=None,
leaf=is_leaf,
start=False,
end=False,
segment=None,
increase=False,
decrease=False,
color=cycle_color)
# CREATE EDGES IN DUAL ------------------------------------------------
# loop over original edges and create corresponding edges in dual
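# note that the edge type flips in the dual: two cycles sharing a
# 'warp' edge of this network get connected by a 'weft' edge in the
# dual, and two cycles sharing a 'weft' edge get connected by a
# 'warp' edge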
for u, v, d in self.edges_iter(data=True):
u, v = self.edge_geometry_direction(u, v)
cycle_a = edge_to_cycle[(u, v)]
cycle_b = edge_to_cycle[(v, u)]
if cycle_a != None and cycle_b != None:
node_a = (cycle_a, DualNetwork.node[cycle_a])
node_b = (cycle_b, DualNetwork.node[cycle_b])
if d["warp"]:
DualNetwork.create_weft_edge(node_b, node_a)
elif d["weft"]:
DualNetwork.create_warp_edge(node_a, node_b)
# SET ATTRIBUTES OF DUAL NODES ----------------------------------------
# loop over all nodes of the network and set crease and end attributes
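# the combination of 'warp' and 'weft' edge counts roughly classifies
# each dual node as follows (see the branches below):
#   2 warp / 1 weft -> 'end' ('start' if the weft edge is outgoing)
#   1 warp / 1 weft -> 'end', plus 'start', 'increase' or 'decrease'
#   2 warp / 0 weft -> 'end' and 'start'
#   1 warp / 0 weft -> 'end' and 'start'
#   0 warp / 1 weft -> 'end' ('start' if the weft edge is outgoing)
#   1 warp / 2 weft -> 'increase' or 'decrease' for non-leaf nodes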
for node in DualNetwork.nodes_iter():
node_data = DualNetwork.node[node]
warp_in = DualNetwork.node_warp_edges_in(node)
warp_out = DualNetwork.node_warp_edges_out(node)
weft_in = DualNetwork.node_weft_edges_in(node)
weft_out = DualNetwork.node_weft_edges_out(node)
warplen = len(warp_in) + len(warp_out)
weftlen = len(weft_in) + len(weft_out)
# 2 warp edges and 1 weft edge >> end
if warplen == 2 and weftlen == 1:
node_data["end"] = True
if weft_out:
node_data["start"] = True
# 1 warp edge and 1 weft edge >> end and increase / decrease
elif warplen == 1 and weftlen == 1:
node_data["end"] = True
if weft_out:
node_data["start"] = True
if warp_out and not node_data["leaf"]:
node_data["increase"] = True
elif warp_in and not node_data["leaf"]:
node_data["decrease"] = True
# 2 warp edges and 0 weft edges >> end
elif warplen == 2 and weftlen == 0:
node_data["end"] = True
node_data["start"] = True
# 1 warp edge and 0 weft edges >> end
elif warplen == 1 and weftlen == 0:
node_data["end"] = True
node_data["start"] = True
# 0 warp edges and 1 weft edge >> end
elif warplen == 0 and weftlen == 1:
node_data["end"] = True
if weft_out:
node_data["start"] = True
# 1 warp edge and 2 weft edges >> increase or decrease
elif warplen == 1 and weftlen == 2:
if not node_data["leaf"]:
if warp_out:
node_data["increase"] = True
elif warp_in:
node_data["decrease"] = True
# MERGE ADJACENT INCREASES/DECREASES ----------------------------------
if merge_adj_creases:
increase_nodes = [inc for inc in DualNetwork.nodes_iter(data=True)
if inc[1]["increase"]]
for increase, data in increase_nodes:
pred = DualNetwork.predecessors(increase)
suc = DualNetwork.successors(increase)
pred = [p for p in pred if DualNetwork.node[p]["decrease"]]
suc = [s for s in suc if DualNetwork.node[s]["decrease"]]
# merge only with pred or with suc but not both
if (len(pred) == 1 and
DualNetwork.edge[pred[0]][increase]["weft"]):
# merge nodes, edge is pred, increase
pred = pred[0]
pd = DualNetwork.node[pred]
# remove the connecting edge
DualNetwork.remove_edge(pred, increase)
# get the points of the nodes
increase_pt = data["geo"]
pred_pt = pd["geo"]
# compute the new merged point
new_vec = RhinoVector3d(increase_pt - pred_pt)
new_pt = pred_pt + (new_vec * 0.5)
# replace the increase with the new pt and invert the
# increase attribute
data["geo"] = new_pt
data["x"] = new_pt.X
data["y"] = new_pt.Y
data["z"] = new_pt.Z
data["increase"] = False
# edit the edges of the increase
for edge in DualNetwork.edges_iter(increase, data=True):
edge[2]["geo"] = RhinoLine(
data["geo"],
DualNetwork.node[edge[1]]["geo"])
# edit edges of decrease
for edge in DualNetwork.in_edges_iter(pred, data=True):
if edge[2]["warp"]:
fromNode = (edge[0], DualNetwork.node[edge[0]])
toNode = (increase, data)
DualNetwork.create_warp_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
elif edge[2]["weft"]:
fromNode = (edge[0], DualNetwork.node[edge[0]])
toNode = (increase, data)
DualNetwork.create_weft_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
DualNetwork.remove_node(pred)
elif (not pred and len(suc) == 1 and
DualNetwork.edge[increase][suc[0]]["weft"]):
# merge nodes, edge is increase, suc
suc = suc[0]
sd = DualNetwork.node[suc]
# remove the connecting edge
DualNetwork.remove_edge(increase, suc)
# get the points of the nodes
increase_pt = data["geo"]
suc_pt = sd["geo"]
# compute the new merged point
new_vec = RhinoVector3d(suc_pt - increase_pt)
new_pt = increase_pt + (new_vec * 0.5)
# replace the increase with the new pt and invert the
# increase attribute
data["geo"] = new_pt
data["x"] = new_pt.X
data["y"] = new_pt.Y
data["z"] = new_pt.Z
data["increase"] = False
# edit the edges of the increase
for edge in DualNetwork.edges_iter(increase, data=True):
edge[2]["geo"] = RhinoLine(
data["geo"],
DualNetwork.node[edge[1]]["geo"])
for edge in DualNetwork.in_edges_iter(increase, data=True):
edge[2]["geo"] = RhinoLine(
DualNetwork.node[edge[0]]["geo"],
data["geo"])
# edit incoming edges of decrease
for edge in DualNetwork.in_edges_iter(suc, data=True):
if edge[2]["warp"]:
fromNode = (edge[0], DualNetwork.node[edge[0]])
toNode = (increase, data)
DualNetwork.create_warp_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
elif edge[2]["weft"]:
fromNode = (edge[0], DualNetwork.node[edge[0]])
toNode = (increase, data)
DualNetwork.create_weft_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
# edit outgoing edges of decrease
for edge in DualNetwork.edges_iter(suc, data=True):
if edge[2]["warp"]:
fromNode = (increase, data)
toNode = (edge[1], DualNetwork.node[edge[1]])
DualNetwork.create_warp_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
elif edge[2]["weft"]:
fromNode = (increase, data)
toNode = (edge[1], DualNetwork.node[edge[1]])
DualNetwork.create_weft_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
DualNetwork.remove_node(suc)
# ATTEMPT TO MEND TRAILING ROWS ---------------------------------------
if mend_trailing_rows:
# TODO: find a safer / more robust implementation attempt!
errMsg = ("This option is not satisfyingly implemented for this " +
"method, yet. Therefore, it is deactivated for now.")
raise NotImplementedError(errMsg)
# get all nodes which are 'leaf' and 'end' (right side)
# and all nodes which are 'leaf' and 'start' (left side)
trailing = sorted([(n, d) for n, d in
DualNetwork.nodes_iter(data=True)
if d["leaf"]
and d["end"]], key=lambda x: x[0])
trailing_left = deque([t for t in trailing if t[1]["start"]])
trailing_right = deque([t for t in trailing if not t[1]["start"]])
# from the trailing left nodes...
# travel one outgoing 'weft'
# from there travel one incoming 'warp'
# if the resulting node is 'start', 'end' and has 3 edges in total
# >> take its outgoing 'warp' edge (we already traveled that so
# we should already have it)
# >> connect it to the trailing left node
# >> remove the 'leaf' attribute from the trailing node as it is no
# longer trailing
# >> add the 'increase' attribute to the previous target of the
# 'warp' edge
while len(trailing_left) > 0:
# pop an item from the deque
trail = trailing_left.popleft()
# travel one outgoing 'weft' edge
weft_out = DualNetwork.node_weft_edges_out(trail[0], data=True)
if not weft_out:
continue
weft_out = weft_out[0]
# check the target of the 'weft' edge for incoming 'warp'
warp_in = DualNetwork.node_warp_edges_in(
weft_out[1],
data=True)
warp_out = DualNetwork.node_warp_edges_out(
weft_out[1],
data=True)
if not warp_in:
continue
warp_in = warp_in[0]
candidate = (warp_in[0], DualNetwork.node[warp_in[0]])
nce = len(DualNetwork.in_edges(warp_in[0]))
nce += len(DualNetwork.edges(warp_in[0]))
# if this condition holds, we have a trailing increase
if (candidate[1]["start"]
and candidate[1]["end"]
and nce == 3):
# remove found 'warp' edge
DualNetwork.remove_edge(warp_in[0], warp_in[1])
# assign 'increase' attribute to former 'warp' edge target
DualNetwork.node[warp_in[1]]["increase"] = True
# connect candidate to trail with new 'warp' edge
DualNetwork.create_warp_edge(candidate, trail)
# remove 'leaf' attribute of former trail
trail[1]["leaf"] = False
else:
if warp_out:
warp_out = warp_out[0]
candidate = (warp_out[1],
DualNetwork.node[warp_out[1]])
nce = len(DualNetwork.in_edges(warp_out[1]))
nce += len(DualNetwork.edges(warp_out[1]))
# if this condition holds, we have a trailing decrease
if (candidate[1]["start"]
and candidate[1]["end"]
and nce == 3):
# remove found 'warp' edge
DualNetwork.remove_edge(warp_out[0], warp_out[1])
# assign 'decrease' attribute to former 'warp'
# edge source
DualNetwork.node[warp_out[0]]["decrease"] = True
# connect former trail to candidate with new
# 'warp' edge
DualNetwork.create_warp_edge(trail, candidate)
# remove 'leaf' attribute of former trail
trail[1]["leaf"] = False
while len(trailing_right) > 0:
# pop an item from the deque
trail = trailing_right.popleft()
# travel one incoming 'weft' edge
weft_in = DualNetwork.node_weft_edges_in(trail[0], data=True)
if not weft_in:
continue
weft_in = weft_in[0]
# check the source of the incoming 'weft' edge for 'warp' edges
warp_in = DualNetwork.node_warp_edges_in(weft_in[0],
data=True)
warp_out = DualNetwork.node_warp_edges_out(weft_in[0],
data=True)
if not warp_in:
continue
warp_in = warp_in[0]
candidate = (warp_in[0], DualNetwork.node[warp_in[0]])
nce = len(DualNetwork.in_edges(warp_in[0]))
nce += len(DualNetwork.edges(warp_in[0]))
# if this condition holds, we have a trailing increase
if candidate[1]["end"] and nce == 3:
# remove found 'warp' edge
DualNetwork.remove_edge(warp_in[0], warp_in[1])
# assign 'increase' attribute to former 'warp' edge target
DualNetwork.node[warp_in[1]]["increase"] = True
# connect candidate to trail with new 'warp' edge
DualNetwork.create_warp_edge(candidate, trail)
# remove 'leaf' attribute of former trail
trail[1]["leaf"] = False
else:
if warp_out:
warp_out = warp_out[0]
candidate = (warp_out[1],
DualNetwork.node[warp_out[1]])
nce = len(DualNetwork.in_edges(warp_out[1]))
nce += len(DualNetwork.edges(warp_out[1]))
# if this condition holds, we have a trailing decrease
if (candidate[1]["start"]
and candidate[1]["end"]
and nce == 3):
# remove found 'warp' edge
DualNetwork.remove_edge(warp_out[0], warp_out[1])
# assign 'decrease' attribute to former 'warp'
# edge source
DualNetwork.node[warp_out[0]]["decrease"] = True
# connect former trail to candidate with new
# 'warp' edge
DualNetwork.create_warp_edge(trail, candidate)
# remove 'leaf' attribute of former trail
trail[1]["leaf"] = False
return DualNetwork
# MAIN ------------------------------------------------------------------------
if __name__ == '__main__':
pass
| 45.454681 | 79 | 0.495106 | 150,387 | 0.983211 | 0 | 0 | 4,247 | 0.027766 | 0 | 0 | 52,321 | 0.342068 |