blob_id stringlengths 40-40 | directory_id stringlengths 40-40 | path stringlengths 3-616 | content_id stringlengths 40-40 | detected_licenses sequencelengths 0-112 | license_type stringclasses 2 values | repo_name stringlengths 5-115 | snapshot_id stringlengths 40-40 | revision_id stringlengths 40-40 | branch_name stringclasses 777 values | visit_date timestamp[us] 2015-08-06 10:31:46 to 2023-09-06 10:44:38 | revision_date timestamp[us] 1970-01-01 02:38:32 to 2037-05-03 13:00:00 | committer_date timestamp[us] 1970-01-01 02:38:32 to 2023-09-06 01:08:06 | github_id int64 4.92k to 681M ⌀ | star_events_count int64 0-209k | fork_events_count int64 0-110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us] 2012-06-04 01:52:49 to 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us] 2008-05-22 07:58:19 to 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3-10.2M | extension stringclasses 188 values | content stringlengths 3-10.2M | authors sequencelengths 1-1 | author_id stringlengths 1-132 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3c3d8847ece82de5f4ddb2fa122ea976e7da211e | 2ee3a2b8971118b1a1e8c101382702d698021ad5 | /weather/models.py | 8372f28956277f086c9e5f53ff17faa6a968168c | [] | no_license | manikshahkataria/weather | 29a34264fd281cf26758be06d19dd19bbd226cfc | 1bb5160caab2dc287118ab7ed4a25cf575453ee4 | refs/heads/master | 2022-12-11T07:50:28.988645 | 2019-01-19T10:22:10 | 2019-01-19T10:22:10 | 163,946,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | from django.db import models
class City(models.Model):
name = models.CharField(max_length=25)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'cities'
# Create your models here.
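# Usage sketch (hypothetical data, e.g. from a Django shell):
#   City.objects.create(name='Delhi')
#   print(City.objects.first()) # -> Delhi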
| [
"[email protected]"
] | |
96f4d811c08062451b1b929e346ee171461170de | 18aee5d93a63eab684fe69e3aa0abd1372dd5d08 | /test/legacy_test/test_poisson_op.py | ee66d578014c70395ec3525f8118d2780886458c | [
"Apache-2.0"
] | permissive | Shixiaowei02/Paddle | 8d049f4f29e281de2fb1ffcd143997c88078eadb | 3d4d995f26c48f7792b325806ec3d110fc59f6fc | refs/heads/develop | 2023-06-26T06:25:48.074273 | 2023-06-14T06:40:21 | 2023-06-14T06:40:21 | 174,320,213 | 2 | 1 | Apache-2.0 | 2022-12-28T05:14:30 | 2019-03-07T10:09:34 | C++ | UTF-8 | Python | false | false | 8,481 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import unittest
import numpy as np
from eager_op_test import OpTest
import paddle
paddle.enable_static()
paddle.seed(100)
def output_hist(out, lam, a, b):
prob = []
bin = []
for i in range(a, b + 1):
prob.append((lam**i) * math.exp(-lam) / math.factorial(i))
bin.append(i)
bin.append(b + 0.1)
hist, _ = np.histogram(out, bin)
hist = hist.astype("float32")
hist = hist / float(out.size)
return hist, prob
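# Worked example of the pmf being binned (a sketch): for lam = 10,
# P(X = i) = 10**i * exp(-10) / i!, e.g. P(X = 10) is roughly 0.125;
# verify_output compares the sample histogram to these values bin by bin.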
class TestPoissonOp1(OpTest):
def setUp(self):
self.op_type = "poisson"
self.python_api = paddle.tensor.poisson
self.config()
self.attrs = {}
self.inputs = {'X': np.full([2048, 1024], self.lam, dtype=self.dtype)}
self.outputs = {'Out': np.ones([2048, 1024], dtype=self.dtype)}
def config(self):
self.lam = 10
self.a = 5
self.b = 15
self.dtype = "float64"
def verify_output(self, outs):
hist, prob = output_hist(np.array(outs[0]), self.lam, self.a, self.b)
np.testing.assert_allclose(hist, prob, rtol=0.01)
def test_check_output(self):
self.check_output_customized(self.verify_output)
def test_check_grad_normal(self):
self.check_grad(
['X'],
'Out',
user_defined_grads=[np.zeros([2048, 1024], dtype=self.dtype)],
user_defined_grad_outputs=[
np.random.rand(2048, 1024).astype(self.dtype)
],
)
class TestPoissonOp2(TestPoissonOp1):
def config(self):
self.lam = 5
self.a = 1
self.b = 8
self.dtype = "float32"
class TestPoissonAPI(unittest.TestCase):
def test_static(self):
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
x_np = np.random.rand(10, 10)
x = paddle.static.data(name="x", shape=[10, 10], dtype='float64')
y = paddle.poisson(x)
exe = paddle.static.Executor()
y_np = exe.run(
paddle.static.default_main_program(),
feed={"x": x_np},
fetch_list=[y],
)
self.assertTrue(np.min(y_np) >= 0)
def test_dygraph(self):
with paddle.fluid.dygraph.base.guard():
x = paddle.randn([10, 10], dtype='float32')
y = paddle.poisson(x)
self.assertTrue(np.min(y.numpy()) >= 0)
x = paddle.randn([10, 10], dtype='float32')
x.stop_gradient = False
y = paddle.poisson(x)
y.backward()
self.assertTrue(np.min(y.numpy()) >= 0)
np.testing.assert_array_equal(np.zeros_like(x), x.gradient())
def test_fixed_random_number(self):
# Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t'
if not paddle.is_compiled_with_cuda():
return
print("Test Fixed Random number on GPU------>")
paddle.disable_static()
paddle.set_device('gpu')
paddle.seed(2021)
x = paddle.full([32, 3, 1024, 768], 10.0, dtype="float32")
y = paddle.poisson(x)
y_np = y.numpy()
expect = [
13.0,
13.0,
11.0,
8.0,
12.0,
6.0,
9.0,
15.0,
16.0,
6.0,
13.0,
12.0,
9.0,
15.0,
17.0,
8.0,
11.0,
16.0,
11.0,
10.0,
]
np.testing.assert_array_equal(y_np[0, 0, 0, 0:20], expect)
expect = [
15.0,
7.0,
12.0,
8.0,
14.0,
10.0,
10.0,
11.0,
11.0,
11.0,
21.0,
6.0,
9.0,
13.0,
13.0,
11.0,
6.0,
9.0,
12.0,
12.0,
]
np.testing.assert_array_equal(y_np[8, 1, 300, 200:220], expect)
expect = [
10.0,
15.0,
9.0,
6.0,
4.0,
13.0,
10.0,
10.0,
13.0,
12.0,
9.0,
7.0,
10.0,
14.0,
7.0,
10.0,
8.0,
5.0,
10.0,
14.0,
]
np.testing.assert_array_equal(y_np[16, 1, 600, 400:420], expect)
expect = [
10.0,
9.0,
14.0,
12.0,
8.0,
9.0,
7.0,
8.0,
11.0,
10.0,
13.0,
8.0,
12.0,
9.0,
7.0,
8.0,
11.0,
11.0,
12.0,
5.0,
]
np.testing.assert_array_equal(y_np[24, 2, 900, 600:620], expect)
expect = [
15.0,
5.0,
11.0,
13.0,
12.0,
12.0,
13.0,
16.0,
9.0,
9.0,
7.0,
9.0,
13.0,
11.0,
15.0,
6.0,
11.0,
9.0,
10.0,
10.0,
]
np.testing.assert_array_equal(y_np[31, 2, 1023, 748:768], expect)
x = paddle.full([16, 1024, 1024], 5.0, dtype="float32")
y = paddle.poisson(x)
y_np = y.numpy()
expect = [
4.0,
5.0,
2.0,
9.0,
8.0,
7.0,
4.0,
7.0,
4.0,
7.0,
6.0,
3.0,
10.0,
7.0,
5.0,
7.0,
2.0,
5.0,
5.0,
6.0,
]
np.testing.assert_array_equal(y_np[0, 0, 100:120], expect)
expect = [
1.0,
4.0,
8.0,
11.0,
6.0,
5.0,
4.0,
4.0,
7.0,
4.0,
4.0,
7.0,
11.0,
6.0,
5.0,
3.0,
4.0,
6.0,
3.0,
3.0,
]
np.testing.assert_array_equal(y_np[4, 300, 300:320], expect)
expect = [
7.0,
5.0,
4.0,
6.0,
8.0,
5.0,
6.0,
7.0,
7.0,
7.0,
3.0,
10.0,
5.0,
10.0,
4.0,
5.0,
8.0,
7.0,
5.0,
7.0,
]
np.testing.assert_array_equal(y_np[8, 600, 600:620], expect)
expect = [
8.0,
6.0,
7.0,
4.0,
3.0,
0.0,
4.0,
6.0,
6.0,
4.0,
3.0,
10.0,
5.0,
1.0,
3.0,
8.0,
8.0,
2.0,
1.0,
4.0,
]
np.testing.assert_array_equal(y_np[12, 900, 900:920], expect)
expect = [
2.0,
1.0,
14.0,
3.0,
6.0,
5.0,
2.0,
2.0,
6.0,
5.0,
7.0,
4.0,
8.0,
4.0,
8.0,
4.0,
5.0,
7.0,
1.0,
7.0,
]
np.testing.assert_array_equal(y_np[15, 1023, 1000:1020], expect)
paddle.enable_static()
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
5fc93f5180bbbf9d6e8482073bcb89bf2d923892 | 2c68f9156087d6d338373f9737fee1a014e4546b | /src/connectedk8s/azext_connectedk8s/vendored_sdks/models/authentication_details_value.py | 982b4554803e85c978165d7b651f09cd77ff0c69 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | anpaz/azure-cli-extensions | 8b0d4071c49840da9883f13cb0fd1f4515246ee0 | 847fd487fe61e83f2a4163a9393edc9555267bc2 | refs/heads/master | 2023-04-23T17:22:53.427404 | 2021-01-29T17:48:28 | 2021-01-29T18:01:33 | 257,394,204 | 2 | 0 | MIT | 2021-01-28T10:31:07 | 2020-04-20T20:19:43 | Python | UTF-8 | Python | false | false | 890 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AuthenticationDetailsValue(Model):
"""Authentication token value.
:param token: Authentication token.
:type token: str
"""
_attribute_map = {
'token': {'key': 'token', 'type': 'str'},
}
def __init__(self, **kwargs):
super(AuthenticationDetailsValue, self).__init__(**kwargs)
self.token = kwargs.get('token', None)
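# Usage sketch (assumes the standard msrest Model API):
#   value = AuthenticationDetailsValue(token='<token>')
#   value.serialize() # -> {'token': '<token>'}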
| [
"[email protected]"
] | |
ab881c94078041feb7fe0fefd3fb0913233feb4b | e715be7aef31a307d2cf09d8a4ecf46ea662826f | /device_simulator/src/orchestator.py | e88831369f171c3e6acd4859ce8da628125314b0 | [] | no_license | GabrielMartinMoran/TFI_UNTREF | 0dcfd0d5b4d69c282ce732a21039c4a69a6530af | e4abc9bc93b840627a008e3af5f4d86b7cd30732 | refs/heads/main | 2023-06-23T11:06:35.138785 | 2021-07-14T13:21:14 | 2021-07-14T13:21:14 | 358,573,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,489 | py | import time
from datetime import datetime
from src.models.energy_sensor import EnergySensor
from src.models.console_display import ConsoleDisplay
from src.models.measure import Measure
from src.repositories.user_repository import UserRepository
from src.repositories.device_repository import DeviceRepository
import config
class Orchestator:
def __init__(self, device, ref_voltage, ref_current, user_secret):
self.sensor = EnergySensor(ref_voltage, ref_current)
self.display = ConsoleDisplay()
self.device = device
self.user_repository = UserRepository(user_secret)
self.device_repository = DeviceRepository(user_secret)
self.user = self.user_repository.get_user_data()
self.message = ''
def loop(self):
while True:
measure = Measure(
self.sensor.get_voltage(),
self.sensor.get_current(),
self.__get_timestamp()
)
try:
self.device_repository.add_measure(self.device.ble_id, measure)
self.message = 'Muestra enviada al servidor'
except Exception as e:
self.message = f'Error: {e}'
self.device.set_last_measure(measure)
self.display.set_ui(self.device, self.user, self.message)
self.display.draw()
time.sleep(config.TIME_BETWEEN_MEASUREMENTS)
def __get_timestamp(self):
return int(datetime.now().timestamp())
| [
"[email protected]"
] | |
5b6ae4546dda852369334665c79612273e580227 | 0eaf0d3f0e96a839f2ef37b92d4db5eddf4b5e02 | /past3/e.py | 6a5af7eb3744903d23f04c0de4ad30e373c33a27 | [] | no_license | silphire/atcoder | b7b02798a87048757745d99e8564397d1ca20169 | f214ef92f13bc5d6b290746d5a94e2faad20d8b0 | refs/heads/master | 2023-09-03T17:56:30.885166 | 2023-09-02T14:16:24 | 2023-09-02T14:16:24 | 245,110,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | n, m, q = map(int, input().split())
e = [set() for _ in range(n)]
for i in range(m):
u, v = map(int, input().split())
u -= 1
v -= 1
e[u].add(v)
e[v].add(u)
c = list(map(int, input().split()))
for i in range(q):
s = tuple(map(int, input().split()))
vv = s[1] - 1
print(c[vv])
if s[0] == 1:
for ee in e[vv]:
c[ee] = c[vv]
else:
c[vv] = s[2]
| [
"[email protected]"
] | |
1ee56e00fc1f6518207dde8d7e2c4ad70939ccb7 | 62b90959763f40954a7c6270bfb0529b536b2888 | /user/forms.py | e3f2e1e1a1c2c677d176cbff33084fa0620bcb3a | [
"MIT"
] | permissive | thiagosouzalink/blogphoto_Django | 68698c4fc684f0ba1d9dde795a07f72df32ead38 | 7d09f44b196897c4d31fff2eff8d2a164e44db27 | refs/heads/master | 2023-02-20T20:32:00.527084 | 2021-01-25T15:16:07 | 2021-01-25T15:16:07 | 332,782,817 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,226 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser
class UserForm(forms.ModelForm):
""" Formulário para cadastrar usuário"""
username = forms.CharField(
label='Usuário',
error_messages= {
'invalid': 'Nome de usuário inválido, informe apenas letras, números ou @, ., +, -, _',
'max_length': 'Você excedeu o limite de caracteres.',
'unique': 'Nome de usuário já existe.'
},
help_text= "Requeridos 150 caracteres ou menos. Letras, dígitos e @ /. / + / - / _ apenas",
widget=forms.TextInput(attrs={'placeholder':'Username'})
)
email = forms.EmailField(
label='E-mail',
error_messages={'invalid': 'E-mail inválido.'},
help_text='[email protected]',
widget=forms.TextInput(attrs={'placeholder':'E-mail'})
)
first_name = forms.CharField(
label='Nome',
error_messages={'max_length': 'Nome não pode ter mais de 30 caracteres'},
widget=forms.TextInput(attrs={'placeholder':'Nome'})
)
last_name = forms.CharField(
label='Sobrenome',
error_messages={'max_length': 'Sobrenome não pode ter mais de 150 caracteres'},
widget=forms.TextInput(attrs={'placeholder':'Sobrenome'})
)
telefone = forms.CharField(
label='Telefone',
help_text='(xx) xxxxx-xxxx',
widget=forms.TextInput(attrs={'placeholder':'Telefone...'})
)
password = forms.CharField(
label='Senha',
help_text="Digite uma senha segura",
widget=forms.PasswordInput(attrs={'placeholder':'Senha'})
)
password2 = forms.CharField(
label='Confirmar senha',
widget=forms.PasswordInput(attrs={'placeholder':'Repetir senha'})
)
class Meta:
model = CustomUser
fields = (
'username',
'email',
'first_name',
'last_name',
'telefone'
)
def clean_password2(self):
passwords = self.cleaned_data
if passwords['password2'] != passwords['password']:
raise forms.ValidationError("Senhas diferentes")
return passwords['password2']
def save(self, commit=True):
user = CustomUser.objects.create_user(
username=self.cleaned_data['username'],
email=self.cleaned_data['email'],
password=self.cleaned_data['password'],
first_name=self.cleaned_data['first_name'],
last_name=self.cleaned_data['last_name'],
telefone=self.cleaned_data['telefone']
)
return user
class UserProfileForm(forms.ModelForm):
""" Formulário para atualizar dados do usuário"""
facebook = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'https://www.facebook.com/seu_username'}), required=False)
instagram = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'https://www.instagram.com/seu_username'}), required=False)
twitter = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'https://www.twitter.com/seu_username'}), required=False)
class Meta:
model = CustomUser
fields = (
'username',
'email',
'first_name',
'last_name',
'telefone',
'facebook',
'instagram',
'twitter',
'bio'
)
class CustomUserCreateForm(UserCreationForm):
""" Formulário para criar usuário no painel administrativo"""
class Meta:
model = CustomUser
fields = ('first_name', 'last_name', 'telefone')
labels = {'username': 'Username/E-mail'}
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
user.username = self.cleaned_data["username"]
if commit:
user.save()
return user
class CustomUserChangeForm(UserChangeForm):
""" Atualizar usuário no painel administrativo"""
class Meta:
model = CustomUser
fields = ('email', 'first_name', 'last_name', 'telefone')
| [
"[email protected]"
] | |
9378b601770bd4c71b6a616ad9a7a895ad48a7b2 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5658571765186560_1/Python/StefanPochmann/D.py | feca86f2ed13ba681c7c3230c60ce03f3e2c21f7 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | #f = open('D.in')
#def input():
# return next(f)
T = int(input())
for x in range(1, T + 1):
X, R, C = map(int, input().split())
A = R * C
s, S = sorted((R, C))
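# Gabriel wins when the area R*C is divisible by X and the shorter side is
# large enough to tile with every X-omino; the per-X thresholds below (plus
# the extra area check for X == 5) encode that case analysis, and any X >= 7
# is always a win for Richard.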
gabriel = A % X == 0 and \
(X == 1 or
X == 2 or
X == 3 and s >= 2 or
X == 4 and s >= 3 or
X == 5 and s >= 3 and A > 15 or
X == 6 and s >= 4)
print('Case #{}: {}'.format(x, 'GABRIEL' if gabriel else 'RICHARD'))
| [
"[email protected]"
] | |
37dafa17ed9dc319a258358248dd28b2bbf33390 | ee8c4c954b7c1711899b6d2527bdb12b5c79c9be | /assessment2/amazon/run/core/controllers/old.py | b834b8f7e61106d74a5d8d14bccffde5155b3848 | [] | no_license | sqlconsult/byte | 02ac9899aebea4475614969b594bfe2992ffe29a | 548f6cb5038e927b54adca29caf02c981fdcecfc | refs/heads/master | 2021-01-25T14:45:42.120220 | 2018-08-11T23:45:31 | 2018-08-11T23:45:31 | 117,135,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | #!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
controller = Blueprint('old', __name__, url_prefix='/old')
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
# if title == 'Republic': # TODO 2
# return render_template('republic.html') # TODO 2
# else:
# pass
| [
"[email protected]"
] | |
21d962029b74b4eafe0c5b512082596bdf3800f2 | 95e7cf518b8d71270a7de6e7c7254861010f5035 | /garage/tf/algos/batch_polopt.py | 4245380d53581a9a7d6e72637049760557283eaf | [
"MIT"
] | permissive | reslthrowaway/garage | aaeadf7e918d80d467b2fcce61c50e8404480f83 | e921119434d205b6f644f139f6075516fb9ece74 | refs/heads/master | 2020-03-28T08:32:58.835060 | 2018-09-08T21:55:41 | 2018-09-08T21:55:41 | 147,972,769 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,354 | py | import time
import tensorflow as tf
from garage.algos import RLAlgorithm
import garage.misc.logger as logger
from garage.tf.plotter import Plotter
from garage.tf.samplers import BatchSampler
from garage.tf.samplers import VectorizedSampler
class BatchPolopt(RLAlgorithm):
"""
Base class for batch sampling-based policy optimization methods.
This includes various policy gradient methods like vpg, npg, ppo, trpo,
etc.
"""
def __init__(self,
env,
policy,
baseline,
scope=None,
n_itr=500,
start_itr=0,
batch_size=5000,
max_path_length=500,
discount=0.99,
gae_lambda=1,
plot=False,
pause_for_plot=False,
center_adv=True,
positive_adv=False,
store_paths=False,
whole_paths=True,
fixed_horizon=False,
sampler_cls=None,
sampler_args=None,
force_batch_sampler=False,
**kwargs):
"""
:param env: Environment
:param policy: Policy
:type policy: Policy
:param baseline: Baseline
:param scope: Scope for identifying the algorithm. Must be specified if
running multiple algorithms
simultaneously, each using different environments and policies
:param n_itr: Number of iterations.
:param start_itr: Starting iteration.
:param batch_size: Number of samples per iteration.
:param max_path_length: Maximum length of a single rollout.
:param discount: Discount.
:param gae_lambda: Lambda used for generalized advantage estimation.
:param plot: Plot evaluation run after each iteration.
:param pause_for_plot: Whether to pause before continuing when plotting.
:param center_adv: Whether to rescale the advantages so that they have
mean 0 and standard deviation 1.
:param positive_adv: Whether to shift the advantages so that they are
always positive. When used in conjunction with center_adv the
advantages will be standardized before shifting.
:param store_paths: Whether to save all paths data to the snapshot.
:return:
"""
self.env = env
self.policy = policy
self.baseline = baseline
self.scope = scope
self.n_itr = n_itr
self.start_itr = start_itr
self.batch_size = batch_size
self.max_path_length = max_path_length
self.discount = discount
self.gae_lambda = gae_lambda
self.plot = plot
self.pause_for_plot = pause_for_plot
self.center_adv = center_adv
self.positive_adv = positive_adv
self.store_paths = store_paths
self.whole_paths = whole_paths
self.fixed_horizon = fixed_horizon
if sampler_cls is None:
if self.policy.vectorized and not force_batch_sampler:
sampler_cls = VectorizedSampler
else:
sampler_cls = BatchSampler
if sampler_args is None:
sampler_args = dict()
self.sampler = sampler_cls(self, **sampler_args)
self.init_opt()
def start_worker(self, sess):
self.sampler.start_worker()
if self.plot:
self.plotter = Plotter(self.env, self.policy, sess)
self.plotter.start()
def shutdown_worker(self):
self.sampler.shutdown_worker()
if self.plot:
self.plotter.shutdown()
def obtain_samples(self, itr):
return self.sampler.obtain_samples(itr)
def process_samples(self, itr, paths):
return self.sampler.process_samples(itr, paths)
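# train() below is the template method of this base class: subclasses provide
# init_opt(), get_itr_snapshot() and optimize_policy(), while sampling,
# logging, snapshotting and plotting are handled here.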
def train(self, sess=None):
created_session = True if (sess is None) else False
if sess is None:
sess = tf.Session()
sess.__enter__()
sess.run(tf.global_variables_initializer())
self.start_worker(sess)
start_time = time.time()
last_average_return = None
for itr in range(self.start_itr, self.n_itr):
itr_start_time = time.time()
with logger.prefix('itr #%d | ' % itr):
logger.log("Obtaining samples...")
paths = self.obtain_samples(itr)
logger.log("Processing samples...")
samples_data = self.process_samples(itr, paths)
last_average_return = samples_data["average_return"]
logger.log("Logging diagnostics...")
self.log_diagnostics(paths)
logger.log("Optimizing policy...")
self.optimize_policy(itr, samples_data)
logger.log("Saving snapshot...")
params = self.get_itr_snapshot(itr, samples_data)
if self.store_paths:
params["paths"] = samples_data["paths"]
logger.save_itr_params(itr, params)
logger.log("Saved")
logger.record_tabular('Time', time.time() - start_time)
logger.record_tabular('ItrTime', time.time() - itr_start_time)
logger.dump_tabular(with_prefix=False)
if self.plot:
self.plotter.update_plot(self.policy, self.max_path_length)
if self.pause_for_plot:
input("Plotting evaluation run: Press Enter to "
"continue...")
self.shutdown_worker()
if created_session:
sess.close()
return last_average_return
def log_diagnostics(self, paths):
self.policy.log_diagnostics(paths)
self.baseline.log_diagnostics(paths)
def init_opt(self):
"""
Initialize the optimization procedure. If using tensorflow, this may
include declaring all the variables and compiling functions
"""
raise NotImplementedError
def get_itr_snapshot(self, itr, samples_data):
"""
Returns all the data that should be saved in the snapshot for this
iteration.
"""
raise NotImplementedError
def optimize_policy(self, itr, samples_data):
raise NotImplementedError
| [
"[email protected]"
] | |
64f1c7bd8f0f8bab932d8e95efb828f317b84145 | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/438806_catenateFilesFactory/recipe-438806.py | 59a8c77281e6b148e80c4e006fc5987455451ecf | [
"Python-2.0",
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 2,449 | py | import os
def catenateFilesFactory(isTextFiles=True, isClearTgt=True, isCreateTgt=True):
"""return a catenateFiles function parameterized by the factory arguments.
isTextFiles: Catenate text files. If the last line of a non-empty file
is not terminated by an EOL, append an EOL to it.
isClearTgt If the target file already exists, clear its original
contents before appending the source files.
isCreateTgt If the target file does not already exist, and this
parameter is True, create the target file; otherwise raise
an IOError.
"""
eol = os.linesep
lenEol = len(eol)
def catenateFiles(tgtFile, *srcFiles):
isTgtAppendEol = False
if os.path.isfile(tgtFile):
if isClearTgt:
tgt = open(tgtFile, 'wb')
tgt.close()
elif isTextFiles:
tgt = open(tgtFile, 'rb')
data = tgt.read()
tgt.close()
if len(data) and (len(data) < lenEol or data[-lenEol:] != eol):
isTgtAppendEol = True
elif not isCreateTgt:
raise IOError, "catenateFiles target file '%s' not found" % (
tgtFile)
tgt = open(tgtFile, 'ab')
if isTgtAppendEol:
tgt.write(eol)
for srcFile in srcFiles:
src = open(srcFile, 'rb')
data = src.read()
src.close()
tgt.write(data)
if (isTextFiles and len(data) and
(len(data) < lenEol or data[-lenEol:] != eol)):
tgt.write(eol)
tgt.close()
return
# Support reflection and doc string.
catenateFiles.isTextFiles = isTextFiles
catenateFiles.isClearTgt = isClearTgt
catenateFiles.isCreateTgt = isCreateTgt
if isTextFiles:
docFileType = "text"
else:
docFileType = "binary"
if isCreateTgt:
docCreate = "Create tgtFile if it does not already exist."
else:
docCreate = "Require that tgtFile already exists."
if isClearTgt:
docClear = "replace"
else:
docClear = "append to"
catenateFiles.__doc__ = """Catenate %s srcFiles to %s the tgtFile.
%s
All of the srcFiles must exist; otherwise raise an IOError.
""" % (docFileType, docClear, docCreate)
return catenateFiles
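# Usage sketch (hypothetical file names):
#   catenateText = catenateFilesFactory()
#   catenateText('all.txt', 'part1.txt', 'part2.txt')
#   catenateBinary = catenateFilesFactory(isTextFiles=False, isClearTgt=False)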
| [
"[email protected]"
] | |
8402be75cce1ddbd62ff54e6ca1af746d547ba7e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04044/s043863854.py | 77675a21e84977960d992326c6742602cc68d034 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | N, L = map(int,input().split())
word = []
count = 0
while N > count:
S = input()
word.append(S)
count += 1
word = sorted(word)
ans = ''.join(word)
print(ans) | [
"[email protected]"
] | |
5f2b378d006e7aa2e46251661e0d4e03d3b9810f | d452e34253561a47b974e260dabd8dcda6e750a2 | /supervised_learning/0x0B-face_verification/5-main.py | 0859b3e7ecf4dc518afbab30ba555f77a521f265 | [] | no_license | JohnCook17/holbertonschool-machine_learning | 57fcb5b9d351826c3e3d5478b3b4fbe16cdfac9f | 4200798bdbbe828db94e5585b62a595e3a96c3e6 | refs/heads/master | 2021-07-07T10:16:21.583107 | 2021-04-11T20:38:33 | 2021-04-11T20:38:33 | 255,424,823 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | #!/usr/bin/env python3
from align import FaceAlign
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import numpy as np
fa = FaceAlign('models/landmarks.dat')
test_img = mpimg.imread('HBTN/KirenSrinivasan.jpg')
anchors = np.array([[0.194157, 0.16926692], [0.7888591, 0.15817115], [0.4949509, 0.5144414]], dtype=np.float32)
aligned = fa.align(test_img, np.array([36, 45, 33]), anchors, 96)
plt.imshow(aligned)
ax = plt.gca()
for anchor in anchors:
ax.add_patch(Circle(anchor * 96, 1))
plt.show()
| [
"[email protected]"
] | |
7508ed13cb989f8e06150d4a366684e8cb626f4c | 890c8b8e90e516a5a3880eca9b2d217662fe7d84 | /armulator/armv6/opcodes/abstract_opcodes/usad8.py | 6568222b03d6464463dd16b171bf86a89484d155 | [
"MIT"
] | permissive | doronz88/armulator | b864135996f876c7857b79a314d4aa06cc19c549 | 0294feac2785c8947e5943ac0c34f941ee4b5fff | refs/heads/master | 2022-11-05T08:14:42.405335 | 2020-06-18T23:53:17 | 2020-06-18T23:53:17 | 273,363,061 | 2 | 0 | null | 2020-06-18T23:51:03 | 2020-06-18T23:51:02 | null | UTF-8 | Python | false | false | 1,008 | py | from armulator.armv6.opcodes.abstract_opcode import AbstractOpcode
from bitstring import BitArray
class Usad8(AbstractOpcode):
def __init__(self, m, d, n):
super(Usad8, self).__init__()
self.m = m
self.d = d
self.n = n
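# USAD8 sums the absolute differences of the four unsigned byte lanes of
# Rn and Rm and writes the scalar result to Rd.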
def execute(self, processor):
if processor.condition_passed():
absdiff1 = abs(
processor.registers.get(self.n)[24:32].uint - processor.registers.get(self.m)[24:32].uint)
absdiff2 = abs(
processor.registers.get(self.n)[16:24].uint - processor.registers.get(self.m)[16:24].uint)
absdiff3 = abs(
processor.registers.get(self.n)[8:16].uint - processor.registers.get(self.m)[8:16].uint)
absdiff4 = abs(
processor.registers.get(self.n)[0:8].uint - processor.registers.get(self.m)[0:8].uint)
result = absdiff1 + absdiff2 + absdiff3 + absdiff4
processor.registers.set(self.d, BitArray(uint=result, length=32))
| [
"[email protected]"
] | |
1a27b7cb18413d5522bf3d1a3fb9298b4be330c4 | 6810a482759afd585db7bb0b85fd0416f0450e6d | /Open Kattis/sibice.py | f2d6c7c2fb3679fd80f435ba5541e145a8be4611 | [] | no_license | BenRStutzman/kattis | 01b000ac2353c8b8000c6bddec3698f66b0198ef | 005720f853e7f531a264227d0d9aaa19d4d7cf1b | refs/heads/master | 2020-07-15T23:52:45.785021 | 2019-11-09T03:28:06 | 2019-11-09T03:28:06 | 205,675,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | import sys
[num, box_length, box_width] = [int(num) for num in sys.stdin.readline().split()]
diag = box_length**2 + box_width**2
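# A match fits in the box iff its length is at most the diagonal;
# comparing squared lengths avoids the square root.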
for i in range(num):
match_length = int(sys.stdin.readline())
if match_length**2 <= diag:
print("DA")
else:
print("NE")
| [
"[email protected]"
] | |
458af30fcbf8a51195ccfe3366dbf2663797500d | 993ef8924418866f932396a58e3ad0c2a940ddd3 | /Production/python/PrivateSamples/EMJ_UL16APV_mMed-2500_mDark-20_ctau-5_unflavored-down_cff.py | a1176e87cc87095c340aa9cf5cc79a9acb526034 | [] | no_license | TreeMaker/TreeMaker | 48d81f6c95a17828dbb599d29c15137cd6ef009a | 15dd7fe9e9e6f97d9e52614c900c27d200a6c45f | refs/heads/Run2_UL | 2023-07-07T15:04:56.672709 | 2023-07-03T16:43:17 | 2023-07-03T16:43:17 | 29,192,343 | 16 | 92 | null | 2023-07-03T16:43:28 | 2015-01-13T13:59:30 | Python | UTF-8 | Python | false | false | 1,991 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-1.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-10.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-2.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-3.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-4.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-5.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-6.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-7.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-8.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16APV/step4_MINIAODv2_mMed-2500_mDark-20_ctau-5_unflavored-down_n-500_part-9.root',
] )
| [
"[email protected]"
] | |
a27806e252e67dc407b440d4781e0d23bf86fc34 | f3827ae39e077daf5507959a13d1ac4a782fe084 | /src/accounts/urls.py | c29e329060e8daed823e775738b48945589f62da | [] | no_license | achiengcindy/ExtendingDjangoAuth | c6bc2c5360d90378d7d96efb3132506ad10349d9 | 19214ef7ef9ccdcc66e4ec15fa9e22e5fd5e24f3 | refs/heads/master | 2020-03-26T20:09:34.687774 | 2018-08-28T19:32:46 | 2018-08-28T19:32:46 | 145,308,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | from django.urls import path
from .views import register, home, edit, activate, account_activation_sent
urlpatterns = [
path('', home, name='home'),
path('register/', register, name='register'),
path('edit/', edit, name='edit'),
path('activate/<slug:uidb64>/<slug:token>/', activate, name='activate'),
path('account_activation_sent/', account_activation_sent, name='account_activation_sent')
]
| [
"[email protected]"
] | |
702fb4c5e14061b0357f79b59aab9e34ee74c5ae | e708ae6207ca775fe6f5106ea971c994b29ffac0 | /examples/shrec17/dataset.py | a5c07eee3fcfc1e87c9f3610656ea664095326d1 | [
"MIT"
] | permissive | arewellborn/s2cnn | 6f4304cd22456bfaefe7623e53ac7b8afda22471 | fd734b845f1d313ce13a6f1161c8fa383b28a9e5 | refs/heads/master | 2021-05-23T14:24:31.276613 | 2021-01-20T04:37:53 | 2021-01-20T04:37:53 | 253,338,038 | 0 | 0 | MIT | 2020-04-05T21:43:51 | 2020-04-05T21:43:51 | null | UTF-8 | Python | false | false | 11,432 | py | # pylint: disable=E1101,R,C
import csv
import glob
import os
import re
import numpy as np
import torch
import torch.utils.data
import trimesh
import logging
logging.getLogger('pyembree').disabled = True
def rotmat(a, b, c, hom_coord=False): # apply to mesh using mesh.apply_transform(rotmat(a,b,c, True))
"""
Create a rotation matrix with an optional fourth homogeneous coordinate
:param a, b, c: ZYZ-Euler angles
"""
def z(a):
return np.array([[np.cos(a), np.sin(a), 0, 0],
[-np.sin(a), np.cos(a), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
def y(a):
return np.array([[np.cos(a), 0, np.sin(a), 0],
[0, 1, 0, 0],
[-np.sin(a), 0, np.cos(a), 0],
[0, 0, 0, 1]])
r = z(a).dot(y(b)).dot(z(c)) # pylint: disable=E1101
if hom_coord:
return r
else:
return r[:3, :3]
def make_sgrid(b, alpha, beta, gamma):
from lie_learn.spaces import S2
theta, phi = S2.meshgrid(b=b, grid_type='SOFT')
sgrid = S2.change_coordinates(np.c_[theta[..., None], phi[..., None]], p_from='S', p_to='C')
sgrid = sgrid.reshape((-1, 3))
R = rotmat(alpha, beta, gamma, hom_coord=False)
sgrid = np.einsum('ij,nj->ni', R, sgrid)
return sgrid
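# The returned grid has shape (4*b*b, 3): a 2b x 2b SOFT sampling of the
# sphere expressed as unit vectors in Cartesian coordinates (optionally rotated).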
def render_model(mesh, sgrid):
# Cast rays
# triangle_indices = mesh.ray.intersects_first(ray_origins=sgrid, ray_directions=-sgrid)
index_tri, index_ray, loc = mesh.ray.intersects_id(
ray_origins=sgrid, ray_directions=-sgrid, multiple_hits=False, return_locations=True)
loc = loc.reshape((-1, 3)) # fix bug if loc is empty
# Each ray is in 1-to-1 correspondence with a grid point. Find the position of these points
grid_hits = sgrid[index_ray]
grid_hits_normalized = grid_hits / np.linalg.norm(grid_hits, axis=1, keepdims=True)
# Compute the distance from the grid points to the intersection points
dist = np.linalg.norm(grid_hits - loc, axis=-1)
# For each intersection, look up the normal of the triangle that was hit
normals = mesh.face_normals[index_tri]
normalized_normals = normals / np.linalg.norm(normals, axis=1, keepdims=True)
# Construct spherical images
dist_im = np.ones(sgrid.shape[0])
dist_im[index_ray] = dist
# dist_im = dist_im.reshape(theta.shape)
# shaded_im = np.zeros(sgrid.shape[0])
# shaded_im[index_ray] = normals.dot(light_dir)
# shaded_im = shaded_im.reshape(theta.shape) + 0.4
n_dot_ray_im = np.zeros(sgrid.shape[0])
# n_dot_ray_im[index_ray] = np.abs(np.einsum("ij,ij->i", normals, grid_hits_normalized))
n_dot_ray_im[index_ray] = np.einsum("ij,ij->i", normalized_normals, grid_hits_normalized)
nx, ny, nz = normalized_normals[:, 0], normalized_normals[:, 1], normalized_normals[:, 2]
gx, gy, gz = grid_hits_normalized[:, 0], grid_hits_normalized[:, 1], grid_hits_normalized[:, 2]
wedge_norm = np.sqrt((nx * gy - ny * gx) ** 2 + (nx * gz - nz * gx) ** 2 + (ny * gz - nz * gy) ** 2)
n_wedge_ray_im = np.zeros(sgrid.shape[0])
n_wedge_ray_im[index_ray] = wedge_norm
# Combine channels to construct final image
# im = dist_im.reshape((1,) + dist_im.shape)
im = np.stack((dist_im, n_dot_ray_im, n_wedge_ray_im), axis=0)
return im
def rnd_rot():
a = np.random.rand() * 2 * np.pi
z = np.random.rand() * 2 - 1
c = np.random.rand() * 2 * np.pi
rot = rotmat(a, np.arccos(z), c, True)
return rot
class ToMesh:
def __init__(self, random_rotations=False, random_translation=0):
self.rot = random_rotations
self.tr = random_translation
def __call__(self, path):
mesh = trimesh.load_mesh(path)
mesh.remove_degenerate_faces()
mesh.fix_normals()
mesh.fill_holes()
mesh.remove_duplicate_faces()
mesh.remove_infinite_values()
mesh.remove_unreferenced_vertices()
mesh.apply_translation(-mesh.centroid)
r = np.max(np.linalg.norm(mesh.vertices, axis=-1))
mesh.apply_scale(1 / r)
if self.tr > 0:
tr = np.random.rand() * self.tr
rot = rnd_rot()
mesh.apply_transform(rot)
mesh.apply_translation([tr, 0, 0])
if not self.rot:
mesh.apply_transform(rot.T)
if self.rot:
mesh.apply_transform(rnd_rot())
r = np.max(np.linalg.norm(mesh.vertices, axis=-1))
mesh.apply_scale(0.99 / r)
return mesh
def __repr__(self):
return self.__class__.__name__ + '(rotation={0}, translation={1})'.format(self.rot, self.tr)
class ProjectOnSphere:
def __init__(self, bandwidth):
self.bandwidth = bandwidth
self.sgrid = make_sgrid(bandwidth, alpha=0, beta=0, gamma=0)
def __call__(self, mesh):
im = render_model(mesh, self.sgrid)
im = im.reshape(3, 2 * self.bandwidth, 2 * self.bandwidth)
from scipy.spatial.qhull import QhullError # pylint: disable=E0611
try:
convex_hull = mesh.convex_hull
except QhullError:
convex_hull = mesh
hull_im = render_model(convex_hull, self.sgrid)
hull_im = hull_im.reshape(3, 2 * self.bandwidth, 2 * self.bandwidth)
im = np.concatenate([im, hull_im], axis=0)
assert len(im) == 6
im[0] -= 0.75
im[0] /= 0.26
im[1] -= 0.59
im[1] /= 0.50
im[2] -= 0.54
im[2] /= 0.29
im[3] -= 0.52
im[3] /= 0.19
im[4] -= 0.80
im[4] /= 0.18
im[5] -= 0.51
im[5] /= 0.25
im = im.astype(np.float32) # pylint: disable=E1101
return im
def __repr__(self):
return self.__class__.__name__ + '(bandwidth={0})'.format(self.bandwidth)
class CacheNPY:
def __init__(self, prefix, repeat, transform, pick_randomly=True):
self.transform = transform
self.prefix = prefix
self.repeat = repeat
self.pick_randomly = pick_randomly
def check_trans(self, file_path):
print("transform {}...".format(file_path))
try:
return self.transform(file_path)
except:
print("Exception during transform of {}".format(file_path))
raise
def __call__(self, file_path):
head, tail = os.path.split(file_path)
root, _ = os.path.splitext(tail)
npy_path = os.path.join(head, self.prefix + root + '_{0}.npy')
exists = [os.path.exists(npy_path.format(i)) for i in range(self.repeat)]
if self.pick_randomly and all(exists):
i = np.random.randint(self.repeat)
try: return np.load(npy_path.format(i))
except OSError: exists[i] = False
if self.pick_randomly:
img = self.check_trans(file_path)
np.save(npy_path.format(exists.index(False)), img)
return img
output = []
for i in range(self.repeat):
try:
img = np.load(npy_path.format(i))
except (OSError, FileNotFoundError):
img = self.check_trans(file_path)
np.save(npy_path.format(i), img)
output.append(img)
return output
def __repr__(self):
return self.__class__.__name__ + '(prefix={0}, transform={1})'.format(self.prefix, self.transform)
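# Usage sketch (an assumed composition in the spirit of the s2cnn example
# scripts; the "b64_" prefix and bandwidth 64 are illustrative values):
#   import torchvision
#   transform = CacheNPY("b64_", repeat=1, transform=torchvision.transforms.Compose(
#       [ToMesh(random_rotations=True, random_translation=0.1),
#        ProjectOnSphere(bandwidth=64)]))
#   train_set = Shrec17("data", "train", perturbed=True, download=True, transform=transform)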
class Shrec17(torch.utils.data.Dataset):
'''
Download SHREC17 and output valid obj files content
'''
url_data = 'http://3dvision.princeton.edu/ms/shrec17-data/{}.zip'
url_label = 'http://3dvision.princeton.edu/ms/shrec17-data/{}.csv'
def __init__(self, root, dataset, perturbed=True, download=False, transform=None, target_transform=None):
self.root = os.path.expanduser(root)
if dataset not in ["train", "test", "val"]:
raise ValueError("Invalid dataset")
self.dir = os.path.join(self.root, dataset + ("_perturbed" if perturbed else ""))
self.transform = transform
self.target_transform = target_transform
if download:
self.download(dataset, perturbed)
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
self.files = sorted(glob.glob(os.path.join(self.dir, '*.obj')))
if dataset != "test":
with open(os.path.join(self.root, dataset + ".csv"), 'rt') as f:
reader = csv.reader(f)
self.labels = {}
for row in [x for x in reader][1:]:
self.labels[row[0]] = (row[1], row[2])
else:
self.labels = None
def __getitem__(self, index):
img = f = self.files[index]
if self.transform is not None:
img = self.transform(img)
if self.labels is not None:
i = os.path.splitext(os.path.basename(f))[0]
target = self.labels[i]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
else:
return img
def __len__(self):
return len(self.files)
def _check_exists(self):
files = glob.glob(os.path.join(self.dir, "*.obj"))
return len(files) > 0
def _download(self, url):
import requests
filename = url.split('/')[-1]
file_path = os.path.join(self.root, filename)
if os.path.exists(file_path):
return file_path
print('Downloading ' + url)
r = requests.get(url, stream=True)
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=16 * 1024 ** 2):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return file_path
def _unzip(self, file_path):
import zipfile
if os.path.exists(self.dir):
return
print('Unzip ' + file_path)
zip_ref = zipfile.ZipFile(file_path, 'r')
zip_ref.extractall(self.root)
zip_ref.close()
os.unlink(file_path)
def _fix(self):
print("Fix obj files")
r = re.compile(r'f (\d+)[/\d]* (\d+)[/\d]* (\d+)[/\d]*')
path = os.path.join(self.dir, "*.obj")
files = sorted(glob.glob(path))
c = 0
for i, f in enumerate(files):
with open(f, "rt") as x:
y = x.read()
yy = r.sub(r"f \1 \2 \3", y)
if y != yy:
c += 1
with open(f, "wt") as x:
x.write(yy)
print("{}/{} {} fixed ".format(i + 1, len(files), c), end="\r")
def download(self, dataset, perturbed):
if self._check_exists():
return
# download files
try:
os.makedirs(self.root)
except OSError as e:
if e.errno == os.errno.EEXIST:
pass
else:
raise
url = self.url_data.format(dataset + ("_perturbed" if perturbed else ""))
file_path = self._download(url)
self._unzip(file_path)
self._fix()
if dataset != "test":
url = self.url_label.format(dataset)
self._download(url)
print('Done!')
| [
"[email protected]"
] | |
9d1544e03e6517060106ba3d8555c94351c4e3c9 | b5fb45288ed2a204692051ab78e72d8aa6e5accd | /argo_data_scripts/util/count_concurrent.py | d13a0c91034e0cdcecb1c9531b3779cf08de3da0 | [
"Apache-2.0"
] | permissive | nithinksath96/MMdetection_TensorRT_FP16 | d4987f003798f5d6d4fe5bde2f30dd5ee2e8596d | c8379b209d4deeff9350baf5bbedfc95fb8941f4 | refs/heads/master | 2023-02-13T20:00:21.834541 | 2021-01-06T09:24:20 | 2021-01-06T09:24:20 | 327,260,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,792 | py | # given rt_ series input, count the max number of concurrent jobs
# current implementation only applies to inf results, where processing starts immediately
import argparse, json, pickle
from os.path import join, isfile
from tqdm import tqdm
import numpy as np
from pycocotools.coco import COCO
import sys; sys.path.insert(0, '..'); sys.path.insert(0, '.')
from util import mkdir2
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--annot-path', type=str, required=True)
parser.add_argument('--fps', type=float, default=30)
parser.add_argument('--result-dir', type=str, required=True)
parser.add_argument('--out-dir', type=str, default=None)
parser.add_argument('--type', type=str, default='det')
parser.add_argument('--overwrite', action='store_true', default=False)
opts = parser.parse_args()
return opts
def main():
opts = parse_args()
out_dir = mkdir2(opts.out_dir) if opts.out_dir else opts.result_dir
db = COCO(opts.annot_path)
seqs = db.dataset['sequences']
seq_dirs = db.dataset['seq_dirs']
n_concurrent = []
for sid, seq in enumerate(tqdm(seqs)):
frame_list = [img for img in db.imgs.values() if img['sid'] == sid]
results = pickle.load(open(join(opts.result_dir, seq + '.pkl'), 'rb'))
# use raw results when possible in case we change class subset during evaluation
if opts.type == 'det':
timestamps = results['timestamps']
input_fidx = results['input_fidx']
else:
det1_timestamps = results['det1_timestamps']
det2_timestamps = results['det2_timestamps']
det1_input_fidx = results['det1_input_fidx']
det2_input_fidx = results['det2_input_fidx']
timestamps = np.concatenate((det1_timestamps, det2_timestamps))
input_fidx = np.concatenate((det1_input_fidx, det2_input_fidx))
t_start = np.asarray(input_fidx)/opts.fps
t_end = np.asarray(timestamps)
t_all = np.concatenate((t_start, t_end))
order = np.argsort(t_all)
n_output = len(t_start)
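# Sweep the merged start/end timestamps in time order: an index below
# n_output marks a job start (+1 in flight), any other index a completion (-1).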
n_current = 0
max_current = 0
for i in order:
if i < n_output:
# start
n_current += 1
max_current = max(max_current, n_current)
else:
# end
n_current -= 1
n_concurrent.append(max_current)
print(f'Max number of concurrent jobs {max(n_concurrent)}')
out_path = join(out_dir, 'n_concurrent.pkl')
if opts.overwrite or not isfile(out_path):
pickle.dump(n_concurrent, open(out_path, 'wb'))
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
4d36a1fe52245f8e1c029f1e0d036ca2a1b932c1 | b3a2ac9eb02a6eef9e6f3504afabc6400f894f56 | /clld/lib/latex.py | 325cb2c7397bf818455945818ba172a534d29a06 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | Anaphory/clld | 9f67c477e228eff05fdc7b7fa4310c703af02108 | bed1a6d08275a41fd7b5d13a0af19e4e538d186c | refs/heads/master | 2021-01-17T04:50:05.382411 | 2017-02-16T11:20:11 | 2017-02-16T11:20:11 | 66,831,136 | 0 | 0 | null | 2016-08-29T09:43:06 | 2016-08-29T09:43:05 | null | UTF-8 | Python | false | false | 15,747 | py | """
Character translation utilities for LaTeX-formatted text.
Usage:
- unicode(string,'latex')
- ustring.decode('latex')
are both available just by letting "import latex" find this file.
- unicode(string,'latex+latin1')
- ustring.decode('latex+latin1')
where latin1 can be replaced by any other known encoding, also
become available by calling latex.register().
We also make public a dictionary latex_equivalents,
mapping ord(unicode char) to LaTeX code.
D. Eppstein, October 2003.
"""
from __future__ import generators
import codecs
import re
from six import text_type, Iterator, unichr
def register():
"""Enable encodings of the form 'latex+x' where x describes another encoding.
Unicode characters are translated to or from x when possible, otherwise
expanded to latex.
"""
codecs.register(_registry)
def getregentry():
"""Encoding module API."""
return _registry('latex') # pragma: no cover
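# Usage sketch (decoding only; register() instead enables the
# 'latex+<encoding>' spellings through the codecs machinery):
#   decode = _registry('latex')[1]
#   decode("P\\'{e}rez")[0] # -> u'P\xe9rez'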
def _registry(encoding):
if encoding == 'latex':
encoding = None # pragma: no cover
elif encoding.startswith('latex+'):
encoding = encoding[6:]
else:
return None # pragma: no cover
class Codec(codecs.Codec):
def encode(self, input, errors='strict'): # pragma: no cover
"""Convert unicode string to latex."""
output = []
for c in input:
if encoding:
try:
output.append(c.encode(encoding))
continue
except:
pass
if ord(c) in latex_equivalents:
output.append(latex_equivalents[ord(c)])
else:
output += ['{\\char', str(ord(c)), '}']
return ''.join(output), len(input)
def decode(self, input, errors='strict'):
"""Convert latex source string to unicode."""
if encoding:
input = text_type(input, encoding, errors)
# Note: we may get buffer objects here.
# It is not permissible to call join on buffer objects
# but we can make them joinable by calling unicode.
# This should always be safe since we are supposed
# to be producing unicode output anyway.
x = map(text_type, _unlatex(input))
return u''.join(x), len(input)
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
return (Codec().encode, Codec().decode, StreamReader, StreamWriter)
def _tokenize(tex): # pragma: no cover
"""Convert latex source into sequence of single-token substrings."""
start = 0
try:
# skip quickly across boring stuff
pos = next(_stoppers.finditer(tex)).span()[0]
except StopIteration:
yield tex
return
while 1:
if pos > start:
yield tex[start:pos]
if (
tex[start] == '\\'
and not (tex[pos - 1].isdigit() and tex[start + 1].isalpha())
):
while pos < len(tex) and tex[pos].isspace(): # skip blanks after csname
pos += 1
while pos < len(tex) and tex[pos] in _ignore: # pragma: no cover
pos += 1 # flush control characters
if pos >= len(tex):
return
start = pos
if tex[pos:pos + 2] in ('$$', '/~'): # protect ~ in urls
pos += 2 # pragma: no cover
elif tex[pos].isdigit():
while pos < len(tex) and tex[pos].isdigit():
pos += 1
elif tex[pos] == '-':
while pos < len(tex) and tex[pos] == '-':
pos += 1
elif tex[pos] != '\\' or pos == len(tex) - 1:
pos += 1
elif not tex[pos + 1].isalpha():
pos += 2
else:
pos += 1
while pos < len(tex) and tex[pos].isalpha():
pos += 1
if tex[start:pos] == '\\char' or tex[start:pos] == '\\accent':
while pos < len(tex) and tex[pos].isdigit(): # pragma: no cover
pos += 1
class _unlatex(Iterator): # pragma: no cover
"""Convert tokenized tex into sequence of unicode strings. Helper for decode()."""
def __iter__(self):
"""Turn self into an iterator. It already is one, nothing to do."""
return self
def __init__(self, tex):
"""Create a new token converter from a string."""
self.tex = tuple(_tokenize(tex)) # turn tokens into indexable list
self.pos = 0 # index of first unprocessed token
self.lastoutput = 'x' # lastoutput must always be nonempty string
def __getitem__(self, n):
"""Return token at offset n from current pos."""
p = self.pos + n
t = self.tex
return p < len(t) and t[p] or None
def __next__(self):
"""Find and return another piece of converted output."""
if self.pos >= len(self.tex):
raise StopIteration
nextoutput = self.chunk()
if (
self.lastoutput[0] == '\\'
and self.lastoutput[-1].isalpha()
and nextoutput[0].isalpha()
):
# add extra space to terminate csname
nextoutput = ' ' + nextoutput # pragma: no cover
self.lastoutput = nextoutput
return nextoutput
def chunk(self):
"""Grab another set of input tokens and convert them to an output string."""
for delta, c in self.candidates(0): # pragma: no cover
if c in _l2u:
self.pos += delta
return unichr(_l2u[c])
elif len(c) == 2 and c[1] == 'i' and (c[0], '\\i') in _l2u:
self.pos += delta # correct failure to undot i
return unichr(_l2u[(c[0], '\\i')])
elif len(c) == 1 and c[0].startswith('\\char') and c[0][5:].isdigit():
self.pos += delta
return unichr(int(c[0][5:]))
# nothing matches, just pass through token as-is
self.pos += 1
return self[-1]
def candidates(self, offset):
"""Generate pairs delta,c.
Where c is a token or tuple of tokens from tex
(after deleting extraneous brackets starting at pos) and delta
is the length of the tokens prior to bracket deletion.
"""
t = self[offset]
if t in _blacklist:
return
elif t == '{':
for delta, c in self.candidates(offset + 1):
if self[offset + delta + 1] == '}':
yield delta + 2, c
elif t == '\\mbox':
for delta, c in self.candidates(offset + 1): # pragma: no cover
yield delta + 1, c
elif t == '$' and self[offset + 2] == '$':
yield 3, (t, self[offset + 1], t) # pragma: no cover
else:
q = self[offset + 1]
if q == '{' and self[offset + 3] == '}':
yield 4, (t, self[offset + 2])
elif q:
yield 2, (t, q)
yield 1, t
latex_equivalents = {
0x0009: ' ',
0x000a: '\n',
0x0023: '{\#}',
0x0026: '{\&}',
0x00a0: '{~}',
0x00a1: '{!`}',
0x00a2: '{\\not{c}}',
0x00a3: '{\\pounds}',
0x00a7: '{\\S}',
0x00a8: '{\\"{}}',
0x00a9: '{\\copyright}',
0x00af: '{\\={}}',
0x00ac: '{\\neg}',
0x00ad: '{\\-}',
0x00b0: '{\\mbox{$^\\circ$}}',
0x00b1: '{\\mbox{$\\pm$}}',
0x00b2: '{\\mbox{$^2$}}',
0x00b3: '{\\mbox{$^3$}}',
0x00b4: "{\\'{}}",
0x00b5: '{\\mbox{$\\mu$}}',
0x00b6: '{\\P}',
0x00b7: '{\\mbox{$\\cdot$}}',
0x00b8: '{\\c{}}',
0x00b9: '{\\mbox{$^1$}}',
0x00bf: '{?`}',
0x00c0: '{\\`A}',
0x00c1: "{\\'A}",
0x00c2: '{\\^A}',
0x00c3: '{\\~A}',
0x00c4: '{\\"A}',
0x00c5: '{\\AA}',
0x00c6: '{\\AE}',
0x00c7: '{\\c{C}}',
0x00c8: '{\\`E}',
0x00c9: "{\\'E}",
0x00ca: '{\\^E}',
0x00cb: '{\\"E}',
0x00cc: '{\\`I}',
0x00cd: "{\\'I}",
0x00ce: '{\\^I}',
0x00cf: '{\\"I}',
0x00d1: '{\\~N}',
0x00d2: '{\\`O}',
0x00d3: "{\\'O}",
0x00d4: '{\\^O}',
0x00d5: '{\\~O}',
0x00d6: '{\\"O}',
0x00d7: '{\\mbox{$\\times$}}',
0x00d8: '{\\O}',
0x00d9: '{\\`U}',
0x00da: "{\\'U}",
0x00db: '{\\^U}',
0x00dc: '{\\"U}',
0x00dd: "{\\'Y}",
0x00df: '{\\ss}',
0x00e0: '{\\`a}',
0x00e1: "{\\'a}",
0x00e2: '{\\^a}',
0x00e3: '{\\~a}',
0x00e4: '{\\"a}',
0x00e5: '{\\aa}',
0x00e6: '{\\ae}',
0x00e7: '{\\c{c}}',
0x00e8: '{\\`e}',
0x00e9: "{\\'e}",
0x00ea: '{\\^e}',
0x00eb: '{\\"e}',
0x00ec: '{\\`\\i}',
0x00ed: "{\\'\\i}",
0x00ee: '{\\^\\i}',
0x00ef: '{\\"\\i}',
0x00f1: '{\\~n}',
0x00f2: '{\\`o}',
0x00f3: "{\\'o}",
0x00f4: '{\\^o}',
0x00f5: '{\\~o}',
0x00f6: '{\\"o}',
0x00f7: '{\\mbox{$\\div$}}',
0x00f8: '{\\o}',
0x00f9: '{\\`u}',
0x00fa: "{\\'u}",
0x00fb: '{\\^u}',
0x00fc: '{\\"u}',
0x00fd: "{\\'y}",
0x00ff: '{\\"y}',
0x0100: '{\\=A}',
0x0101: '{\\=a}',
0x0102: '{\\u{A}}',
0x0103: '{\\u{a}}',
0x0104: '{\\c{A}}',
0x0105: '{\\c{a}}',
0x0106: "{\\'C}",
0x0107: "{\\'c}",
0x0108: "{\\^C}",
0x0109: "{\\^c}",
0x010a: "{\\.C}",
0x010b: "{\\.c}",
0x010c: "{\\v{C}}",
0x010d: "{\\v{c}}",
0x010e: "{\\v{D}}",
0x010f: "{\\v{d}}",
0x0112: '{\\=E}',
0x0113: '{\\=e}',
0x0114: '{\\u{E}}',
0x0115: '{\\u{e}}',
0x0116: '{\\.E}',
0x0117: '{\\.e}',
0x0118: '{\\c{E}}',
0x0119: '{\\c{e}}',
0x011a: "{\\v{E}}",
0x011b: "{\\v{e}}",
0x011c: '{\\^G}',
0x011d: '{\\^g}',
0x011e: '{\\u{G}}',
0x011f: '{\\u{g}}',
0x0120: '{\\.G}',
0x0121: '{\\.g}',
0x0122: '{\\c{G}}',
0x0123: '{\\c{g}}',
0x0124: '{\\^H}',
0x0125: '{\\^h}',
0x0128: '{\\~I}',
0x0129: '{\\~\\i}',
0x012a: '{\\=I}',
0x012b: '{\\=\\i}',
0x012c: '{\\u{I}}',
0x012d: '{\\u\\i}',
0x012e: '{\\c{I}}',
0x012f: '{\\c{i}}',
0x0130: '{\\.I}',
0x0131: '{\\i}',
0x0132: '{IJ}',
0x0133: '{ij}',
0x0134: '{\\^J}',
0x0135: '{\\^\\j}',
0x0136: '{\\c{K}}',
0x0137: '{\\c{k}}',
0x0139: "{\\'L}",
0x013a: "{\\'l}",
0x013b: "{\\c{L}}",
0x013c: "{\\c{l}}",
0x013d: "{\\v{L}}",
0x013e: "{\\v{l}}",
0x0141: '{\\L}',
0x0142: '{\\l}',
0x0143: "{\\'N}",
0x0144: "{\\'n}",
0x0145: "{\\c{N}}",
0x0146: "{\\c{n}}",
0x0147: "{\\v{N}}",
0x0148: "{\\v{n}}",
0x014c: '{\\=O}',
0x014d: '{\\=o}',
0x014e: '{\\u{O}}',
0x014f: '{\\u{o}}',
0x0150: '{\\H{O}}',
0x0151: '{\\H{o}}',
0x0152: '{\\OE}',
0x0153: '{\\oe}',
0x0154: "{\\'R}",
0x0155: "{\\'r}",
0x0156: "{\\c{R}}",
0x0157: "{\\c{r}}",
0x0158: "{\\v{R}}",
0x0159: "{\\v{r}}",
0x015a: "{\\'S}",
0x015b: "{\\'s}",
0x015c: "{\\^S}",
0x015d: "{\\^s}",
0x015e: "{\\c{S}}",
0x015f: "{\\c{s}}",
0x0160: "{\\v{S}}",
0x0161: "{\\v{s}}",
0x0162: "{\\c{T}}",
0x0163: "{\\c{t}}",
0x0164: "{\\v{T}}",
0x0165: "{\\v{t}}",
0x0168: "{\\~U}",
0x0169: "{\\~u}",
0x016a: "{\\=U}",
0x016b: "{\\=u}",
0x016c: "{\\u{U}}",
0x016d: "{\\u{u}}",
0x016e: "{\\r{U}}",
0x016f: "{\\r{u}}",
0x0170: "{\\H{U}}",
0x0171: "{\\H{u}}",
0x0172: "{\\c{U}}",
0x0173: "{\\c{u}}",
0x0174: "{\\^W}",
0x0175: "{\\^w}",
0x0176: "{\\^Y}",
0x0177: "{\\^y}",
0x0178: '{\\"Y}',
0x0179: "{\\'Z}",
0x017a: "{\\'z}",
0x017b: "{\\.Z}",
0x017c: "{\\.z}",
0x017d: "{\\v{Z}}",
0x017e: "{\\v{z}}",
0x01c4: "{D\\v{Z}}",
0x01c5: "{D\\v{z}}",
0x01c6: "{d\\v{z}}",
0x01c7: "{LJ}",
0x01c8: "{Lj}",
0x01c9: "{lj}",
0x01ca: "{NJ}",
0x01cb: "{Nj}",
0x01cc: "{nj}",
0x01cd: "{\\v{A}}",
0x01ce: "{\\v{a}}",
0x01cf: "{\\v{I}}",
0x01d0: "{\\v\\i}",
0x01d1: "{\\v{O}}",
0x01d2: "{\\v{o}}",
0x01d3: "{\\v{U}}",
0x01d4: "{\\v{u}}",
0x01e6: "{\\v{G}}",
0x01e7: "{\\v{g}}",
0x01e8: "{\\v{K}}",
0x01e9: "{\\v{k}}",
0x01ea: "{\\c{O}}",
0x01eb: "{\\c{o}}",
0x01f0: "{\\v\\j}",
0x01f1: "{DZ}",
0x01f2: "{Dz}",
0x01f3: "{dz}",
0x01f4: "{\\'G}",
0x01f5: "{\\'g}",
0x01fc: "{\\'\\AE}",
0x01fd: "{\\'\\ae}",
0x01fe: "{\\'\\O}",
0x01ff: "{\\'\\o}",
0x02c6: '{\\^{}}',
0x02dc: '{\\~{}}',
0x02d8: '{\\u{}}',
0x02d9: '{\\.{}}',
0x02da: "{\\r{}}",
0x02dd: '{\\H{}}',
0x02db: '{\\c{}}',
0x02c7: '{\\v{}}',
0x03c0: '{\\mbox{$\\pi$}}',
# consider adding more Greek here
0xfb01: '{fi}',
0xfb02: '{fl}',
0x2013: '{--}',
0x2014: '{---}',
0x2018: "{`}",
0x2019: "{'}",
0x201c: "{``}",
0x201d: "{''}",
0x2020: "{\\dag}",
0x2021: "{\\ddag}",
0x2122: "{\\mbox{$^\\mbox{TM}$}}",
0x2022: "{\\mbox{$\\bullet$}}",
0x2026: "{\\ldots}",
0x2202: "{\\mbox{$\\partial$}}",
0x220f: "{\\mbox{$\\prod$}}",
0x2211: "{\\mbox{$\\sum$}}",
0x221a: "{\\mbox{$\\surd$}}",
0x221e: "{\\mbox{$\\infty$}}",
0x222b: "{\\mbox{$\\int$}}",
0x2248: "{\\mbox{$\\approx$}}",
0x2260: "{\\mbox{$\\neq$}}",
0x2264: "{\\mbox{$\\leq$}}",
0x2265: "{\\mbox{$\\geq$}}",
}
for _i in range(0x0020):
if _i not in latex_equivalents:
latex_equivalents[_i] = ''
for _i in range(0x0020, 0x007f):
if _i not in latex_equivalents:
latex_equivalents[_i] = chr(_i)
# Characters that should be ignored and not output in tokenization
_ignore = set([chr(i) for i in list(range(32)) + [127]]) - set('\t\n\r')
# Regexp of chars not in blacklist, for quick start of tokenize
_stoppers = re.compile('[\x00-\x1f!$\\-?\\{~\\\\`\']')
_blacklist = set(' \n\r')
_blacklist.add(None) # shortcut candidate generation at end of data
# Construction of inverse translation table
_l2u = {
'\ ': ord(' ') # unexpanding space makes no sense in non-TeX contexts
}
for _tex in latex_equivalents:
if _tex <= 0x0020 or (_tex <= 0x007f and len(latex_equivalents[_tex]) <= 1):
continue # boring entry
_toks = tuple(_tokenize(latex_equivalents[_tex]))
if _toks[0] == '{' and _toks[-1] == '}':
_toks = _toks[1:-1]
if _toks[0].isalpha():
continue # don't turn ligatures into single chars
if len(_toks) == 1 and (_toks[0] == "'" or _toks[0] == "`"):
continue # don't turn ascii quotes into curly quotes
if _toks[0] == '\\mbox' and _toks[1] == '{' and _toks[-1] == '}':
_toks = _toks[2:-1]
if len(_toks) == 4 and _toks[1] == '{' and _toks[3] == '}':
_toks = (_toks[0], _toks[2])
if len(_toks) == 1:
_toks = _toks[0]
_l2u[_toks] = _tex
# Shortcut candidate generation for certain useless candidates:
# a character is in _blacklist if it can not be at the start
# of any translation in _l2u. We use this to quickly skip through
# such characters before getting to more difficult-translate parts.
# _blacklist is defined several lines up from here because it must
# be defined in order to call _tokenize, however it is safe to
# delay filling it out until now.
for i in range(0x0020, 0x007f):
_blacklist.add(chr(i))
_blacklist.remove('{')
_blacklist.remove('$')
for candidate in _l2u:
if isinstance(candidate, tuple):
if not candidate or not candidate[0]:
continue # pragma: no cover
firstchar = candidate[0][0]
else:
firstchar = candidate[0]
_blacklist.discard(firstchar)
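# --- illustrative sketch (added; not part of the original module) ---
# Assuming _tokenize splits a translation such as "{\\'N}" into the tokens
# '{', "\\'", 'N', '}', the loop above strips the braces and stores the
# remaining pair in _l2u, so the lookup recovers the code point of N-acute:
#
#     _l2u[("\\'", 'N')] == 0x0143   # mirrors the table entry above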
| [
"[email protected]"
] | |
76ec0bfa00c7191eecde64d7a553a7998771bae9 | 4b1a3db3249a51bcd2b9699edcb60c2716b2889e | /discharge.py | e851bc189eee5ad877ce01278d2da3087be6cfb8 | [] | no_license | shohei/tank-simulation | 799e7c8db543f639984dbadb1b41b83fc818b831 | d74b8d09eb1d5e1fa1fa61327f4fff35754ac5eb | refs/heads/master | 2021-01-02T09:14:46.709964 | 2015-07-29T13:48:52 | 2015-07-29T13:48:52 | 39,488,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#*** TANK PRESSURE SIMULATION ***
#*** AUTHOR: SHOHEI AOKI ********
#*** PROGRAMMED: 22 JUL 2015 ****
from pylab import *
##### PARAMETER AREA ############
gamma = 1.4
de = 2 #[mm]
Ae = pi*(de*0.001/2)*(de*0.001/2)#[m2]
T0i = (24 + 273.15) # Room temperature
R = 289 # J/(kg dot K)
V = 1/1000.0 #1L as [m3]
sigma = sqrt(gamma*((2/(gamma+1))**((gamma+1)/(gamma-1)))) # critical flow coefficient
##### SIMULATION FOR AIR DISCHARGE FROM TANK ##
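# Model sketch (comment added for clarity): the tank vents through a choked
# (critical-flow) orifice, and integrating the isentropic blowdown balance
# gives the closed-form pressure history evaluated in the loop below:
#   p0(t) = p_init * [1 + ((gamma-1)/2)*(Ae*sigma*sqrt(R*T0i)/V)*t]^(-2*gamma/(gamma-1))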
t = arange(0.01,100,0.01) # 100 sec
p_init = 0.1013*5*(10**6) # initial tank pressure: 5 atm (~0.507 MPa)
V_array = [1/1000.0,10/1000.0,20/1000.0]
for V in V_array:
    p0 = (1 + (((gamma - 1)/2)*(Ae*sigma*sqrt(R * T0i))/V)*t)**((-1)*2*gamma/(gamma-1))*p_init
plot(t,p0*(10**-6))
##### VISUALIZATION AREA #########
title('TANK PRESSURE TRANSITION BY AIR DISCHARGE')
legend(('1L','10L','20L'), loc='upper right')
xlabel('t [sec]')
ylabel('p0 [MPa]')
savefig('./image/tank-discharge.png')
show()
| [
"[email protected]"
] | |
67cf38dc2257b719682391c2d54996982c9b8db1 | c7faef6d4c0a965032a37953f34373816ce092c1 | /Products/MetaPublisher2/library/compatibility/historical.py | f458599fd708f8b30dc7109289df426da19247b0 | [
"ZPL-2.1"
] | permissive | sfluehnsdorf/MetaPublisher2 | 2d52a6baf1822a94ba12f66c86760953dd1cead4 | 4688baa9182919a8f8da8a0afbd68997e4453708 | refs/heads/master | 2019-01-01T23:22:04.677296 | 2013-06-23T12:03:46 | 2013-06-23T12:03:46 | null | 0 | 0 | null | null | null | null | ISO-8859-3 | Python | false | false | 37,321 | py | # -*- coding: iso-8859-15 -*-
# ============================================================================
#
# M e t a P u b l i s h e r 2
#
# ----------------------------------------------------------------------------
# Copyright (c) 2002-2013, Sebastian Lühnsdorf - Web-Solutions and others
# For more information see the README.txt file or visit www.metapulisher.org
# ----------------------------------------------------------------------------
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).
#
# A copy of the ZPL should accompany this distribution.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
# ============================================================================
__doc__ = """Historical Compatibility
To ensure the continued operation of deprecated resources, this module provides
wrappers and handlers for these outdated resources. It also provides an API for
logging calls to deprecated resources.
$Id: library/compatibility/historical.py 11 2013-05-09 22:54:03Z sfluehnsdorf $
"""
__version__ = '$Revision: 2.3 $'[11:-2]
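# Pattern sketch (illustrative only; 'oldName' and 'new_style_name' are
# hypothetical): every deprecated wrapper below logs the old name and then
# delegates to the corresponding new-style method, e.g.
#
#     def oldName(self, *args):
#         deprecated_method('oldName')
#         return self.new_style_name(*args)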
# ============================================================================
# Module Imports
from Products.MetaPublisher2.bases.field.legacyfield import LegacyFieldPlugin as FieldPlugin
from Products.MetaPublisher2.bases.frontend.legacyfrontend import LegacyFrontendPlugin as InterfacePlugin
from Products.MetaPublisher2.bases.widget.legacywidget import LegacyWidgetPlugin as WidgetPlugin
from Products.MetaPublisher2.bases.storage.legacystorage import LegacyStoragePlugin as StoragePlugin
from Products.MetaPublisher2.bases.plugin.legacyplugin import all_plugintypes
from Products.MetaPublisher2.library.application import permission_access_configuration, permission_access_entries, permission_change_configuration, permission_change_entries, permission_create_entries, permission_manage, permission_manage_designs, permission_manage_frontends, permission_manage_presets, permission_publish_frontends, permission_save_presets, permission_zmi
from Products.MetaPublisher2.library.common import ClassSecurityInfo, InitializeClass, Folder, OrderedFolder, true, false
from Products.MetaPublisher2.library.compatibility.deprecation import deprecated_form, deprecated_method
# ============================================================================
# Module Exports
__all__ = [
'HistoricalCompatibility',
'InterfacesFolder',
'standard_form_footer',
'standard_form_header',
'TestError',
]
# ============================================================================
# Legacy Field Value Test Exception
class TestError(Exception):
"""!TXT! Legacy Field Value Test Error"""
def __init__(self, **args):
for key in args.keys():
setattr(self, key, args[key])
# ============================================================================
# Interface Default Layout
standard_form_header = '''\
<dtml-var standard_html_header>
<h2><dtml-var title_or_id></h2>
'''
standard_form_footer = '''\
<p align="center">
<a target="_blank" href="http://metapublisher.org">
<img src="<dtml-var BASEPATH1>/misc_/MetaPublisher2/MP2Powered.gif" border="0" alt="Powered by MetaPublisher2">
</a>
</p>
<dtml-var standard_html_footer>
'''
# ============================================================================
# InterfacesFolder Base Class
class InterfacesFolder(Folder):
"""InterfacesFolder Base Class"""
meta_type = 'InterfacesFolder'
# ----------------------------------------------------------------------------
# Class Security
InitializeClass(InterfacesFolder)
# ============================================================================
# Historical Compatibility Mix-In Class
class HistoricalCompatibility:
"""!TXT! Historical Compatibility Mix-In Class"""
security = ClassSecurityInfo()
# ------------------------------------------------------------------------
# MetaPublisher2
security.declarePublic('zmp2')
def zmp2(self):
"""DEPRECATED: !TXT! Return this instance of MetaPublisher2"""
deprecated_method('zmp2')
return self.get_MetaPublisher2()
security.declarePublic('get_MetaPublisher2_url')
def get_MetaPublisher2_url(self):
"""DEPRECATED: !TXT! Return this instance's absolute url"""
deprecated_method('get_MetaPublisher2_url')
        return self.get_MetaPublisher2().absolute_url()
# ------------------------------------------------------------------------
# UserInterface
security.declarePublic('manage_zmp2_css')
def manage_zmp2_css(self, REQUEST=None):
"""DEPRECATED: !TXT! MetaPublisher2 CSS"""
deprecated_form('manage_zmp2_css')
return self.manage_MetaPublisher2_css(self, REQUEST)
security.declarePublic('sp')
def sp(self, w=1, h=1, **kw):
"""DEPRECATED: !TXT! Return a tag for a spacer image with specifiable dimensions"""
deprecated_method('sp')
params = ''
for key in kw.keys():
params = params + ' %s="%s"' % (key, kw[key])
tag = '<img src="p_/sp" width="%s" height="%s" border="0" alt=""%s/>'
return tag % (self.REQUEST.BASEPATH1, w, h, params)
# ------------------------------------------------------------------------
# Configuration Constraints
# ------------------------------------------------------------------------
# Configuration Fields
security.declareProtected(permission_access_configuration, 'manage_fieldsBrowserForm')
def manage_fieldsBrowserForm(self, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_form('manage_fieldsBrowserForm')
return self.fields_form(self, REQUEST)
security.declareProtected(permission_change_configuration, 'manage_fieldsNewForm')
def manage_fieldsNewForm(self, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_form('manage_fieldsNewForm')
return self.add_field_form(self, REQUEST)
security.declareProtected(permission_access_configuration, 'getField')
def getField(self, storageId, fieldId):
"""DEPRECATED: !TXT!"""
deprecated_method('getField')
        return self.get_field(storageId, fieldId)
security.declareProtected(permission_access_configuration, 'fieldIds')
def fieldIds(self, storageId):
"""DEPRECATED: !TXT!"""
deprecated_method('fieldIds')
return self.field_ids(storageId)
security.declareProtected(permission_access_configuration, 'fieldItems')
def fieldItems(self, storageId):
"""DEPRECATED: !TXT!"""
deprecated_method('fieldItems')
return self.field_items(storageId)
security.declareProtected(permission_access_configuration, 'fieldValues')
def fieldValues(self, storageId):
"""DEPRECATED: !TXT!"""
deprecated_method('fieldValues')
return self.field_values(storageId)
security.declareProtected(permission_change_configuration, 'manage_fieldsNew')
def manage_fieldsNew(self, storageId, fieldType, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_fieldsNew')
return self.add_field(storageId, fieldType, REQUEST)
security.declareProtected(permission_change_configuration, 'delField')
def delField(self, storageId, fieldId):
"""DEPRECATED: !TXT!"""
deprecated_method('delField')
return self.delete_field(storageId, fieldId)
security.declareProtected(permission_change_configuration, 'manage_fieldsDelete')
def manage_fieldsDelete(self, storageId, ids=[], REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_fieldsDelete')
return self.delete_fields(storageId, ids, REQUEST)
security.declareProtected(permission_change_configuration, 'delFields')
def delFields(self, storageId, fieldIds=[]):
"""DEPRECATED: !TXT!"""
deprecated_method('delFields')
return self.delete_fields(storageId, fieldIds)
security.declareProtected(permission_change_configuration, 'renameField')
def renameField(self, storageId, fieldId, newId):
"""DEPRECATED: !TXT!"""
deprecated_method('renameField')
return self.rename_field(storageId, fieldId, newId)
security.declareProtected(permission_access_configuration, 'fieldsSortable')
def fieldsSortable(self, storageId):
"""DEPRECATED: !TXT!"""
deprecated_method('fieldsSortable')
return self.are_fields_sortable(storageId)
security.declareProtected(permission_change_configuration, 'manage_fieldsMoveTop')
def manage_fieldsMoveTop(self, storageId, fieldId, REQUEST=None):
"""DEPRECATED: !TXT!"""
        deprecated_method('manage_fieldsMoveTop')
return self.move_fields_to_top(storageId, fieldId, REQUEST)
security.declareProtected(permission_change_configuration, 'moveFieldTop')
def moveFieldTop(self, storageId, fieldId):
"""DEPRECATED: !TXT!"""
deprecated_method('moveFieldTop')
return self.move_fields_to_top(storageId, fieldId)
security.declareProtected(permission_change_configuration, 'manage_fieldsMoveUp')
def manage_fieldsMoveUp(self, storageId, fieldId, REQUEST=None):
"""DEPRECATED: !TXT!"""
        deprecated_method('manage_fieldsMoveUp')
return self.move_fields_up(storageId, fieldId, REQUEST)
security.declareProtected(permission_change_configuration, 'moveFieldUp')
def moveFieldUp(self, storageId, fieldId):
"""DEPRECATED: !TXT!"""
deprecated_method('moveFieldUp')
return self.move_fields_up(storageId, fieldId)
security.declareProtected(permission_change_configuration, 'manage_fieldsMoveDown')
def manage_fieldsMoveDown(self, storageId, fieldId, REQUEST=None):
"""DEPRECATED: !TXT!"""
        deprecated_method('manage_fieldsMoveDown')
return self.move_fields_down(storageId, fieldId, REQUEST)
security.declareProtected(permission_change_configuration, 'moveFieldDown')
def moveFieldDown(self, storageId, fieldId):
"""DEPRECATED: !TXT!"""
deprecated_method('moveFieldDown')
return self.move_fields_down(storageId, fieldId)
security.declareProtected(permission_change_configuration, 'manage_fieldsMoveBottom')
def manage_fieldsMoveBottom(self, storageId, fieldId, REQUEST=None):
"""DEPRECATED: !TXT!"""
        deprecated_method('manage_fieldsMoveBottom')
return self.move_fields_to_bottom(storageId, fieldId, REQUEST)
security.declareProtected(permission_change_configuration, 'moveFieldBottom')
def moveFieldBottom(self, storageId, fieldId):
"""DEPRECATED: !TXT!"""
deprecated_method('moveFieldBottom')
return self.move_fields_to_bottom(storageId, fieldId)
# ------------------------------------------------------------------------
# Configuration Identifiers
security.declareProtected(permission_access_configuration, 'newEntryId')
def newEntryId(self, storageId):
"""DEPRECATED: !TXT!"""
deprecated_method('newEntryId')
return self.new_entry_id(storageId)
# ------------------------------------------------------------------------
# Configuration Indexing
security.declareProtected(permission_access_configuration, 'manage_storagesIndexForm')
def manage_storagesIndexForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Indexing Form"""
deprecated_form('manage_storagesIndexForm')
return self.indexing_form(self, REQUEST)
# ------------------------------------------------------------------------
# Configuration Inheritance
# ------------------------------------------------------------------------
# Configuration Relations
# ------------------------------------------------------------------------
# Configuration Storages
security.declareProtected(permission_change_configuration, 'manage_storagesBrowserForm')
def manage_storagesBrowserForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Storages Form"""
deprecated_form('manage_storagesBrowserForm')
return self.storages_form(self, REQUEST)
security.declareProtected(permission_change_configuration, 'manage_storagesNewForm')
def manage_storagesNewForm(self, REQUEST=None):
"""DEPRECATED: !TXT! New Storage Form"""
deprecated_form('manage_storagesNewForm')
return self.add_storage_form(self, REQUEST)
security.declareProtected(permission_access_configuration, 'getStorage')
def getStorage(self, storageId):
"""DEPRECATED: !TXT!"""
deprecated_method('getStorage')
return self.get_storage(storageId)
security.declareProtected(permission_access_configuration, 'storageIds')
def storageIds(self):
"""DEPRECATED: !TXT!"""
deprecated_method('storageIds')
return self.storage_ids()
security.declareProtected(permission_access_configuration, 'storageItems')
def storageItems(self):
"""DEPRECATED: !TXT!"""
deprecated_method('storageItems')
return self.storage_items()
security.declareProtected(permission_access_configuration, 'storageValues')
def storageValues(self):
"""DEPRECATED: !TXT!"""
deprecated_method('storageValues')
return self.storage_values()
security.declareProtected(permission_change_configuration, 'manage_storagesNew')
def manage_storagesNew(self, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_storagesNew')
return self.add_storage(REQUEST)
security.declareProtected(permission_change_configuration, 'manage_storagesDelete')
def manage_storagesDelete(self, ids=[], REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_storagesDelete')
return self.delete_storages(ids, REQUEST)
# ------------------------------------------------------------------------
# Configuration Triggers
# ------------------------------------------------------------------------
# Data Entries
security.declareProtected(permission_access_entries, 'manage_entriesBrowserForm')
def manage_entriesBrowserForm(self, REQUEST=None):
"""DEPRECATED: !TXT! management form for entries"""
deprecated_form('manage_entriesBrowserForm')
return self.entries_form(self, REQUEST)
security.declareProtected(permission_create_entries, 'manage_entriesNewForm')
def manage_entriesNewForm(self, REQUEST=None):
"""DEPRECATED: !TXT! management form for adding entries"""
deprecated_form('manage_entriesNewForm')
return self.entries_form(self, REQUEST)
security.declareProtected(permission_create_entries, 'manage_entriesNew')
def manage_entriesNew(self, REQUEST=None):
"""DEPRECATED: !TXT! add entry to storage"""
deprecated_method('manage_entriesNew')
source = REQUEST.get('storage_id', REQUEST.get('storageId', self.get_profile_variable(REQUEST, 'storage_id')))
entry_id = REQUEST.get('entry_id', REQUEST.get('entryId', None))
entry_id = self.add_entry(source, entry_id, REQUEST.form)
self.redirect(
REQUEST,
'entries_form',
message='!TXT! Entry "%s" in Storage "%s" added.' % (entry_id, source)
)
security.declareProtected(permission_create_entries, 'manage_entriesNewMore')
def manage_entriesNewMore(self, REQUEST=None):
"""DEPRECATED: !TXT! add entry to storage"""
deprecated_method('manage_entriesNewMore')
source = REQUEST.get('storage_id', REQUEST.get('storageId', self.get_profile_variable(REQUEST, 'storage_id')))
entry_id = REQUEST.get('entry_id', REQUEST.get('entryId', None))
entry_id = self.add_entry(source, entry_id, REQUEST.form)
self.redirect(
REQUEST,
'add_entry_form',
message='!TXT! Entry "%s" in Storage "%s" added.' % (entry_id, source)
)
security.declareProtected(permission_change_entries, 'manage_entriesEdit')
def manage_entriesEdit(self, REQUEST=None):
"""DEPRECATED: !TXT! edit entry in storage"""
        deprecated_method('manage_entriesEdit')
        source = REQUEST.get('storage_id', REQUEST.get('storageId', self.get_profile_variable(REQUEST, 'storage_id')))
entry_id = REQUEST.get('entry_id', REQUEST['entryId'])
self.edit_entry(source, entry_id, REQUEST.form, REQUEST)
security.declareProtected(permission_change_entries, 'manage_entriesDelete')
def manage_entriesDelete(self, storageId, ids=[], REQUEST=None):
"""DEPRECATED: !TXT! delete entries from storage"""
deprecated_method('manage_entriesDelete')
return self.delete_entries(storageId, ids, REQUEST)
security.declareProtected(permission_access_entries, 'entryIds')
def entryIds(self, storageId):
"""DEPRECATED: !TXT! retrieve list of ids of entries in storage"""
deprecated_method('entryIds')
return self.entry_ids(storageId)
security.declareProtected(permission_access_entries, 'entryItems')
def entryItems(self, storageId):
"""DEPRECATED: !TXT! retrieve list of id, value tuples of entries in storage"""
deprecated_method('entryItems')
return self.entry_items(storageId)
security.declareProtected(permission_access_entries, 'entryValues')
def entryValues(self, storageId):
"""DEPRECATED: !TXT! retrieve list of values of entries in storage"""
deprecated_method('entryValues')
return self.entry_values(storageId)
security.declareProtected(permission_access_entries, 'getEntry')
def getEntry(self, storageId, entryId):
"""DEPRECATED: !TXT! retrieve entry from storage"""
deprecated_method('getEntry')
        return self.get_entry(storageId, entryId)
security.declareProtected(permission_create_entries, 'addEntry')
def addEntry(self, storageId, entryId, entryData={}, **args):
"""DEPRECATED: !TXT! add entry to storage"""
deprecated_method('addEntry')
entryData.update(args)
        return self.add_entry(storageId, entryId, entryData)
security.declareProtected(permission_change_entries, 'delEntries')
def delEntries(self, storageId, entryIds=[]):
"""DEPRECATED: !TXT! delete entries from storage"""
deprecated_method('delEntries')
return self.delete_entries(storageId, entryIds)
security.declareProtected(permission_change_entries, 'delEntry')
def delEntry(self, storageId, entryId):
"""DEPRECATED: !TXT! delete entry from storage"""
deprecated_method('delEntry')
return self.del_entry(storageId, entryId)
security.declareProtected(permission_change_entries, 'editEntry')
def editEntry(self, storageId, entryId, entryData={}, **args):
"""DEPRECATED: !TXT! edit entry in storage"""
deprecated_method('editEntry')
entryData.update(args)
return self.edit_entry(storageId, entryId, entryData)
security.declareProtected(permission_change_entries, 'renameEntry')
def renameEntry(self, entryId, newId):
"""DEPRECATED: !TXT! rename entry in storage"""
deprecated_method('renameEntry')
return self.rename_entry(entryId, newId)
security.declareProtected(permission_change_entries, 'manage_entriesMoveBottom')
def manage_entriesMoveBottom(self, storageId, entryId, REQUEST=None):
"""DEPRECATED: !TXT! move entry to bottom"""
deprecated_method('manage_entriesMoveBottom')
return self.move_entry_to_bottom(storageId, entryId, REQUEST)
security.declareProtected(permission_change_entries, 'manage_entriesMoveDown')
def manage_entriesMoveDown(self, storageId, entryId, REQUEST=None):
"""DEPRECATED: !TXT! move entry down"""
deprecated_method('manage_entriesMoveDown')
return self.move_entry_down(storageId, entryId, REQUEST)
security.declareProtected(permission_change_entries, 'manage_entriesMoveTop')
def manage_entriesMoveTop(self, storageId, entryId, REQUEST=None):
"""DEPRECATED: !TXT! move entry to top"""
deprecated_method('manage_entriesMoveTop')
return self.move_entry_to_top(storageId, entryId, REQUEST)
security.declareProtected(permission_change_entries, 'manage_entriesMoveToPosition')
def manage_entriesMoveToPosition(self, storageId, entryId, position, REQUEST=None):
"""DEPRECATED: !TXT! move entry to position"""
deprecated_method('manage_entriesMoveToPosition')
return self.move_entry(storageId, entryId, position, REQUEST)
security.declareProtected(permission_change_entries, 'manage_entriesMoveUp')
def manage_entriesMoveUp(self, storageId, entryId, REQUEST=None):
"""DEPRECATED: !TXT! move entry up"""
deprecated_method('manage_entriesMoveUp')
return self.move_entry_up(storageId, entryId, REQUEST)
security.declareProtected(permission_change_entries, 'moveEntryBottom')
def moveEntryBottom(self, storageId, entryId):
"""DEPRECATED: !TXT! move entry to bottom"""
deprecated_method('moveEntryBottom')
return self.move_entry_to_bottom(storageId, entryId)
security.declareProtected(permission_change_entries, 'moveEntryDown')
def moveEntryDown(self, storageId, entryId):
"""DEPRECATED: !TXT! move entry down"""
deprecated_method('moveEntryDown')
return self.move_entry_down(storageId, entryId)
security.declareProtected(permission_change_entries, 'moveEntryTop')
def moveEntryTop(self, storageId, entryId):
"""DEPRECATED: !TXT! move entry to top"""
deprecated_method('moveEntryTop')
return self.move_entry_to_top(storageId, entryId)
security.declareProtected(permission_change_entries, 'moveEntryToPosition')
def moveEntryToPosition(self, storageId, entryId, position):
"""DEPRECATED: !TXT! move entry to position"""
deprecated_method('moveEntryToPosition')
return self.move_entry(storageId, entryId, position)
security.declareProtected(permission_change_entries, 'moveEntryUp')
def moveEntryUp(self, storageId, entryId):
"""DEPRECATED: !TXT! move entry up"""
deprecated_method('moveEntryUp')
return self.move_entry_up(storageId, entryId)
security.declareProtected(permission_access_entries, 'getEntryPosition')
def getEntryPosition(self, storageId, entryId):
"""DEPRECATED: !TXT! retrieve position of entry"""
deprecated_method('getEntryPosition')
return self.get_entry_position(storageId, entryId)
security.declareProtected(permission_access_entries, 'getEntryField')
def getEntryField(self, storageId, entryId, fieldId, default=None):
"""DEPRECTAED: !TXT! Get the value of an Entry's Field."""
deprecated_method('getEntryField')
self.get_entry_field(storageId, entryId, fieldId, default)
security.declareProtected(permission_change_entries, 'setEntryField')
def setEntryField(self, storageId, entryId, fieldId, value):
"""DEPRECTAED: !TXT! Set the value of an Entry's Field."""
deprecated_method('setEntryField')
self.set_entry_field(storageId, entryId, fieldId, value)
# ------------------------------------------------------------------------
# Data Exports
# ------------------------------------------------------------------------
# Data Expressions
# ------------------------------------------------------------------------
# Data Imports
# ------------------------------------------------------------------------
# Data Queries
security.declareProtected(permission_access_entries, 'manage_entriesQueriesForm')
def manage_entriesQueriesForm(self, REQUEST=None):
"""DEPRECTAED: !TXT!"""
return self.queries_form(self, REQUEST)
# ------------------------------------------------------------------------
# Data Reports
# ------------------------------------------------------------------------
# Data Search
# ------------------------------------------------------------------------
# Data Transfer
# ------------------------------------------------------------------------
# Publisher Audit
# ------------------------------------------------------------------------
# Publisher Caching
security.declareProtected(permission_manage_designs, 'manage_storagesCacheForm')
def manage_storagesCacheForm(self, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_form('manage_storagesCacheForm')
return self.caching_form(self, REQUEST)
# ------------------------------------------------------------------------
# Publisher Designs
security.declareProtected(permission_manage_designs, 'manage_interfacesStylesForm')
def manage_interfacesStylesForm(self, REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_form('manage_interfacesStylesForm')
return self.designs_form(self, REQUEST)
# ------------------------------------------------------------------------
# Publisher Frontends
security.declareProtected(permission_manage_frontends, 'manage_interfacesDelete')
def manage_interfacesDelete(self, ids=[], REQUEST=None):
"""DEPRECATED: !TXT! Delete specified Frontends"""
deprecated_method('manage_interfacesDelete')
return self.del_frontends(ids, REQUEST)
security.declareProtected(permission_manage_frontends, 'interfaceValues')
def interfaceValues(self):
"""DEPRECATED: !TXT! Return values of Frontends"""
deprecated_method('interfaceValues')
return self.frontend_values()
security.declareProtected(permission_manage_frontends, 'interfaceItems')
def interfaceItems(self):
"""DEPRECATED: !TXT! Return tuples of id, value of Frontends"""
deprecated_method('interfaceItems')
return self.frontend_items()
security.declareProtected(permission_manage_frontends, 'interfaceIds')
def interfaceIds(self):
"""DEPRECATED: !TXT! Return ids of Frontends"""
deprecated_method('interfaceIds')
return self.frontend_paths()
security.declareProtected(permission_manage_frontends, 'getInterface')
def getInterface(self, interfaceId):
"""DEPRECATED: !TXT! Return the specified Frontend"""
deprecated_method('getInterface')
return self.get_frontend(interfaceId)
security.declareProtected(permission_manage_frontends, 'getInterfacePaths')
def getInterfacePaths(self):
"""DEPRECATED: !TXT! Return all Frontend object paths recursively"""
deprecated_method('getInterfacePaths')
return self.get_frontend_parents()
security.declareProtected(permission_manage_frontends, 'manage_interfacesBrowserForm')
def manage_interfacesBrowserForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Frontends Form"""
deprecated_form('manage_interfacesStylesForm')
return self.frontends_form(self, REQUEST)
security.declareProtected(permission_manage_frontends, 'manage_interfacesNewForm')
def manage_interfacesNewForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Add Frontend Form"""
deprecated_form('manage_interfacesNewForm')
return self.add_frontend_form(self, REQUEST)
security.declareProtected(permission_manage_frontends, 'manage_interfacesNew')
def manage_interfacesNew(self, REQUEST=None):
"""DEPRECATED: !TXT! Call specified Frontend's factory"""
deprecated_method('manage_interfacesNew')
return self.add_frontend_type(REQUEST)
# ------------------------------------------------------------------------
# Publisher Languages
# ------------------------------------------------------------------------
# Publisher Renderer
security.declareProtected(permission_publish_frontends, 'manage_interfacesRender')
def manage_interfacesRender(self, ids=[], REQUEST=None):
"""DEPRECATED: !TXT!"""
deprecated_method('manage_interfacesRender')
self.render_frontends(ids)
security.declareProtected(permission_publish_frontends, 'manage_interfacesRenderForm')
def manage_interfacesRenderForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Render Frontends Form"""
deprecated_form('manage_interfacesRenderForm')
return self.renderer_form(self, REQUEST)
# ------------------------------------------------------------------------
# Publisher Widgets
security.declareProtected(permission_manage_frontends, 'getWidgetsForField')
def getWidgetsForField(self, formTypeId, fieldTypeId):
"""DEPRECATED: !TXT!"""
deprecated_method('getWidgetsForField')
return self.get_widgets_for_field(formTypeId, fieldTypeId)
# ------------------------------------------------------------------------
# Service Assistant
security.declareProtected(permission_manage, 'manage_aboutAssistantForm')
def manage_aboutAssistantForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Assistant Form"""
deprecated_form('manage_aboutAssistantForm')
return self.assistant_form(self, REQUEST)
# ------------------------------------------------------------------------
# Service Community
# ------------------------------------------------------------------------
# Service Feedback
security.declareProtected(permission_zmi, 'manage_aboutFeedbackForm')
def manage_aboutFeedbackForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Feedback Form"""
deprecated_form('manage_aboutFeedbackForm')
return self.feedback_form(self, REQUEST)
# ------------------------------------------------------------------------
# Service Help
# ------------------------------------------------------------------------
# Service Manual
security.declareProtected(permission_zmi, 'manage_aboutManualForm')
def manage_aboutManualForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Manual Form"""
deprecated_form('manage_aboutManualForm')
return self.manual_form(self, REQUEST)
# ------------------------------------------------------------------------
# Service Reference
# ------------------------------------------------------------------------
# Service Release
security.declareProtected(permission_zmi, 'manage_aboutReleaseForm')
def manage_aboutReleaseForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Release Form"""
deprecated_form('manage_aboutReleaseForm')
return self.release_form(self, REQUEST)
security.declareProtected(permission_zmi, 'manage_aboutVersion')
def manage_aboutVersion(self):
"""DEPRECATED: !TXT! Return the contents of the VERSION.txt file"""
deprecated_method('manage_aboutVersion')
return self.read_release_version_file()
security.declareProtected(permission_zmi, 'manage_aboutReadMe')
def manage_aboutReadMe(self):
"""DEPRECATED: !TXT! Return the contents of the README.txt file"""
deprecated_method('manage_aboutReadMe')
return self.read_release_readme_file()
security.declareProtected(permission_zmi, 'manage_aboutLicense')
def manage_aboutLicense(self):
"""DEPRECATED: !TXT! Return the contents of the LICENSE.txt file"""
deprecated_method('manage_aboutLicense')
return self.read_release_license_file()
# ------------------------------------------------------------------------
# System Events
# ------------------------------------------------------------------------
# System Integrity
security.declareProtected(permission_manage, 'manage_systemIntegrityForm')
def manage_systemIntegrityForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Integrity Form"""
deprecated_form('manage_systemIntegrityForm')
return self.integrity_form(self, REQUEST)
# ------------------------------------------------------------------------
# System Plugins
security.declareProtected(permission_manage, 'manage_systemPluginsForm')
def manage_systemPluginsForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Plugins Form"""
deprecated_form('manage_systemPluginsForm')
return self.plugins_form(self, REQUEST)
security.declareProtected(permission_manage, 'pluginIds')
def pluginIds(self, pluginTypes=[]):
"""DEPRECATED: !TXT! Return ids of installed MetaPublisher2 plugins"""
deprecated_method('pluginIds')
return map(lambda item: item[0], self.pluginItems(pluginTypes))
security.declareProtected(permission_manage, 'pluginItems')
def pluginItems(self, pluginTypes=[]):
"""DEPRECATED: !TXT! Return tuples of id, value of installed MetaPublisher2 plugins"""
deprecated_method('pluginItems')
result = []
if isinstance(pluginTypes, str):
pluginTypes = [pluginTypes, ]
elif len(pluginTypes) == 0:
            pluginTypes = all_plugintypes
        for id, plugin in self.plugin_items():
            if plugin.get('visibility', None) in pluginTypes:
                result.append((id, plugin))
return result
security.declareProtected(permission_manage, 'pluginValues')
def pluginValues(self, pluginTypes=[]):
"""DEPRECATED: !TXT! Return values of installed MetaPublisher2 plugins"""
deprecated_method('pluginValues')
return map(lambda item: item[1], self.pluginItems(pluginTypes))
security.declareProtected(permission_manage, 'getPlugin')
def getPlugin(self, pluginType):
"""DEPRECATED: !TXT! Return the specified MetaPublisher2 plugin"""
deprecated_method('getPlugin')
return self.get_plugin(pluginType)
# ------------------------------------------------------------------------
# System Presets
security.declareProtected(permission_manage_presets, 'manage_presetsBrowserForm')
def manage_presetsBrowserForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Presets Form"""
deprecated_form('manage_presetsBrowserForm')
return self.presets_form(self, REQUEST)
security.declareProtected(permission_save_presets, 'manage_presetsNewForm')
def manage_presetsNewForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Add Preset Form"""
deprecated_form('manage_presetsNewForm')
return self.save_preset_form(self, REQUEST)
# ------------------------------------------------------------------------
# System Profiles
security.declareProtected(permission_manage, 'manage_systemProfilesForm')
def manage_systemProfilesForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Profiles Form"""
deprecated_form('manage_systemProfilesForm')
return self.profiles_form(self, REQUEST)
# ------------------------------------------------------------------------
# System Settings
security.declareProtected(permission_manage, 'manage_systemSettingsForm')
def manage_systemSettingsForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Settings Form"""
deprecated_form('manage_systemSettingsForm')
return self.settings_form(self, REQUEST)
# ------------------------------------------------------------------------
# System Tools
security.declareProtected(permission_manage, 'manage_systemToolsForm')
def manage_systemToolsForm(self, REQUEST=None):
"""DEPRECATED: !TXT! Tools Form"""
deprecated_form('manage_systemToolsForm')
return self.tools_form(self, REQUEST)
# ----------------------------------------------------------------------------
# Class Security
InitializeClass(HistoricalCompatibility)
# !!! historical.py - handle request vars (entryId instead of entry_id, etc.)
| [
"[email protected]"
] | |
a5bdaa8e9a727c27b7d64b956d252fd66588dfe7 | b516a1d0791bba8010ad0a8616c4be259589ce5c | /anidb/model.py | 5bc42d2d56d81688209b35883f85b3d6baba68c2 | [] | no_license | p0psicles/pyanihttp | 769cd585a9351deea6ca5c7ad8af5aa32fcaee37 | 6a67899e4491afd6b020edb7781e37b2781ad1df | refs/heads/master | 2021-01-17T19:59:33.580955 | 2012-02-26T20:09:55 | 2012-02-26T20:09:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,563 | py | __all__ = ["Anime", "Category", "Title", "Episode", "Tag"]
class Entity(object):
def __init__(self, id):
self._id = int(id)
@property
def id(self):
"""The id of this entity"""
return self._id
@id.setter
def id(self, value):
self._id = value
class Titled(object):
"""
Base class for all classes with a `titles` attribute
"""
def __init__(self):
self.titles = {}
def add_title(self, title):
"""
Add a new title to this entity
:param title: The title to add
"""
assert title.lang is not None
assert title.title is not None
if title.lang in self.titles:
self.titles[title.lang].append(title)
else:
self.titles[title.lang] = [title]
class Typed(object):
"""
Base class for all classes with a `type` attribute
"""
def __init__(self):
self._type = None
@property
def type(self):
"""The type property"""
return self._type
@type.setter
def type(self, value):
self._type = value
class Named(object):
"""
Base class for all classes with a `name` attribute
"""
def __init__(self):
self.name = None
@property
def name(self):
"""The name property"""
return self._name
@name.setter
def name(self, value):
self._name = value
class Described(object):
"""
Base class for all classes with a `description` attribute
"""
def __init__(self):
self._description = None
@property
def description(self):
"""The description property"""
return self._description
@description.setter
def description(self, value):
self._description = value
class Anime(Entity, Titled, Typed, Described):
"""
An anime. Identified by an `aid`
"""
def __init__(self, aid):
Entity.__init__(self, aid)
Titled.__init__(self)
Typed.__init__(self)
Described.__init__(self)
self._type = None
self._episodecount = None
self._episodes = {}
self._startdate = None
self._enddate = None
self._categories = []
self._tags = []
self._ratings = {
"permanent":
{ "count": None, "rating": None},
"temporary":
{ "count": None, "rating": None},
"review":
{ "count": None, "rating": None}
}
def add_category(self, category):
"""
Add a new category to this anime
:param category: The category to add
"""
if not isinstance(category, Category):
raise TypeError("Category expected")
else:
self._categories.append(category)
def add_episode(self, episode):
"""
Adds an episode to this anime
:param episode: :class:`anidb.model.Episode`
"""
if isinstance(episode, Episode):
            self._episodes[episode.epno] = episode
else:
raise TypeError("Episode expected")
def set_rating(self, which, count, rating):
"""
Set the rating of this anime
:param which: Which rating. Either `temporary`, `permanent` or
`reviews`
:param count: The number of votes
:param rating: The rating
"""
if which in self._ratings.keys():
            self._ratings[which]["count"] = int(count)
            self._ratings[which]["rating"] = float(rating)
else:
raise ValueError("Unknown kind of rating")
def add_tag(self, tag):
"""
Adds a tag to this anime
:param tag: A :class:`anidb.model.Tag`
"""
self._tags.append(tag)
@property
def episodecount(self):
"""The episodecount property"""
return self._episodecount
@episodecount.setter
def episodecount(self, value):
self._episodecount = int(value)
@property
def startdate(self):
"""The startdate property"""
return self._startdate
@startdate.setter
def startdate(self, value):
self._startdate = value
@property
def enddate(self):
"""The enddate property"""
return self._enddate
@enddate.setter
def enddate(self, value):
self._enddate = value
@property
def ratings(self):
"""The ratings property"""
return self._ratings
@property
def episodes(self):
"""The episodes property"""
return self._episodes
@property
def categories(self):
"""The categories property"""
return self._categories
@property
def tags(self):
"""The tags property"""
return self._tags
class Episode(Entity, Titled):
"""
An episode. Identified by an `id`
"""
def __init__(self, id):
Entity.__init__(self, id)
Titled.__init__(self)
self._length = None
self._airdate = None
self._epno = None
self._rating = None
def set_rating(self, votes, rating):
self._rating = (int(votes), float(rating))
@property
def epno(self):
"""The epno property"""
return self._epno
@epno.setter
def epno(self, value):
self._epno = value
@property
def airdate(self):
"""The airdate property"""
return self._airdate
@airdate.setter
def airdate(self, value):
self._airdate = value
@property
def length(self):
"""The length property"""
return self._length
@length.setter
def length(self, value):
self._length = value
class Category(Entity, Named, Described):
"""
An AniDB category
"""
def __init__(self, id):
Entity.__init__(self, id)
Named.__init__(self)
Described.__init__(self)
self._hentai = False
self._weight = 0
self._name = None
self._description = None
self._parentid = None
@property
def hentai(self):
"""
Whether or not this category contains hentai material
:rtype: Boolean
"""
return self._hentai
@hentai.setter
def hentai(self, value):
self._hentai = value
@property
def weight(self):
return self._weight
@weight.setter
def weight(self, value):
self._weight = value
@property
def parentid(self):
"""The parentid property"""
return self._parentid
@parentid.setter
def parentid(self, value):
self._parentid = value
class Title(Typed):
def __init__(self, lang, type=None, title=None, exact=False):
Typed.__init__(self)
assert lang is not None
self._lang = lang
self._type = type
self._title = title
self._exact = exact
def __str__(self):
return self._title
@property
def lang(self):
"""
The language of the title. A complete list is available at `the
AniDB wiki <http://wiki.anidb.net/w/User:Eloyard/anititles_dump>`_
"""
return self._lang
@lang.setter
def lang(self, value):
self._lang = value
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
@property
def exact(self):
"""
Whether or not this is an exact match, in case the correspondig
:class:`anidb.model.Anime` object was retrieved via a search.
:rtype: Boolean
"""
return self._exact
@exact.setter
def exact(self, value):
self._exact = value
class Tag(Entity, Named, Described):
def __init__(self, id):
Entity.__init__(self, id)
Named.__init__(self)
Described.__init__(self)
self._spoiler = False
self._approval = None
self._count = None
@property
def count(self):
"""The count property"""
return self._count
@count.setter
def count(self, value):
self._count = int(value)
@property
def spoiler(self):
"""The spoiler property"""
return self._spoiler
@spoiler.setter
def spoiler(self, value):
self._spoiler = value
@property
def approval(self):
"""The approval property"""
return self._approval
@approval.setter
def approval(self, value):
self._approval = int(value)
| [
"[email protected]"
] | |
3fb764293b9870d147abe9b85e9cd11943995550 | 9849e98406768caba0cb9c9ac9300688fd012179 | /doc_gen/apps.py | 380b518525739e9a7d8a348feca6275b06dc5f92 | [] | no_license | Trevahok/Extractinator-9000 | 3cb4e1c6d20d355cef156c8e7f2ca57137b38188 | 131122fb25a6513dc765495fbe0622ad67ca29cc | refs/heads/master | 2020-04-29T10:29:21.700611 | 2019-05-18T11:17:56 | 2019-05-18T11:17:56 | 176,063,513 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | from django.apps import AppConfig
class DocGenConfig(AppConfig):
name = 'doc_gen'
| [
"[email protected]"
] | |
3d8932d7ae7a374fb4a9079e524d480316a6f5d4 | 817ff801938d25776b2564b3087c8a3c674da1a7 | /NUP153_AnalyseComplex/WT_Minimization/chainBCP/WT_chainBCP_Minimization_6.py | 93b4c101aa994d29251e3c4bc96938fd94e7bad9 | [] | no_license | yanghaobojordan/HIV1-Capsid | b22e21a9ad530ae11f128f409e298c5ab68871ee | f44f04dc9886e660c1fe870936c48e0e5bb5adc6 | refs/heads/main | 2023-04-09T01:27:26.626676 | 2021-04-23T18:17:07 | 2021-04-23T18:17:07 | 360,968,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,838 | py | from pyrosetta import *
from pyrosetta import PyMOLMover
from pyrosetta.toolbox import cleanATOM
from pyrosetta.toolbox import get_secstruct
from pyrosetta.teaching import *
from pyrosetta.toolbox import get_hbonds
from pyrosetta.toolbox import mutate_residue
from pyrosetta.rosetta.protocols.relax import *
from pyrosetta.rosetta.protocols.simple_moves import *
from pyrosetta.rosetta.core.fragment import *
from pyrosetta.rosetta.protocols.moves import *
from pyrosetta.rosetta.protocols.rigid import *
from pyrosetta.rosetta.protocols.docking import *
import sys
init()
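# Protocol sketch (comment added for clarity): load the pose, repack side
# chains under Monte Carlo acceptance (20 cycles), then run gradient-based
# all-atom minimization (dfpmin, 10000 cycles), recovering the lowest-scoring
# structure after each stage and logging score/RMSD to the text file.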
def main():
filename=sys.argv[1]
pose=pose_from_pdb(filename)
test=Pose()
test.assign(pose)
scorefxn=get_fa_scorefxn()
dumpfile = 'WT_chainBCP_Minimization_6.pdb'
txtfile = 'WT_chainBCP_Minimization_6.txt'
newfile = open(txtfile, "w")
newfile.write(str(scorefxn(test)))
newfile.write('\n')
kT = 1
mc = MonteCarlo(test, scorefxn, kT)
min_mover = MinMover()
mm = MoveMap()
mm.set_bb(True)
mm.set_chi(True)
min_mover.movemap(mm)
min_mover.score_function(scorefxn)
min_mover.min_type("dfpmin")
min_mover.tolerance(0.001)
task_pack=standard_packer_task(test)
task_pack.restrict_to_repacking()
task_pack.or_include_current(True)
pack_mover=PackRotamersMover(scorefxn, task_pack)
for i in range(20):
pack_mover.apply(test)
mc.boltzmann(test)
newfile.write(str(i))
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write(' ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('\n')
mc.recover_low(test)
print ('Repacking Complete')
print ('Lowest Score ', scorefxn(test))
print (mc.show_scores())
print (mc.show_counters())
print (mc.show_state())
newfile.write('Repacking Complete')
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write('\n')
for i in range(10000):
min_mover.apply(test)
mc.boltzmann(test)
newfile.write(str(i))
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write(' ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('\n')
mc.recover_low(test)
print ('Minimization Complete')
print ('Lowest Score ', scorefxn(test))
print (mc.show_scores())
print (mc.show_counters())
print (mc.show_state())
newfile.write('Minimization Complete')
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write('\n')
newfile.write('RMSD')
newfile.write(' ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('\n')
newfile.close()
test.dump_pdb(dumpfile)
main()
| [
"[email protected]"
] | |
9f4a4daa608a920aafce684a30429bf510d9d867 | b381b5ce79ec03e281cba7e6ea253b286205fba1 | /openstack/map_reduce/v1/job.py | 2ab8baab2c33c86bccb1719887437cb769f0ca27 | [
"Apache-2.0"
] | permissive | sun363587351/python-openstacksdk | 972eedc24199c3b8a15bd21accd29a6ec70febdb | f9e055300b1c79637d7b6a791168427f27322d73 | refs/heads/master | 2020-12-02T19:23:01.771376 | 2017-07-05T13:24:47 | 2017-07-05T13:24:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,901 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource2 as resource
from openstack.auto_scaling import auto_scaling_service
from openstack.auto_scaling.v1 import get_next_marker
from openstack.map_reduce import map_reduce_service
class ExecutableJob(resource.Resource):
"""Executable Job
The executable job indicates for a job and job-execution(I do not know why
the attributes is so different with job and job-execution...)
"""
#: Properties
#: Job name
job_name = resource.Body("job_name")
#: Job type, supports: ``MapReduce``, ``Spark``, ``Hive``, ``hql``,
#: ``DistCp``, ``SparkScript``, ``SparkSql``
job_type = resource.Body("job_type")
#: Path of the .jar package or .sql file for job execution
jar_path = resource.Body("jar_path")
#: Key parameter for job execution
arguments = resource.Body("arguments")
#: Path for inputting data which must start with ``/`` or ``s3a://``
input = resource.Body("input")
#: Path for outputting data, which must start with / or s3a://
output = resource.Body("output")
#: Path for storing job logs that record job running status.
#: This path must start with / or s3a://
job_log = resource.Body("job_log")
#: Whether to delete the cluster after the jobs are complete
shutdown_cluster = resource.Body("shutdown_cluster")
#: Data import and export
file_action = resource.Body("file_action")
#: whether to submit the job when the cluster is ready.
submit_job_once_cluster_run = resource.Body(
"submit_job_once_cluster_run", type=bool)
#: HiveQL statement
hql = resource.Body("hql")
#: SQL program path
hive_script_path = resource.Body("hive_script_path")
#: Reserved attribute, is job protected
is_protected = resource.Body("is_protected", type=bool)
#: Reserved attribute, is job public
is_public = resource.Body("is_public", type=bool)
class Job(resource.Resource):
resource_key = "job"
resources_key = "jobs"
base_path = "/jobs"
service = map_reduce_service.MapReduceService()
# capabilities
allow_create = True
allow_update = True
patch_update = True
allow_list = True
allow_get = True
allow_delete = True
_query_mapping = resource.QueryParameters(
"sort_by"
)
#: Properties
#: Job name
name = resource.Body("name")
#: Job type, supports: ``MapReduce``, ``Spark``, ``Hive``, ``hql``,
#: ``DistCp``, ``SparkScript``, ``SparkSql``
type = resource.Body("type")
#: A list of programs to be executed by the job
mains = resource.Body("mains", type=list)
#: A list of job-binaries required by the job
libs = resource.Body("libs", type=list)
#: Reserved attribute, user customer interfaces
interface = resource.Body("interface", type=list)
#: Job description
description = resource.Body("description")
#: Reserved attribute, is job protected
is_protected = resource.Body("is_protected", type=bool)
#: Reserved attribute, is job public
is_public = resource.Body("is_public", type=bool)
#: UTC date and time of the job created time
created_at = resource.Body("created_at")
#: UTC date and time of the job last updated time
updated_at = resource.Body("updated_at")
#: The tenant this job belongs to
tenant_id = resource.Body("tenant_id")
| [
"[email protected]"
] | |
2d25dc2c818efe033ad59ee1eb11f2c8ccfde452 | 1ea966542e28e24b2f3f7d5e0352cbdc110a979a | /Algorithm/Programmers/Programmers_2개이하로다른비트.py | 9a1ef5bccf76215078cf0ce46a5b4b707c54eb9b | [] | no_license | yunhacho/SelfStudy | 9ff7002362f6e9d8fe7d1ca3ccf94ee96726f635 | 99912af3df014a6864893c9274dbf83ff9ed05a8 | refs/heads/main | 2023-08-25T06:56:21.116419 | 2021-10-28T07:35:09 | 2021-10-28T07:35:09 | 360,470,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | def solution(numbers):
binarys=[]
for n in numbers:
b='0'+ bin(n)[2:]; i=len(b)-b[::-1].find('0')-1
binarys.append('{}{}{}'.format(b[:i],'1' if len(b)-1==i else '10',b[i+2:]))
return [int(b, 2) for b in binarys] | [
"[email protected]"
] | |
dbbb78de7586a3fd69f564f7384cb29ca7f56999 | 5793b470eea39ba99ff4d16325d462440647b77d | /System/Threads/thread-count.py | e0c36329cec628396bc4ee866d2c6f9daca41c6c | [] | no_license | zhongjiezheng/python | 01a99438fc4681817d4d0e623673afa1e488864c | 5c5725ad0e75d07e016b64d79eddf3d88a524fa0 | refs/heads/master | 2021-01-17T11:57:12.373343 | 2015-07-08T09:04:48 | 2015-07-08T09:04:48 | 35,549,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | """
thread basics: start 5 copies of a function running in parallel;
uses time.sleep so that the main thread doesn't die too earlly--
this kills all other threads on some platforms; stdout is shared:
thread output may be intermixd in this version arbitrarily.
"""
import _thread as thread, time
def counter(myId, count):
for i in range(count):
time.sleep(1)
print('[%s] => %s' % (myId, i))
for i in range(5):
thread.start_new_thread(counter,(i, 5))
time.sleep(6)
print('Main thread exiting.') | [
"[email protected]"
] | |
48b6b7cb9368a9db6760084e0982e05ee92758d6 | 04142fdda9b3fb29fb7456d5bc3e504985f24cbe | /mmcv/ops/masked_conv.py | 919702e9cbd04b9e1f5c93147bcced8a1be38c61 | [
"Apache-2.0"
] | permissive | open-mmlab/mmcv | 419e301bbc1d7d45331d67eccfd673f290a796d5 | 6e9ee26718b22961d5c34caca4108413b1b7b3af | refs/heads/main | 2023-08-31T07:08:27.223321 | 2023-08-28T09:02:10 | 2023-08-28T09:02:10 | 145,670,155 | 5,319 | 1,900 | Apache-2.0 | 2023-09-14T02:37:16 | 2018-08-22T07:05:26 | Python | UTF-8 | Python | false | false | 4,851 | py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from ..utils import ext_loader
ext_module = ext_loader.load_ext(
'_ext', ['masked_im2col_forward', 'masked_col2im_forward'])
class MaskedConv2dFunction(Function):
@staticmethod
def symbolic(g, features, mask, weight, bias, padding, stride=1):
return g.op(
'mmcv::MMCVMaskedConv2d',
features,
mask,
weight,
bias,
padding_i=padding,
stride_i=stride)
@staticmethod
def forward(ctx,
features: torch.Tensor,
mask: torch.Tensor,
weight: torch.nn.Parameter,
bias: torch.nn.Parameter,
padding: int = 0,
stride: int = 1) -> torch.Tensor:
assert mask.dim() == 3 and mask.size(0) == 1
assert features.dim() == 4 and features.size(0) == 1
assert features.size()[2:] == mask.size()[1:]
pad_h, pad_w = _pair(padding)
stride_h, stride_w = _pair(stride)
if stride_h != 1 or stride_w != 1:
raise ValueError(
'Stride could not only be 1 in masked_conv2d currently.')
out_channel, in_channel, kernel_h, kernel_w = weight.size()
if features.device.type == 'npu':
import torch_npu
output = torch_npu.npu_conv2d(
features,
weight,
bias,
stride=(stride_h, stride_w),
padding=(pad_h, pad_w),
dilation=(1, 1),
groups=1)
if mask.size()[1:] != output.size()[2:]:
raise ValueError(
'The mask is inconsistent with the shape of output_conv.')
mask = mask > 0
mask = mask.type(output.dtype)
output = output * mask
return output
batch_size = features.size(0)
out_h = int(
math.floor(
torch.true_divide((features.size(2) + 2 * pad_h -
(kernel_h - 1) - 1), stride_h) + 1))
out_w = int(
math.floor(
torch.true_divide((features.size(3) + 2 * pad_w -
(kernel_w - 1) - 1), stride_w) + 1))
mask_inds = torch.nonzero(mask[0] > 0, as_tuple=False)
output = features.new_zeros(batch_size, out_channel, out_h, out_w)
if mask_inds.numel() > 0:
mask_h_idx = mask_inds[:, 0].contiguous()
mask_w_idx = mask_inds[:, 1].contiguous()
data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
mask_inds.size(0))
ext_module.masked_im2col_forward(
features,
mask_h_idx,
mask_w_idx,
data_col,
kernel_h=kernel_h,
kernel_w=kernel_w,
pad_h=pad_h,
pad_w=pad_w)
masked_output = torch.addmm(1, bias[:, None], 1,
weight.view(out_channel, -1), data_col)
ext_module.masked_col2im_forward(
masked_output,
mask_h_idx,
mask_w_idx,
output,
height=out_h,
width=out_w,
channels=out_channel)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output: torch.Tensor) -> tuple:
return (None, ) * 5
masked_conv2d = MaskedConv2dFunction.apply
class MaskedConv2d(nn.Conv2d):
"""A MaskedConv2d which inherits the official Conv2d.
The masked forward doesn't implement the backward function and only
supports the stride parameter to be 1 currently.
"""
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, ...]],
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = True):
super().__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
def forward(self,
input: torch.Tensor,
mask: Optional[torch.Tensor] = None) -> torch.Tensor:
if mask is None: # fallback to the normal Conv2d
return super().forward(input)
else:
return masked_conv2d(input, mask, self.weight, self.bias,
self.padding)
| [
"[email protected]"
] | |
745b47f4b9653e1adb5938a611487ad9e3201e35 | 303bac96502e5b1666c05afd6c2e85cf33f19d8c | /solutions/python3/944.py | bd687a861c6ef672174fcc914271cffea1314b06 | [
"MIT"
] | permissive | jxhangithub/leetcode | 5e82f4aeee1bf201e93e889e5c4ded2fcda90437 | 0de1af607557d95856f0e4c2a12a56c8c57d731d | refs/heads/master | 2022-05-22T12:57:54.251281 | 2022-03-09T22:36:20 | 2022-03-09T22:36:20 | 370,508,127 | 1 | 0 | MIT | 2022-03-09T22:36:20 | 2021-05-24T23:16:10 | null | UTF-8 | Python | false | false | 138 | py | class Solution:
def minDeletionSize(self, A):
return sum(any(a[j] > b[j] for a, b in zip(A, A[1:])) for j in range(len(A[0]))) | [
"[email protected]"
] | |
4da391e6845015007b01093614347747e5b52720 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R4/benchmark/startPyquil309.py | b9a32c54bbe98a26e0fe98284c0b30b521a3eef5 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | # qubit number=4
# total number=12
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=5
prog += SWAP(1,0) # number=6
prog += SWAP(1,0) # number=7
prog += Y(3) # number=8
prog += Y(3) # number=9
prog += SWAP(1,0) # number=10
prog += SWAP(1,0) # number=11
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil309.csv","w")
    print(summarise_results(bitstrings),file=writefile)
writefile.close()
| [
"[email protected]"
] | |
9c2e5b9526c6eadce1fc38a03bb4c1f15495d7bc | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/neutron_create_security_group_option.py | 0f2264525c7e8c7ce51ffcb365afd1fd1693468f | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,837 | py | # coding: utf-8
import pprint
import re
import six
class NeutronCreateSecurityGroupOption:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'description': 'str',
'name': 'str'
}
attribute_map = {
'description': 'description',
'name': 'name'
}
def __init__(self, description=None, name=None):
"""NeutronCreateSecurityGroupOption - a model defined in huaweicloud sdk"""
self._description = None
self._name = None
self.discriminator = None
if description is not None:
self.description = description
if name is not None:
self.name = name
@property
def description(self):
"""Gets the description of this NeutronCreateSecurityGroupOption.
        Security group description. Value range: 0-255 characters.
:return: The description of this NeutronCreateSecurityGroupOption.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this NeutronCreateSecurityGroupOption.
        Security group description. Value range: 0-255 characters.
:param description: The description of this NeutronCreateSecurityGroupOption.
:type: str
"""
self._description = description
@property
def name(self):
"""Gets the name of this NeutronCreateSecurityGroupOption.
        Security group name. Value range: 0-255 characters. Constraint: must not be "default".
:return: The name of this NeutronCreateSecurityGroupOption.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this NeutronCreateSecurityGroupOption.
        Security group name. Value range: 0-255 characters. Constraint: must not be "default".
:param name: The name of this NeutronCreateSecurityGroupOption.
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NeutronCreateSecurityGroupOption):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
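

# Illustrative use (not part of the generated SDK source): construct the
# option object and serialize it the way the request layer would.
#
#     opt = NeutronCreateSecurityGroupOption(name='web-sg',
#                                            description='allow http/https')
#     payload = opt.to_dict()  # {'description': 'allow http/https', 'name': 'web-sg'}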
| [
"[email protected]"
] | |
23857be0068cb1b58270601d7ea42d5393abbad8 | 2dfbb97b47fd467f29ffb26faf9a9f6f117abeee | /leetcode/84.py | a8c43f1a9b98fb79e9c45be4a3ddfa8e973b06fe | [] | no_license | liuweilin17/algorithm | 0e04b2d36dfb6b7b1b0e0425daf69b62273c54b5 | d3e8669f932fc2e22711e8b7590d3365d020e189 | refs/heads/master | 2020-12-30T11:03:40.085105 | 2020-04-10T03:46:01 | 2020-04-10T03:46:01 | 98,844,919 | 3 | 1 | null | 2018-10-05T03:01:02 | 2017-07-31T03:35:14 | C++ | UTF-8 | Python | false | false | 4,644 | py | ###########################################
# Let's Have Some Fun
# File Name: 84.py
# Author: Weilin Liu
# Mail: [email protected]
# Created Time: Fri Sep 27 11:00:23 2019
###########################################
#coding=utf-8
#!/usr/bin/python
# 84. Largest Rectangle in Histogram
from typing import List
class SegmentNode:
def __init__(self, begin, end):
self.min_v = -1
self.begin = begin
self.end = end
self.left = None
self.right = None
class Solution:
    # O(n^3), time limit exceeded
def largestRectangleArea1(self, heights: List[int]) -> int:
N = len(heights)
ret = 0
for i in range(N):
for j in range(i, N):
ret = max(ret, min(heights[i:j+1])*(j-i+1))
return ret
    # O(n^2), time limit exceeded
def largestRectangleArea2(self, heights: List[int]) -> int:
N = len(heights)
ret = 0
for i in range(N):
min_h = heights[i]
for j in range(i, N):
min_h = min(min_h, heights[j])
ret = max(ret, min_h*(j-i+1))
return ret
# divide and conquer
# the maximum area of rectangle is one of these:
# 1. minimum height * number of bars
# 2. maximum area of bars on the left of minimum height
# 3. maximum area of bars on the right of minimum height
# average O(nlogn)
# worst O(n^2) when heights are sorted
    # time limit exceeded
def largestRectangleArea3(self, heights: List[int]) -> int:
def helper(begin, end):
if begin > end: return 0
min_ind = begin
min_height = heights[min_ind]
for i in range(begin+1, end+1):
if heights[i] < min_height:
min_ind = i
min_height = heights[i]
a1 = min_height * (end - begin + 1)
a2 = helper(begin, min_ind-1)
a3 = helper(min_ind+1, end)
return max([a1, a2, a3])
N = len(heights)
return helper(0, N-1)
# divide and conquer with segment tree
def largestRectangleArea4(self, heights: List[int]) -> int:
# build segment tree for find mininum value in heights
def buildSegmentTree(begin, end):
if begin > end: return None
root = SegmentNode(begin, end)
if begin == end:
root.min_v = begin
return root
else:
middle = (begin + end) // 2
root.left = buildSegmentTree(begin, middle)
root.right = buildSegmentTree(middle+1, end)
root.min_v = root.left.min_v if heights[root.left.min_v] < heights[root.right.min_v] else root.right.min_v
return root
# find the mininum value in segment tree
def query(nd, begin, end):
            if nd is None or begin > nd.end or end < nd.begin:
return -1
            # the node's range is fully covered by [begin, end], so its
            # cached minimum index answers the query directly
if begin <= nd.begin and end >= nd.end:
return nd.min_v
left_min = query(nd.left, begin, end)
right_min = query(nd.right, begin, end)
if left_min == -1: return right_min
if right_min == -1: return left_min
return left_min if heights[left_min] < heights[right_min] else right_min
def helper(begin, end):
if begin > end: return 0
elif begin == end: return heights[begin]
else: pass
min_ind = query(root, begin, end)
min_height = heights[min_ind]
a1 = min_height * (end - begin + 1)
a2 = helper(begin, min_ind-1)
a3 = helper(min_ind+1, end)
return max([a1, a2, a3])
N = len(heights)
root = buildSegmentTree(0, N-1)
return helper(0, N-1)
    # stack, O(n)
    # the stack keeps indices of bars in increasing height order; when a lower
    # bar arrives, pop each taller bar and settle its rectangle, which spans
    # from the index below it on the stack to the current index
def largestRectangleArea5(self, heights: List[int]) -> int:
st = [-1] # use -1 to calculate the minimum width
N = len(heights)
max_area = 0
for i in range(N):
while st[-1] != -1 and heights[st[-1]] >= heights[i]:
max_area = max(max_area, heights[st.pop()] * (i - st[-1] - 1))
st.append(i)
while st[-1] != -1:
max_area = max(max_area, heights[st.pop()] * (len(heights) - st[-1] -1))
return max_area
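

# Hypothetical self-check on the classic LeetCode 84 example; all five
# variants should agree, largestRectangleArea5 being the O(n) stack one.
if __name__ == '__main__':
    s = Solution()
    assert s.largestRectangleArea5([2, 1, 5, 6, 2, 3]) == 10  # bars 5 and 6
    print('stack solution ok')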
| [
"[email protected]"
] | |
658a507fcf159ac4b48d14cc5cca2cfada4e319d | 3c2a197bf32e72444c5f36559ad0cb9b64035516 | /codeskeleton/value_generators/random_int.py | d9aac5df824e8d25278a8ef6f114953e6b7a0a9f | [] | no_license | appressoas/codeskeleton | c538bbfccf19735464dc42996b754cf9199a14a3 | 604011cb27c47b02d325379895bc23b543797216 | refs/heads/master | 2021-01-19T19:24:21.100020 | 2017-06-26T09:34:45 | 2017-06-26T09:34:45 | 88,417,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | import random
from .registry import GENERATOR_REGISTRY
def random_int(from_int=0, to_int=999999999):
return random.randint(from_int, to_int)
GENERATOR_REGISTRY.register(generator_name='random_int', function=random_int)
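
# Example draws (assuming the registry resolves 'random_int' to this function):
#     random_int()      # any int in [0, 999999999]
#     random_int(1, 6)  # a dice roll in [1, 6]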
| [
"[email protected]"
] | |
161c6671e458fed554bf825c179cc4b7abb336c1 | 96aab9f77de8170ae93004d699bd0b11e820b2d4 | /rest/app/user/urls.py | c0b6dccc152b56208b129bc04563ca5b6a09e9fd | [] | no_license | JasoSalgado/rest-app | 8dbc842d6de0ec705fd04bc94e79ee75ad80f2e2 | 3d1662800bd1e98142a0edca244c82498cc4832b | refs/heads/master | 2022-11-15T15:25:44.135084 | 2020-07-16T14:58:59 | 2020-07-16T14:58:59 | 280,182,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | from django.conf.urls import url
from rest.app.user.views import UserRegistrationView, UserLoginView
urlpatterns = [
url(r'^signup', UserRegistrationView.as_view()),
url(r'^signin', UserLoginView.as_view()),
] | [
"[email protected]"
] | |
a69b7932bdbaf598e483e8e989db6149b82fd61c | b650ecac89df96d01b3371806b931858eaa202cc | /io_scene_gltf2/blender/imp/gltf2_blender_skin.py | b2b39a2b94764e627b2e5bd93979c919ba73d9a1 | [] | no_license | yazici/blender-addons | a92a87e6f96d130cf32c94227cbd5623fe129ad9 | 85bda1d3f7d20766561ef73c864a4a6872d93a23 | refs/heads/master | 2020-04-25T18:26:34.571420 | 2019-02-25T15:48:03 | 2019-02-25T15:48:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,427 | py | # Copyright 2018 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from mathutils import Vector, Matrix
from ..com.gltf2_blender_conversion import matrix_gltf_to_blender, scale_to_matrix
from ...io.imp.gltf2_io_binary import BinaryData
class BlenderSkin():
"""Blender Skinning / Armature."""
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def create_armature(gltf, skin_id, parent):
"""Armature creation."""
pyskin = gltf.data.skins[skin_id]
if pyskin.name is not None:
name = pyskin.name
else:
name = "Armature_" + str(skin_id)
armature = bpy.data.armatures.new(name)
obj = bpy.data.objects.new(name, armature)
bpy.data.scenes[gltf.blender_scene].collection.objects.link(obj)
pyskin.blender_armature_name = obj.name
if parent is not None:
obj.parent = bpy.data.objects[gltf.data.nodes[parent].blender_object]
@staticmethod
def set_bone_transforms(gltf, skin_id, bone, node_id, parent):
"""Set bone transformations."""
pyskin = gltf.data.skins[skin_id]
pynode = gltf.data.nodes[node_id]
obj = bpy.data.objects[pyskin.blender_armature_name]
# Set bone bind_pose by inverting bindpose matrix
if node_id in pyskin.joints:
index_in_skel = pyskin.joints.index(node_id)
if pyskin.inverse_bind_matrices is not None:
inverse_bind_matrices = BinaryData.get_data_from_accessor(gltf, pyskin.inverse_bind_matrices)
# Needed to keep scale in matrix, as bone.matrix seems to drop it
if index_in_skel < len(inverse_bind_matrices):
pynode.blender_bone_matrix = matrix_gltf_to_blender(
inverse_bind_matrices[index_in_skel]
).inverted()
bone.matrix = pynode.blender_bone_matrix
else:
gltf.log.error("Error with inverseBindMatrix for skin " + pyskin)
else:
pynode.blender_bone_matrix = Matrix() # 4x4 identity matrix
else:
print('No invBindMatrix for bone ' + str(node_id))
pynode.blender_bone_matrix = Matrix()
# Parent the bone
if parent is not None and hasattr(gltf.data.nodes[parent], "blender_bone_name"):
bone.parent = obj.data.edit_bones[gltf.data.nodes[parent].blender_bone_name] # TODO if in another scene
# Switch to Pose mode
bpy.ops.object.mode_set(mode="POSE")
obj.data.pose_position = 'POSE'
# Set posebone location/rotation/scale (in armature space)
# location is actual bone location minus it's original (bind) location
bind_location = Matrix.Translation(pynode.blender_bone_matrix.to_translation())
bind_rotation = pynode.blender_bone_matrix.to_quaternion()
bind_scale = scale_to_matrix(pynode.blender_bone_matrix.to_scale())
location, rotation, scale = matrix_gltf_to_blender(pynode.transform).decompose()
if parent is not None and hasattr(gltf.data.nodes[parent], "blender_bone_matrix"):
parent_mat = gltf.data.nodes[parent].blender_bone_matrix
# Get armature space location (bindpose + pose)
# Then, remove original bind location from armspace location, and bind rotation
final_location = (bind_location.inverted() @ parent_mat @ Matrix.Translation(location)).to_translation()
obj.pose.bones[pynode.blender_bone_name].location = \
bind_rotation.inverted().to_matrix().to_4x4() @ final_location
# Do the same for rotation
obj.pose.bones[pynode.blender_bone_name].rotation_quaternion = \
(bind_rotation.to_matrix().to_4x4().inverted() @ parent_mat @
rotation.to_matrix().to_4x4()).to_quaternion()
obj.pose.bones[pynode.blender_bone_name].scale = \
(bind_scale.inverted() @ parent_mat @ scale_to_matrix(scale)).to_scale()
else:
obj.pose.bones[pynode.blender_bone_name].location = bind_location.inverted() @ location
obj.pose.bones[pynode.blender_bone_name].rotation_quaternion = bind_rotation.inverted() @ rotation
obj.pose.bones[pynode.blender_bone_name].scale = bind_scale.inverted() @ scale
@staticmethod
def create_bone(gltf, skin_id, node_id, parent):
"""Bone creation."""
pyskin = gltf.data.skins[skin_id]
pynode = gltf.data.nodes[node_id]
scene = bpy.data.scenes[gltf.blender_scene]
obj = bpy.data.objects[pyskin.blender_armature_name]
bpy.context.window.scene = scene
bpy.context.view_layer.objects.active = obj
bpy.ops.object.mode_set(mode="EDIT")
if pynode.name:
name = pynode.name
else:
name = "Bone_" + str(node_id)
bone = obj.data.edit_bones.new(name)
pynode.blender_bone_name = bone.name
pynode.blender_armature_name = pyskin.blender_armature_name
bone.tail = Vector((0.0, 1.0, 0.0)) # Needed to keep bone alive
# set bind and pose transforms
BlenderSkin.set_bone_transforms(gltf, skin_id, bone, node_id, parent)
bpy.ops.object.mode_set(mode="OBJECT")
@staticmethod
def create_vertex_groups(gltf, skin_id):
"""Vertex Group creation."""
pyskin = gltf.data.skins[skin_id]
for node_id in pyskin.node_ids:
obj = bpy.data.objects[gltf.data.nodes[node_id].blender_object]
for bone in pyskin.joints:
obj.vertex_groups.new(name=gltf.data.nodes[bone].blender_bone_name)
@staticmethod
def assign_vertex_groups(gltf, skin_id):
"""Assign vertex groups to vertices."""
pyskin = gltf.data.skins[skin_id]
for node_id in pyskin.node_ids:
node = gltf.data.nodes[node_id]
obj = bpy.data.objects[node.blender_object]
offset = 0
for prim in gltf.data.meshes[node.mesh].primitives:
idx_already_done = {}
if 'JOINTS_0' in prim.attributes.keys() and 'WEIGHTS_0' in prim.attributes.keys():
original_joint_ = BinaryData.get_data_from_accessor(gltf, prim.attributes['JOINTS_0'])
original_weight_ = BinaryData.get_data_from_accessor(gltf, prim.attributes['WEIGHTS_0'])
tmp_indices = {}
tmp_idx = 0
weight_ = []
for i in prim.tmp_indices:
if i[0] not in tmp_indices.keys():
tmp_indices[i[0]] = tmp_idx
tmp_idx += 1
weight_.append(original_weight_[i[0]])
tmp_indices = {}
tmp_idx = 0
joint_ = []
for i in prim.tmp_indices:
if i[0] not in tmp_indices.keys():
tmp_indices[i[0]] = tmp_idx
tmp_idx += 1
joint_.append(original_joint_[i[0]])
for poly in obj.data.polygons:
for loop_idx in range(poly.loop_start, poly.loop_start + poly.loop_total):
vert_idx = obj.data.loops[loop_idx].vertex_index
if vert_idx in idx_already_done.keys():
continue
idx_already_done[vert_idx] = True
if vert_idx in range(offset, offset + prim.vertices_length):
tab_index = vert_idx - offset
cpt = 0
for joint_idx in joint_[tab_index]:
weight_val = weight_[tab_index][cpt]
if weight_val != 0.0: # It can be a problem to assign weights of 0
# for bone index 0, if there is always 4 indices in joint_
# tuple
group = obj.vertex_groups[gltf.data.nodes[
pyskin.joints[joint_idx]
].blender_bone_name]
group.add([vert_idx], weight_val, 'REPLACE')
cpt += 1
else:
gltf.log.error("No Skinning ?????") # TODO
offset = offset + prim.vertices_length
@staticmethod
def create_armature_modifiers(gltf, skin_id):
"""Create Armature modifier."""
pyskin = gltf.data.skins[skin_id]
if pyskin.blender_armature_name is None:
# TODO seems something is wrong
# For example, some joints are in skin 0, and are in another skin too
# Not sure this is glTF compliant, will check it
return
for node_id in pyskin.node_ids:
node = gltf.data.nodes[node_id]
obj = bpy.data.objects[node.blender_object]
for obj_sel in bpy.context.scene.objects:
obj_sel.select_set(False)
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
# bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
# Reparent skinned mesh to it's armature to avoid breaking
# skinning with interleaved transforms
obj.parent = bpy.data.objects[pyskin.blender_armature_name]
arma = obj.modifiers.new(name="Armature", type="ARMATURE")
arma.object = bpy.data.objects[pyskin.blender_armature_name]
| [
"[email protected]"
] | |
7a021ccaf3c670c98dfc5155d1cbd84b76bfd436 | 2caf6885511af24443e22aaa43cd679d694f6f80 | /note/my_note/first_month/day06/do_it.py | ec11f1b1af222ae157ca35960d3fb73a0a203e08 | [] | no_license | nandadao/Python_note | 7f9ba54a73af05c935b4f7e24cacb728859a6c69 | abddfc2e9a1704c88867cff1898c9251f59d4fb5 | refs/heads/master | 2020-11-25T18:29:50.607670 | 2019-12-19T01:28:02 | 2019-12-19T01:28:02 | 228,793,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | # dict_like = {
# "qtx":["编码","看书","跑步"],
# "lzmly":["看电影","编码","美食","唱歌"]
# }
#
# # for i in dict_like["qtx"]:
# # print("qtx",i)
#
# # everyone's hobbies
# for item in dict_like:
# for i in dict_like[item]:
# print(item, i)
dict_city = {
"北京":{
"景区":["天安门","天坛","故宫"],
"美食":["驴打滚","豆汁"]
},
"四川":{
"景区":["九寨沟","宽窄巷子"],
"美食":["火锅","串串香"]
},
}
for i in dict_city["北京"]["景区"]:
print("北京美食",i)
for item in dict_city:
print("城市有:", item)
for item in dict_city:
for i in dict_city[item]["景区"]:
print(item, i)
| [
"[email protected]"
] | |
17ff71f9320ed1b5a19d7b730f0302b2113591eb | 196f7e3238f961fb5eba7a794f0b0c75d7c30ba1 | /Python自动化运维技术与最佳实践/2业务服务监控/213对比nginx配置文件test.py | 9cc58a6e05df6dd2f918590e93150457966d7b24 | [] | no_license | Liaoyingjie/Pythonlearn | d0b1b95110017af7e063813660e52c61a6333575 | 8bca069f38a60719acac5aa39bd347f90ab0bfb1 | refs/heads/master | 2020-04-08T07:35:07.357487 | 2018-04-12T16:44:43 | 2018-04-12T16:44:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | #!/usr/bin/python
import difflib
import sys
textfile1='nginx.conf.v1'
textfile2='nginx.conf.v2'
def readfile(filename):
try:
        fileHandle = open(filename, 'r')  # 'a+' positions the cursor at EOF, so read() returned ''
text=fileHandle.read().splitlines()
fileHandle.close()
return text
except IOError as error:
print('Read file Error:'+str(error))
sys.exit()
if textfile1=="" or textfile2=="":
print("Usage: simple3.py filename1 filename2")
sys.exit()
#textfile1='HtmlDiff.html'
#textfile2='HtmlDiff111.html'
text1_lines = readfile(textfile1)
text2_lines = readfile(textfile2)
d = difflib.HtmlDiff()
#print(d.make_file(text1_lines, text2_lines))
f=open('213对比Nginx网页结果.html', 'a+')
print((d.make_file(text1_lines,text2_lines)),file=f)
f.close()
| [
"[email protected]"
] | |
df1b483b12a18f285047ae7d1f7b07f90b38e4ab | a43504f11666edffa9497630d9fcad31566b4349 | /app/bot_engine/request_makers.py | 7c475e770f2ee3cf8dc298e5fc7d6fa180ffd930 | [] | no_license | korid24/shop_list_webhook_bot | 4896d0e731679815b043ba4c997651fe4f3682a9 | efe359b42a8b625ea78b855664358937419a1785 | refs/heads/master | 2022-12-29T09:51:16.212730 | 2020-10-22T10:16:20 | 2020-10-22T10:16:20 | 306,046,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,933 | py | import json
import requests
from typing import List, NamedTuple, Optional, Union
from config import AUTH_TOKEN, BASE_URL
# from local_utils import write_json
from bot_engine.utils import path_to
HEADERS = {
'Content-type': 'application/json',
'Authorization': 'token {}'.format(AUTH_TOKEN),
'Accept-Language': 'en-US'}
class RequestConstructor(NamedTuple):
"""
    Template for the data describing a single HTTP request
"""
url: str
method: str
data: Optional[Union[list, dict]]
def create_user(
telegram_id: int,
first_name: Optional[str],
last_name: Optional[str],
nickname: Optional[str]) -> RequestConstructor:
"""
    Builds the request info for creating a user
"""
data = {
'telegram_id': telegram_id,
'first_name': first_name,
'last_name': last_name,
'nickname': nickname}
url = BASE_URL + path_to('user')
return RequestConstructor(url=url, data=data, method='post')
def add_elements(
telegram_id: int,
position: int,
elements: List[str]) -> RequestConstructor:
"""
    Builds the request info for adding purchase elements
"""
data = []
for element in elements:
data.append({'title': element})
if position:
url = (BASE_URL + path_to('user', telegram_id) +
path_to('purchaselist', position) +
path_to('purchase') + 'bulk_create/')
else:
url = (BASE_URL + path_to('user', telegram_id) +
path_to('purchaselist') + 'bulk_create/')
return RequestConstructor(url=url, data=data, method='post')
def replace_element(
telegram_id: int,
position: int,
old_ind: int,
new_ind: int) -> RequestConstructor:
"""
    Builds the request info for moving an element to a new index
"""
data = {'ind': new_ind}
if position:
url = (BASE_URL + path_to('user', telegram_id) +
path_to('purchaselist', position) +
path_to('purchase', old_ind))
else:
url = (BASE_URL + path_to('user', telegram_id) +
path_to('purchaselist', old_ind))
return RequestConstructor(url=url, data=data, method='patch')
def remove_elements(
telegram_id: int,
position: int,
elements: List[int]) -> RequestConstructor:
"""
    Builds the request info for deleting elements
"""
data = {'items': elements}
if position:
url = (BASE_URL + path_to('user', telegram_id) +
path_to('purchaselist', position) +
path_to('purchase') + 'bulk_delete/')
else:
url = (BASE_URL + path_to('user', telegram_id) +
path_to('purchaselist') + 'bulk_delete/')
return RequestConstructor(url=url, data=data, method='delete')
def get_all(telegram_id: int) -> RequestConstructor:
"""
    Builds the request info for fetching the user's full data
"""
url = BASE_URL + path_to('user', telegram_id)
return RequestConstructor(url=url, data=None, method='get')
def make_request(
info: RequestConstructor, answer: bool = True) -> Union[dict, int]:
"""
    Performs the request described by ``info``. Returns the parsed response
    body, or just the status code when ``answer`` is False
"""
response = requests.request(
method=info.method,
url=info.url,
data=json.dumps(info.data),
headers=HEADERS)
if not answer:
return response.status_code
return response.json()
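

# Hypothetical flow (AUTH_TOKEN/BASE_URL come from config; the values below
# are made up for illustration):
#
#     info = create_user(telegram_id=42, first_name='Ann',
#                        last_name=None, nickname='ann42')
#     user = make_request(info)                  # parsed JSON body
#     status = make_request(info, answer=False)  # HTTP status code only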
| [
"[email protected]"
] | |
2d9da259fdc14b8cb5bf137e5ab76ab8e8182a96 | c019093a2474b92bda1b9fcab0ae750937aedc1c | /jaxlie/manifold/_manifold_helpers.py | 9bfeaae7af619693187e8bbe93f513efab7291ad | [
"MIT"
] | permissive | mfkiwl/jaxlie | 6c8a83d367299592a68bb80c7dc9816e9e006f09 | 4dbe16f3c1d1cfda30e0418ef5d1e1772cf9f537 | refs/heads/master | 2023-07-13T17:11:16.693321 | 2021-08-31T18:51:33 | 2021-08-31T18:51:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,652 | py | from typing import TypeVar, cast
import jax
from jax import numpy as jnp
from .. import hints
from .._base import MatrixLieGroup
from .._se2 import SE2
from .._se3 import SE3
from .._so2 import SO2
from .._so3 import SO3
T = TypeVar("T", bound=MatrixLieGroup)
@jax.jit
def rplus(transform: T, delta: hints.TangentVector) -> T:
"""Manifold right plus.
Computes `T_wb = T_wa @ exp(delta)`.
Args:
transform: `T_wa`
delta: `T_ab.log()`
Returns:
T: `T_wb`
"""
return transform @ type(transform).exp(delta)
@jax.jit
def rplus_jacobian_parameters_wrt_delta(transform: MatrixLieGroup) -> hints.MatrixJax:
"""Analytical Jacobians for `jaxlie.manifold.rplus()`, linearized around a zero
local delta.
Useful for on-manifold optimization.
Equivalent to --
```
def rplus_jacobian_parameters_wrt_delta(transform: MatrixLieGroup) -> jnp.ndarray:
# Since transform objects are pytree containers, note that `jacfwd` returns a
# transformation object itself and that the Jacobian terms corresponding to the
# parameters are grabbed explicitly.
return jax.jacfwd(
jaxlie.manifold.rplus, # Args are (transform, delta)
argnums=1, # Jacobian wrt delta
)(transform, onp.zeros(transform.tangent_dim)).parameters()
```
Args:
transform
Returns:
Jacobian. Shape should be `(Group.parameters_dim, Group.tangent_dim)`.
"""
if type(transform) is SO2:
# Jacobian row indices: cos, sin
# Jacobian col indices: theta
transform_so2 = cast(SO2, transform)
J = jnp.zeros((2, 1))
cos, sin = transform_so2.unit_complex
J = J.at[0].set(-sin).at[1].set(cos)
elif type(transform) is SE2:
# Jacobian row indices: cos, sin, x, y
# Jacobian col indices: vx, vy, omega
transform_se2 = cast(SE2, transform)
J = jnp.zeros((4, 3))
# Translation terms
J = J.at[2:, :2].set(transform_se2.rotation().as_matrix())
# Rotation terms
J = J.at[:2, 2:3].set(
rplus_jacobian_parameters_wrt_delta(transform_se2.rotation())
)
elif type(transform) is SO3:
# Jacobian row indices: qw, qx, qy, qz
# Jacobian col indices: omega x, omega y, omega z
transform_so3 = cast(SO3, transform)
w, x, y, z = transform_so3.wxyz
_unused_neg_w, neg_x, neg_y, neg_z = -transform_so3.wxyz
J = (
jnp.array(
[
[neg_x, neg_y, neg_z],
[w, neg_z, y],
[z, w, neg_x],
[neg_y, x, w],
]
)
/ 2.0
)
elif type(transform) is SE3:
# Jacobian row indices: qw, qx, qy, qz, x, y, z
# Jacobian col indices: vx, vy, vz, omega x, omega y, omega z
transform_se3 = cast(SE3, transform)
J = jnp.zeros((7, 6))
# Translation terms
J = J.at[4:, :3].set(transform_se3.rotation().as_matrix())
# Rotation terms
J = J.at[:4, 3:6].set(
rplus_jacobian_parameters_wrt_delta(transform_se3.rotation())
)
else:
assert False, f"Unsupported type: {type(transform)}"
assert J.shape == (transform.parameters_dim, transform.tangent_dim)
return J
@jax.jit
def rminus(a: T, b: T) -> hints.TangentVectorJax:
"""Manifold right minus.
Computes `delta = (T_wa.inverse() @ T_wb).log()`.
Args:
a: `T_wa`
b: `T_wb`
Returns:
`T_ab.log()`
"""
return (a.inverse() @ b).log()
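

# Sketch of on-manifold usage (assumes jaxlie's SE3 API; a zero delta keeps
# the transform unchanged and rminus recovers the local difference):
#
#     import numpy as onp
#     from jaxlie import SE3
#     T = SE3.identity()
#     T2 = rplus(T, onp.zeros(SE3.tangent_dim))   # == T for a zero delta
#     J = rplus_jacobian_parameters_wrt_delta(T)  # shape (7, 6) for SE3
#     delta = rminus(T, T2)                       # ~ zeros(6)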
| [
"[email protected]"
] | |
0628d28942b07798a3581b0c726246718d0103bf | 6ed233ec80984cd8d6eb5b8f2efde1ac5feadc4b | /ebc/nbr2018/tests/base.py | d8f1b8dc478a579559e54fd5ce377970766ca953 | [
"Unlicense"
] | permissive | lflrocha/ebc.nbr2018 | ce03abd238dca532d8adedaae0778b519b334852 | 0259390fecda065bf040b08e5ae3050ba96b1c4e | refs/heads/master | 2020-04-08T12:44:17.247750 | 2019-08-08T18:13:57 | 2019-08-08T18:13:57 | 159,359,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,962 | py | """Test setup for integration and functional tests.
When we import PloneTestCase and then call setupPloneSite(), all of
Plone's products are loaded, and a Plone site will be created. This
happens at module level, which makes it faster to run each test, but
slows down test runner startup.
"""
from Products.Five import zcml
from Products.Five import fiveconfigure
from Testing import ZopeTestCase as ztc
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import onsetup
# When ZopeTestCase configures Zope, it will *not* auto-load products
# in Products/. Instead, we have to use a statement such as:
# ztc.installProduct('SimpleAttachment')
# This does *not* apply to products in eggs and Python packages (i.e.
# not in the Products.*) namespace. For that, see below.
# All of Plone's products are already set up by PloneTestCase.
@onsetup
def setup_product():
"""Set up the package and its dependencies.
The @onsetup decorator causes the execution of this body to be
deferred until the setup of the Plone site testing layer. We could
have created our own layer, but this is the easiest way for Plone
integration tests.
"""
# Load the ZCML configuration for the example.tests package.
# This can of course use <include /> to include other packages.
fiveconfigure.debug_mode = True
import ebc.nbr2018
zcml.load_config('configure.zcml', ebc.nbr2018)
fiveconfigure.debug_mode = False
# We need to tell the testing framework that these products
# should be available. This can't happen until after we have loaded
# the ZCML. Thus, we do it here. Note the use of installPackage()
# instead of installProduct().
# This is *only* necessary for packages outside the Products.*
# namespace which are also declared as Zope 2 products, using
# <five:registerPackage /> in ZCML.
# We may also need to load dependencies, e.g.:
# ztc.installPackage('borg.localrole')
ztc.installPackage('ebc.nbr2018')
# The order here is important: We first call the (deferred) function
# which installs the products we need for this product. Then, we let
# PloneTestCase set up this product on installation.
setup_product()
ptc.setupPloneSite(products=['ebc.nbr2018'])
class TestCase(ptc.PloneTestCase):
"""We use this base class for all the tests in this package. If
necessary, we can put common utility or setup code in here. This
applies to unit test cases.
"""
class FunctionalTestCase(ptc.FunctionalTestCase):
"""We use this class for functional integration tests that use
doctest syntax. Again, we can put basic common utility or setup
code in here.
"""
def afterSetUp(self):
roles = ('Member', 'Contributor')
self.portal.portal_membership.addMember('contributor',
'secret',
roles, [])
| [
"[email protected]"
] | |
635734801373905925dc4741bdc7e1cb867d19fa | 85790a5af1f58b81e6b94b3a2b53035a17b0edf3 | /10-day/2-列表初识.py | ad20ef41614f731643bae4d8605902c0325f6005 | [] | no_license | liuruitao/Python-liuruitao | 681734577484f803e2be14260ae4d0fc1505a762 | 1c028c4bfeb4e89cb5120af1eadfa3f7ad34b569 | refs/heads/master | 2021-04-18T22:34:26.479834 | 2018-04-18T06:45:35 | 2018-04-18T06:45:35 | 126,920,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | aa='人生苦短,我用python,life is short'
print(aa.count('p'))
print(aa.count('l'))
print(aa.rfind('s'))
print(aa.upper())
| [
"[email protected]"
] | |
0bccda679a470479ed2e699aaf932957507e734f | 5c0c0176db0ccf2c24b6b5ed459a8dc144518b13 | /nni/nas/benchmarks/nasbench101/graph_util.py | 10805685fec3ff7359ec39dc0ae1c019e67950ae | [
"MIT"
] | permissive | petuum/nni | ac4f4a1c4d6df71684eeffa127b7c4858fd29e97 | 8134be6269902939232482d63649c06f9864be6d | refs/heads/master | 2023-02-18T11:21:41.078889 | 2021-01-20T03:21:50 | 2021-01-20T03:21:50 | 302,736,456 | 4 | 3 | MIT | 2020-11-20T20:21:15 | 2020-10-09T19:34:11 | Python | UTF-8 | Python | false | false | 3,790 | py | import hashlib
import numpy as np
from .constants import INPUT, LABEL2ID, OUTPUT
def _labeling_from_architecture(architecture, vertices):
return [INPUT] + [architecture['op{}'.format(i)] for i in range(1, vertices - 1)] + [OUTPUT]
def _adjancency_matrix_from_architecture(architecture, vertices):
matrix = np.zeros((vertices, vertices), dtype=np.bool)
for i in range(1, vertices):
for k in architecture['input{}'.format(i)]:
matrix[k, i] = 1
return matrix
def nasbench_format_to_architecture_repr(adjacency_matrix, labeling):
"""
    Converts a NAS-Bench-101 (adjacency matrix, labeling) pair into the
    architecture dict representation used by NNI.
Parameters
----------
adjacency_matrix : np.ndarray
A 2D array of shape NxN, where N is the number of vertices.
``matrix[u][v]`` is 1 if there is a direct edge from `u` to `v`,
otherwise it will be 0.
labeling : list of str
A list of str that starts with input and ends with output. The intermediate
nodes are chosen from candidate operators.
Returns
-------
    tuple of (int, dict)
        The number of vertices and the converted architecture dict.
"""
num_vertices = adjacency_matrix.shape[0]
assert len(labeling) == num_vertices
architecture = {}
for i in range(1, num_vertices - 1):
architecture['op{}'.format(i)] = labeling[i]
assert labeling[i] not in [INPUT, OUTPUT]
for i in range(1, num_vertices):
architecture['input{}'.format(i)] = [k for k in range(i) if adjacency_matrix[k, i]]
return num_vertices, architecture
def infer_num_vertices(architecture):
"""
Infer number of vertices from an architecture dict.
Parameters
----------
architecture : dict
Architecture in NNI format.
Returns
-------
int
Number of vertices.
"""
op_keys = set([k for k in architecture.keys() if k.startswith('op')])
intermediate_vertices = len(op_keys)
assert op_keys == {'op{}'.format(i) for i in range(1, intermediate_vertices + 1)}
return intermediate_vertices + 2
def hash_module(architecture, vertices):
"""
Computes a graph-invariance MD5 hash of the matrix and label pair.
This snippet is modified from code in NAS-Bench-101 repo.
Parameters
----------
    architecture : dict
        Architecture in NNI format.
    vertices : int
        Number of vertices in the cell.
Returns
-------
str
MD5 hash of the matrix and labeling.
"""
labeling = _labeling_from_architecture(architecture, vertices)
labeling = [LABEL2ID[t] for t in labeling]
matrix = _adjancency_matrix_from_architecture(architecture, vertices)
in_edges = np.sum(matrix, axis=0).tolist()
out_edges = np.sum(matrix, axis=1).tolist()
assert len(in_edges) == len(out_edges) == len(labeling)
hashes = list(zip(out_edges, in_edges, labeling))
hashes = [hashlib.md5(str(h).encode('utf-8')).hexdigest() for h in hashes]
# Computing this up to the diameter is probably sufficient but since the
# operation is fast, it is okay to repeat more times.
for _ in range(vertices):
new_hashes = []
for v in range(vertices):
in_neighbors = [hashes[w] for w in range(vertices) if matrix[w, v]]
out_neighbors = [hashes[w] for w in range(vertices) if matrix[v, w]]
new_hashes.append(hashlib.md5(
(''.join(sorted(in_neighbors)) + '|' +
''.join(sorted(out_neighbors)) + '|' +
hashes[v]).encode('utf-8')).hexdigest())
hashes = new_hashes
fingerprint = hashlib.md5(str(sorted(hashes)).encode('utf-8')).hexdigest()
return fingerprint
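

# Tiny illustration (hypothetical 3-vertex cell input -> op -> output; the
# operator label must be one of the benchmark's, e.g. 'conv3x3-bn-relu'):
#
#     arch = {'op1': 'conv3x3-bn-relu', 'input1': [0], 'input2': [1]}
#     v = infer_num_vertices(arch)  # 3
#     fp = hash_module(arch, v)     # 32-char graph-invariant MD5 fingerprint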
| [
"[email protected]"
] | |
2ec5a1156d06c902673f739affb49f1533f4092d | 24bc4990e9d0bef6a42a6f86dc783785b10dbd42 | /build/fuchsia/PRESUBMIT.py | f8c7df28fc5fd1397f1569b6b65e371324b3fa65 | [
"BSD-3-Clause"
] | permissive | nwjs/chromium.src | 7736ce86a9a0b810449a3b80a4af15de9ef9115d | 454f26d09b2f6204c096b47f778705eab1e3ba46 | refs/heads/nw75 | 2023-08-31T08:01:39.796085 | 2023-04-19T17:25:53 | 2023-04-19T17:25:53 | 50,512,158 | 161 | 201 | BSD-3-Clause | 2023-05-08T03:19:09 | 2016-01-27T14:17:03 | null | UTF-8 | Python | false | false | 1,591 | py | # Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Fuchsia.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into depot_tools.
"""
USE_PYTHON3 = True
import os
def CommonChecks(input_api, output_api):
build_fuchsia_dir = input_api.PresubmitLocalPath()
def J(*dirs):
"""Returns a path relative to presubmit directory."""
return input_api.os_path.join(build_fuchsia_dir, *dirs)
tests = []
unit_tests = [
J('binary_sizes_test.py'),
J('binary_size_differ_test.py'),
J('device_target_test.py'),
J('gcs_download_test.py'),
J('update_images_test.py'),
J('update_product_bundles_test.py'),
J('update_sdk_test.py'),
]
# TODO(1309977): enable on Windows when fixed.
if os.name != 'nt':
unit_tests.extend([J('fvdl_target_test.py')])
tests.extend(
input_api.canned_checks.GetUnitTests(input_api,
output_api,
unit_tests=unit_tests,
run_on_python2=False,
run_on_python3=True,
skip_shebang_check=True))
return input_api.RunTests(tests)
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
| [
"[email protected]"
] | |
a7bbc60feece73e88f0a57f6209db2d14d87241c | bc441bb06b8948288f110af63feda4e798f30225 | /user_service_sdk/model/cmdb_extend/subsystem_dependency_pb2.pyi | 56c626d516302fae9e256521a00d0df10a2ecd97 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,344 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
from user_service_sdk.model.cmdb_extend.app_dependency_pb2 import (
AppDependency as user_service_sdk___model___cmdb_extend___app_dependency_pb2___AppDependency,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class SubsystemDependency(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class ConnectSubsystems(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
abbreviation = ... # type: typing___Text
object_id = ... # type: typing___Text
instance_id = ... # type: typing___Text
name = ... # type: typing___Text
def __init__(self,
*,
abbreviation : typing___Optional[typing___Text] = None,
object_id : typing___Optional[typing___Text] = None,
instance_id : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> SubsystemDependency.ConnectSubsystems: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> SubsystemDependency.ConnectSubsystems: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"abbreviation",b"abbreviation",u"instance_id",b"instance_id",u"name",b"name",u"object_id",b"object_id"]) -> None: ...
abbreviation = ... # type: typing___Text
name = ... # type: typing___Text
object_id = ... # type: typing___Text
instance_id = ... # type: typing___Text
@property
def components(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[user_service_sdk___model___cmdb_extend___app_dependency_pb2___AppDependency]: ...
@property
def connect_subsystems(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[SubsystemDependency.ConnectSubsystems]: ...
def __init__(self,
*,
abbreviation : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
object_id : typing___Optional[typing___Text] = None,
instance_id : typing___Optional[typing___Text] = None,
components : typing___Optional[typing___Iterable[user_service_sdk___model___cmdb_extend___app_dependency_pb2___AppDependency]] = None,
connect_subsystems : typing___Optional[typing___Iterable[SubsystemDependency.ConnectSubsystems]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> SubsystemDependency: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> SubsystemDependency: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"abbreviation",b"abbreviation",u"components",b"components",u"connect_subsystems",b"connect_subsystems",u"instance_id",b"instance_id",u"name",b"name",u"object_id",b"object_id"]) -> None: ...
| [
"[email protected]"
] | |
c534d7c22c8c9cfe1da125036b9b6e7f079298dc | 6cfa6d84722cf560b9dc144ba826d857e884d8fb | /redis/ticker/config/role.sample.py | 793f9031371e1b17ce60702bbe524190ca842034 | [] | no_license | chaeplin/dash-ticker | b5e3702c87bc351ae40863de8cd8a55dddc74330 | 99e1fdc4e105601bdcfa55e80c524ca48294bee8 | refs/heads/master | 2021-01-18T23:07:24.246729 | 2017-11-23T09:39:40 | 2017-11-23T09:39:40 | 72,606,879 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | HOST_ROLE = 'MASTER'
#HOST_ROLE = 'SLAVE'
# SENTINEL CHECK
# MASTER
MASTER_SETINEL_HOST = '192.168.10.3'
MASTER_REDIS_MASTER = '192.168.10.2'
#SLAVE
SLAVE_SETINEL_HOST = '192.168.10.4'
SLAVE_REDIS_MASTER = '192.168.10.1'
| [
"[email protected]"
] | |
a2aad80e1bd8dcac5b76b43c7c1b79f9d346ecb5 | b501a5eae1018c1c26caa96793c6ee17865ebb2d | /data_persistence_and_exchange/sqlite3/sqlite3_iterdump.py | b3e3cb6c5fd4e677c2df637f142fcfd822cb06dd | [] | no_license | jincurry/standard_Library_Learn | 12b02f9e86d31ca574bb6863aefc95d63cc558fc | 6c7197f12747456e0f1f3efd09667682a2d1a567 | refs/heads/master | 2022-10-26T07:28:36.545847 | 2018-05-04T12:54:50 | 2018-05-04T12:54:50 | 125,447,397 | 0 | 1 | null | 2022-10-02T17:21:50 | 2018-03-16T01:32:50 | Python | UTF-8 | Python | false | false | 866 | py | import sqlite3
schema_filename = 'todo_schema.sql'
with sqlite3.connect(':memory:') as conn:
conn.row_factory = sqlite3.Row
print('Creating schema')
with open(schema_filename, 'rt') as f:
schema = f.read()
conn.executescript(schema)
print('Inserting initial data')
conn.execute("""
insert INTO project (name, description, deadline)
VALUES ('pymotw', 'Python Module fo the Week', '2018-12-01')
""")
data = [
('write about select', 'done', '2010-10-03', 'pymotw'),
('write about random', 'waiting', '2010-11-10', 'pymotw'),
('write about sqlite3', 'active', '2010-10-17', 'pymotw'),
]
conn.executemany("""
insert INTO task (details, status, deadline, project)
VALUES (?, ?, ?, ?)
""", data)
print('Dumping:')
for text in conn.iterdump():
print(text)
| [
"[email protected]"
] | |
bd32ba3fd62a9486d5b5dbaac375ebf63d3f6370 | b4871c8dd8ef257d604ac221ecff0c71e14f06cb | /pilot_curriculum/django_apps/generic_nasa/nasa/migrations/0001_initial.py | 99716aced67f5162e56b39feda4fbd930ab74f6c | [] | no_license | manzur1990/tracecamp_curriculum | b55605b0bbe4b5e3b333ae3fb105141e53f42e39 | e9c8ee9a3c151a5cd57137f6575d1342a7de83fb | refs/heads/master | 2022-04-13T15:24:54.305552 | 2019-08-12T13:58:40 | 2019-08-12T13:58:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | # Generated by Django 2.1.4 on 2018-12-21 15:00
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='NasaComments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('comment', models.TextField()),
('rating', models.IntegerField()),
('image_url', models.URLField()),
],
),
]
| [
"[email protected]"
] | |
496afa8406a6ad5f9584ceddba65ba6374ac3cfb | cc44edfa1edbedea3ad044805be7548e0ccba70d | /0x0C-python-almost_a_circle/models/square.py | dc5e2428176dcbb01162c0529f50870f361569e2 | [] | no_license | set808/holbertonschool-higher_level_programming | 421f0da1f91cd56eb2daa4e07a51b4a505d53edc | eb276a4e68e5cc43498459eec78fc05f72e2cd48 | refs/heads/master | 2020-03-09T13:07:43.824914 | 2018-09-08T00:26:46 | 2018-09-08T00:26:46 | 128,802,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,833 | py | #!/usr/bin/python3
'''
Defines the class Square
'''
from models.rectangle import Rectangle
class Square(Rectangle):
'''Square class that inherits from Rectangle
'''
def __init__(self, size, x=0, y=0, id=None):
'''Initializes the Square object
Args:
size (int): size of the square
x (int): position on the x axis
y (int): position on the y axis
id (int): the id of the object
'''
super().__init__(size, size, x, y, id)
def __str__(self):
'''Returns a string representation of a Square object
'''
return ('[Square] ({:d}) {:d}/{:d} - {:d}'.
format(self.id, self.x, self.y, self.width))
@property
def size(self):
'''Returns the size of the Square object
Return:
returns the size
'''
return self.width
@size.setter
def size(self, value):
'''Sets the size of the Square object
Args:
value (int): the new size value
'''
self.width = value
self.height = value
def update(self, *args, **kwargs):
'''Updates the Square instance
Args:
list of new values to update Square values
'''
if args:
keys = ['id', 'size', 'x', 'y']
for key, value in zip(keys, args):
setattr(self, key, value)
return
else:
for key, value in kwargs.items():
                if key in ('id', 'size', 'x', 'y'):
setattr(self, key, value)
def to_dictionary(self):
'''Returns a dictionary representation of a Square
Return:
returns the dictionary representation
'''
return {'id': self.id, 'size': self.size, 'x': self.x, 'y': self.y}
| [
"[email protected]"
] | |
698e29f11047d2ec058edc49d83078842a204ea8 | f98ca6e020f21b303f8cc2a8474f71ce436f2d75 | /tests/test_jsonlib.py | a67045757d2f9a40191ad7b77c8194728c2eb43e | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | garnerargoed/clldutils | df8afd714ab0ae1004aeb47dc24e1e96bb33323b | 0d85c0bf46184bb99c6800ecbfa7f5db87cb2e7e | refs/heads/master | 2021-09-07T22:36:17.564100 | 2018-03-02T09:54:44 | 2018-03-02T09:54:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | # coding: utf8
from __future__ import unicode_literals
from datetime import date
import pytest
from clldutils.jsonlib import dump, load, parse, update, format
def test_parse_json_with_datetime():
assert parse(dict(d='2012-12-12T20:12:12.12'))['d'].year
def test_update(tmppath):
p = tmppath / 'test'
with pytest.raises(ValueError):
with update(p):
pass # pragma: no cover
with update(p, default={}) as obj:
obj['a'] = 1
with update(p) as obj:
assert obj['a'] == 1
obj['a'] = 2
with update(p) as obj:
assert obj['a'] == 2
def test_json(tmppath):
d = {'a': 234, 'ä': 'öäüß'}
p = tmppath / 'test'
dump(d, p, indent=4)
for k, v in load(p).items():
assert d[k] == v
def test_format_json():
format(date.today())
assert format(5) == 5
| [
"[email protected]"
] | |
f8eccfe1aedbc9b1aa4f0a96d6d6e702357e2324 | 9c69e2fc689194237ef294071a9c14c6dfabe545 | /src/ultimate/user/urls.py | f8a18af2ec2ba1e3f063efd7648d2a5fa4b8f419 | [] | permissive | a2ultimate/ultimate-league-app | 69f0331f5efeb2883c00990eb7a59ac346a34c69 | 1b40e8a01950fc526db9b649b78ada71ec567624 | refs/heads/main | 2023-04-03T06:43:24.471566 | 2023-03-16T00:55:22 | 2023-03-16T00:55:22 | 8,152,035 | 4 | 2 | BSD-3-Clause | 2023-03-16T00:55:24 | 2013-02-12T03:15:25 | Python | UTF-8 | Python | false | false | 1,599 | py | from django.conf.urls import url, include
from django.contrib.auth.views import login, logout, password_reset, password_reset_done, password_reset_confirm, password_reset_complete
from . import views
urlpatterns = [
url(r'^$', views.index, {}, 'user'),
url(r'^log-in/$', login, {'template_name': 'user/login.html'}, 'auth_log_in'),
url(r'^log-out/$', logout, {'template_name': 'user/logout.html'}, 'auth_log_out'),
url(r'^password/reset/$', password_reset, {'post_reset_redirect': '/user/password/reset/done/', 'template_name': 'user/registration/password_reset_form.html',
'email_template_name': 'user/registration/password_reset_email.html', 'subject_template_name': 'user/registration/password_reset_subject.txt', }, 'password_reset'),
url(r'^password/reset/done/$', password_reset_done,
{'template_name': 'user/registration/password_reset_done.html'}, 'password_reset_done'),
url(r'^password/reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', password_reset_confirm, {
'post_reset_redirect': '/user/password/done/', 'template_name': 'user/registration/password_reset_confirm.html'}, 'password_reset_confirm'),
url(r'^password/done/$', password_reset_complete,
{'template_name': 'user/registration/password_reset_complete.html'}, 'password_reset_confirm'),
url(r'^sign-up/$', views.signup, {}, 'registration_register'),
url(r'^edit/profile/$', views.editprofile, {}, 'editprofile'),
url(r'^edit/ratings/$', views.editratings, {}, 'editratings'),
]
| [
"[email protected]"
] | |
4ee1c835bfb47e715ce2c96c65cf218f187bab31 | 282d0a84b45b12359b96bbf0b1d7ca9ee0cb5d19 | /Malware1/venv/Lib/site-packages/numpy/lib/npyio.py | 6eb9cbd18235dbb0bd65ee520e9f94852156d02a | [] | no_license | sameerakhtar/CyberSecurity | 9cfe58df98495eac6e4e2708e34e70b7e4c055d3 | 594973df27b4e1a43f8faba0140ce7d6c6618f93 | refs/heads/master | 2022-12-11T11:53:40.875462 | 2020-09-07T23:13:22 | 2020-09-07T23:13:22 | 293,598,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:40ba76b8ee10e92857396797dd69934bd5b9c1c413138a9ccef88a8b534917a6
size 84853
| [
"[email protected]"
] | |
8710c34e56994907a5c2a8375d3db551668509d1 | 5e5b3776acc441b77a970db29d99e850b79be65e | /gist/define.py | 3f301f552c4ea79b674caf06a826dd54b686c3f2 | [
"Apache-2.0"
] | permissive | eightnoteight/x-gist | f153ae7c5ae5d9335af23ba54c0c668b71a5c157 | ec65e5193f989238a026a8b239eabf61c5ec7a8d | refs/heads/master | 2020-12-24T16:23:40.157360 | 2014-08-15T04:54:14 | 2014-08-15T04:54:14 | 22,781,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class struct(dict):
def __init__(self, **kwargs):
super(struct, self).__init__(**kwargs)
self.__dict__ = self
client = struct(name="x-gist",
url ="https://github.com/eightnoteight/x-gist") | [
"[email protected]"
] | |
ac2dea16ccd2b71756d09ad35c1724375eada021 | 5c2f520dde0cf8077facc0fcd9a92bc1a96d168b | /microbenchmarks/exceptions_ubench.py | 8260f1764de23db08de7146823a2733eba4417dc | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | nagyist/pyston | b613337a030ef21a3f03708febebe76cedf34c61 | 14ba2e6e6fb5c7316f66ccca86e6c6a836d96cab | refs/heads/master | 2022-12-24T03:56:12.885732 | 2015-02-25T11:11:08 | 2015-02-25T11:28:13 | 31,314,596 | 0 | 0 | NOASSERTION | 2022-12-17T08:15:11 | 2015-02-25T13:24:41 | Python | UTF-8 | Python | false | false | 241 | py | def f():
# Try to eliminate as much non-exception stuff as possible:
from __builtin__ import Exception
e = Exception()
for i in xrange(100000):
try:
raise e
except Exception:
pass
f()
| [
"[email protected]"
] | |
67f7daaefaae8d776a203ce1eb65de7d4fc4810a | cb57a9ea4622b94207d12ea90eab9dd5b13e9e29 | /lc/python/0339_nested_list_weight_sum.py | 4db7961460d959975511fefeea17b42164eec24f | [] | no_license | boknowswiki/mytraning | b59585e1e255a7a47c2b28bf2e591aef4af2f09a | 5e2f6ceacf5dec8260ce87e9a5f4e28e86ceba7a | refs/heads/master | 2023-08-16T03:28:51.881848 | 2023-08-10T04:28:54 | 2023-08-10T04:28:54 | 124,834,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | # dfs
# time O(n)
# space O(1)
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger:
# def __init__(self, value=None):
# """
# If value is not specified, initializes an empty list.
# Otherwise initializes a single integer equal to value.
# """
#
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def add(self, elem):
# """
# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
# :rtype void
# """
#
# def setInteger(self, value):
# """
# Set this NestedInteger to hold a single integer equal to value.
# :rtype void
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class Solution:
def depthSum(self, nestedList: List[NestedInteger]) -> int:
if not nestedList:
return 0
ret = 0
def dfs(nl, level):
nonlocal ret
for nnl in nl:
if nnl.isInteger():
ret += nnl.getInteger()*level
else:
dfs(nnl.getList(), level+1)
return
dfs(nestedList, 1)
return ret
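
# Weighting sanity check (NestedInteger is supplied by the LeetCode judge):
# [[1,1],2,[1,1]] -> four 1s at depth 2 and one 2 at depth 1 = 4*2 + 2*1 = 10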
| [
"[email protected]"
] | |
3215f4395ddfe3f66ca86b29a70209aa7b2a2b1b | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/databox/azure-mgmt-databox/azure/mgmt/databox/__init__.py | 01935d3cb4901323838878ded3e2783723747739 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 726 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._data_box_management_client import DataBoxManagementClient
__all__ = ['DataBoxManagementClient']
try:
from ._patch import patch_sdk # type: ignore
patch_sdk()
except ImportError:
pass
from ._version import VERSION
__version__ = VERSION
| [
"[email protected]"
] | |
91d66a042dd155e0b4c535b6fd0d0b0c5f21e6bc | 5c8139f1e57e06c7eaf603bd8fe74d9f22620513 | /PartB/Py括号的配对问题.py | 07341985e258c672ef6b2f14eda73e2fd3628ce7 | [] | no_license | madeibao/PythonAlgorithm | c8a11d298617d1abb12a72461665583c6a44f9d2 | b4c8a75e724a674812b8a38c0202485776445d89 | refs/heads/master | 2023-04-03T07:18:49.842063 | 2021-04-11T12:02:40 | 2021-04-11T12:02:40 | 325,269,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | lst = raw_input().strip()
stack = []
flag = True
for i in lst:
if i == '(':
stack.append('(')
elif i == '[':
stack.append('[')
elif i == ')':
if len(stack) > 0 and stack[-1] == '(':
stack.pop(-1)
else:
flag = False
break
elif i == ']':
if len(stack) > 0 and stack[-1] == '[':
stack.pop(-1)
else:
flag = False
break
if flag:
print('true')
else:
print('false')
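
# Sample behaviour (only '(' / '[' pairs are checked):
#     '()[]' -> true
#     '([)]' -> false  (top of stack is '[' when ')' arrives)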
| [
"[email protected]"
] | |
6518580d8c0f42beea9b03ecc5cf5026c5eb4b0b | a2f6e449e6ec6bf54dda5e4bef82ba75e7af262c | /venv/Lib/site-packages/pandas/tests/io/pytables/__init__.py | 04573ec7273cbbee29a9587f2fd75e67ef512d86 | [] | no_license | mylonabusiness28/Final-Year-Project- | e4b79ccce6c19a371cac63c7a4ff431d6e26e38f | 68455795be7902b4032ee1f145258232212cc639 | refs/heads/main | 2023-07-08T21:43:49.300370 | 2021-06-05T12:34:16 | 2021-06-05T12:34:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:7899383beb479b67e688296a1f549aed94a571607226a1afea25dce1a3dc152c
size 411
| [
"[email protected]"
] | |
a0501f5fe8d7b2dbd0b347bf845646f7cd23628d | 34599a66861f7d95a5740eeb5329ea77014e18d4 | /problems_solving/algospot/firetrucks.py | 0006d6df8ed42a042757bbebd9b4c40fa0a3cf8c | [] | no_license | laolee010126/algorithm-with-python | f0f5f1bc3cbe374ccbb59e10ac639674c44ae743 | 89ff0c47a6d8b0cd5b31a25bb3981b8e90971f19 | refs/heads/master | 2022-04-01T17:38:36.199309 | 2020-01-14T01:54:22 | 2020-01-14T01:54:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | """Get least sum of distances to dispatch firetrucks to houses on fire
:input:
1
8 12 3 2
1 2 3
1 6 9
2 3 6
3 4 4
3 5 2
4 5 7
6 5 5
8 6 5
6 7 3
8 7 3
7 5 1
2 8 3
2 3 5
4 6
:return:
16
"""
from heapq import heappush, heappop
from math import inf
from sys import stdin
get_input = stdin.readline
def min_dist(g, dest, src):
V = len(g)
    # Add a new transparent vertex connecting fire stations into one single station
for s in src:
g[0].append((0, s))
g[s].append((0, 0))
# 1. priority queue version
# pq = [(0, 0)]
# dist = [inf] * V
# dist[0] = 0
# while pq:
# cost, here = heappop(pq)
# if cost > dist[here]:
# continue
# for dc, there in g[here]:
# nxt_dist = cost + dc
# if nxt_dist < dist[there]:
# dist[there] = nxt_dist
# heappush(pq, (nxt_dist, there))
# return sum(dist[d] for d in dest)
# 2. Non-priority queue version
dist = [inf] * V
dist[0] = 0
visited = [False] * V
while True:
min_dist = inf
here = None
for v in range(V):
if dist[v] < min_dist and not visited[v]:
min_dist = dist[v]
here = v
if min_dist == inf:
break
visited[here] = True
for dc, there in g[here]:
nxt_dist = dist[here] + dc
if not visited[there] and nxt_dist < dist[there]:
dist[there] = nxt_dist
return sum(dist[d] for d in dest)
if __name__ == '__main__':
C = int(get_input().strip())
ans = []
for _ in range(C):
V, E, DEST, SRC = (int(n) for n in get_input().strip().split())
g = [[] for _ in range(V+1)]
for _ in range(E):
a, b, d = (int(n) for n in get_input().strip().split())
g[a].append((d, b))
g[b].append((d, a))
dest = [int(n) for n in get_input().strip().split()]
src = [int(n) for n in get_input().strip().split()]
ans.append(min_dist(g, dest, src))
for n in ans:
print(n)
| [
"[email protected]"
] | |
d1772e7e5ce09349017f1c2dd30cdfbab93383ed | 977396938e6a077423276eda152d4541578eb527 | /migrations/versions/f9155326f52d_.py | 6b91c18d0de33f955ebc4eeda030c79a1e03e91c | [] | no_license | Areum0921/web_pybo | 688c741a5a8b5fa3d8df51f058c7ec0a8288ae91 | 0c830eda270dbbe3257e3458af4576b38d5dbaa8 | refs/heads/master | 2023-06-19T06:40:41.327188 | 2021-07-16T02:29:34 | 2021-07-16T02:29:34 | 355,765,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,375 | py | """empty message
Revision ID: f9155326f52d
Revises: 5496eea3137d
Create Date: 2021-03-29 14:31:37.557367
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = 'f9155326f52d'
down_revision = '5496eea3137d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('_alembic_tmp_answer')
with op.batch_alter_table('answer', schema=None) as batch_op:
batch_op.alter_column('ip',
existing_type=sa.VARCHAR(length=50),
nullable=False)
with op.batch_alter_table('question', schema=None) as batch_op:
batch_op.add_column(sa.Column('user_id', sa.Integer(), server_default='1', nullable=True))
batch_op.create_foreign_key(batch_op.f('fk_question_user_id_user'), 'user', ['user_id'], ['id'], ondelete='CASCADE')
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.create_unique_constraint(batch_op.f('uq_user_email'), ['email'])
batch_op.create_unique_constraint(batch_op.f('uq_user_username'), ['username'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.drop_constraint(batch_op.f('uq_user_username'), type_='unique')
batch_op.drop_constraint(batch_op.f('uq_user_email'), type_='unique')
with op.batch_alter_table('question', schema=None) as batch_op:
batch_op.drop_constraint(batch_op.f('fk_question_user_id_user'), type_='foreignkey')
batch_op.drop_column('user_id')
with op.batch_alter_table('answer', schema=None) as batch_op:
batch_op.alter_column('ip',
existing_type=sa.VARCHAR(length=50),
nullable=True)
op.create_table('_alembic_tmp_answer',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('question_id', sa.INTEGER(), nullable=True),
sa.Column('content', sa.TEXT(), nullable=False),
sa.Column('create_date', sa.DATETIME(), nullable=False),
sa.Column('ip', sa.VARCHAR(length=50), nullable=False),
sa.ForeignKeyConstraint(['question_id'], ['question.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
| [
"[email protected]"
] | |
3720c2cfb59920028d138cfe49a9a780696b3a31 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03804/s226430328.py | 40ea2fbbe5322fb1e9b734e857d574fcafee112b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | N, M = map(int, input().split())
A = [input() for _ in range(N)]
B = [input() for _ in range(M)]
for i in range(N - M + 1):
for j in range(N - M + 1):
check = True
count = 0
for k in range(M):
if (A[i + k][j: j + M] == B[k]):
# print(A[i + k][j:j + M], B[k])
count += 1
continue
else:
check = False
break
if (check and count == M):
print('Yes')
exit()
print('No')
| [
"[email protected]"
] | |
a3c15c175c51765051f69df3b52980e1fd7a3f0a | e3ec7260806c1e2b045a0de93a150a5c3fc1b9df | /test_sin.py | cfd93e45de3207cebfeb5d1bfd66b21c78b149ef | [
"Apache-2.0"
] | permissive | FearFactor1/SPA | 58a21c9ec7a72a78f5ff50214e58faac43a3059d | a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7 | refs/heads/master | 2021-07-07T04:25:12.525595 | 2020-11-16T14:35:33 | 2020-11-16T14:35:33 | 204,684,720 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | from selenium import webdriver
import time
from selenium.webdriver.support.ui import Select
link = "http://suninjuly.github.io/selects2.html"
#nav__item-auto > a
try:
browser = webdriver.Chrome()
browser.get(link)
num1 = browser.find_element_by_css_selector("#num1").text
num2 = browser.find_element_by_css_selector("#num2").text
sum = int(num1) + int(num2)
select = Select(browser.find_element_by_tag_name("select"))
select.select_by_value(str(sum))
button = browser.find_element_by_xpath("//*/button[contains(text(), 'Submit')]")
button.click()
finally:
    # gives us 30 seconds to copy the result code from the page
time.sleep(30)
    # close the browser after all the actions
browser.quit() | [
"[email protected]"
] | |
e514273c815a754c9cc94115339b8f0ab6c3c284 | 58e8117c418a8931d56fe57ba0edd38214c79642 | /ate/context.py | ed59e79081a32350eef9099c194c3270b9fc91da | [
"MIT"
] | permissive | 229051923/ApiTestEngine | 90b495ee7be037a35f63ffa3b2c95f6ba24f43ce | 4a529068f451c8880681286518ff6a643ecf9067 | refs/heads/master | 2021-01-01T16:09:10.737245 | 2017-07-19T15:29:29 | 2017-07-19T15:29:29 | 97,780,015 | 2 | 0 | null | 2017-07-20T02:09:36 | 2017-07-20T02:09:36 | null | UTF-8 | Python | false | false | 7,621 | py | import copy
import importlib
import re
import types
from collections import OrderedDict
from ate import exception, testcase, utils
def is_function(tup):
""" Takes (name, object) tuple, returns True if it is a function.
"""
name, item = tup
return isinstance(item, types.FunctionType)
class Context(object):
""" Manages context functions and variables.
context has two levels, testset and testcase.
"""
def __init__(self):
self.testset_config = {}
self.testset_shared_variables_mapping = dict()
self.testcase_config = {}
self.testcase_variables_mapping = dict()
self.init_context()
def init_context(self, level='testset'):
"""
testset level context initializes when a file is loaded,
testcase level context initializes when each testcase starts.
"""
if level == "testset":
self.testset_config["functions"] = {}
self.testset_config["variables"] = OrderedDict()
self.testset_config["request"] = {}
self.testset_shared_variables_mapping = {}
self.testcase_config["functions"] = {}
self.testcase_config["variables"] = OrderedDict()
self.testcase_config["request"] = {}
self.testcase_variables_mapping = copy.deepcopy(self.testset_shared_variables_mapping)
def import_requires(self, modules):
""" import required modules dynamicly
"""
for module_name in modules:
globals()[module_name] = importlib.import_module(module_name)
def bind_functions(self, function_binds, level="testcase"):
""" Bind named functions within the context
This allows for passing in self-defined functions in testing.
e.g. function_binds:
{
"add_one": lambda x: x + 1, # lambda function
"add_two_nums": "lambda x, y: x + y" # lambda function in string
}
"""
eval_function_binds = {}
for func_name, function in function_binds.items():
if isinstance(function, str):
function = eval(function)
eval_function_binds[func_name] = function
self.__update_context_config(level, "functions", eval_function_binds)
def import_module_functions(self, modules, level="testcase"):
""" import modules and bind all functions within the context
"""
for module_name in modules:
imported = importlib.import_module(module_name)
imported_functions_dict = dict(filter(is_function, vars(imported).items()))
self.__update_context_config(level, "functions", imported_functions_dict)
def register_variables_config(self, variable_binds, level="testcase"):
""" register variable configs
@param (list) variable_binds, variable can be value or custom function
e.g.
[
{"TOKEN": "debugtalk"},
{"random": "${gen_random_string(5)}"},
{"json": {'name': 'user', 'password': '123456'}},
{"md5": "${gen_md5($TOKEN, $json, $random)}"}
]
"""
if level == "testset":
for variable_bind in variable_binds:
self.testset_config["variables"].update(variable_bind)
elif level == "testcase":
self.testcase_config["variables"] = copy.deepcopy(self.testset_config["variables"])
for variable_bind in variable_binds:
self.testcase_config["variables"].update(variable_bind)
def register_request(self, request_dict, level="testcase"):
self.__update_context_config(level, "request", request_dict)
def __update_context_config(self, level, config_type, config_mapping):
"""
@param level: testset or testcase
@param config_type: functions, variables or request
@param config_mapping: functions config mapping or variables config mapping
"""
if level == "testset":
self.testset_config[config_type].update(config_mapping)
elif level == "testcase":
self.testcase_config[config_type].update(config_mapping)
def get_parsed_request(self):
""" get parsed request, with each variable replaced by bind value.
testcase request shall inherit from testset request configs,
but can not change testset configs, that's why we use copy.deepcopy here.
"""
testcase_request_config = utils.deep_update_dict(
copy.deepcopy(self.testset_config["request"]),
self.testcase_config["request"]
)
parsed_request = testcase.parse_template(
testcase_request_config,
self._get_evaluated_testcase_variables()
)
return parsed_request
def bind_extracted_variables(self, variables_mapping):
""" bind extracted variable to current testcase context and testset context.
since extracted variable maybe used in current testcase and next testcases.
"""
self.testset_shared_variables_mapping.update(variables_mapping)
self.testcase_variables_mapping.update(variables_mapping)
def get_testcase_variables_mapping(self):
return self.testcase_variables_mapping
def _get_evaluated_testcase_variables(self):
""" variables in variables_config will be evaluated each time
"""
testcase_functions_config = copy.deepcopy(self.testset_config["functions"])
testcase_functions_config.update(self.testcase_config["functions"])
self.testcase_config["functions"] = testcase_functions_config
testcase_variables_config = copy.deepcopy(self.testset_config["variables"])
testcase_variables_config.update(self.testcase_config["variables"])
self.testcase_config["variables"] = testcase_variables_config
for var_name, var_value in self.testcase_config["variables"].items():
self.testcase_variables_mapping[var_name] = self.get_eval_value(var_value)
return self.testcase_variables_mapping
def get_eval_value(self, data):
""" evaluate data recursively, each variable in data will be evaluated.
"""
if isinstance(data, (list, tuple)):
return [self.get_eval_value(item) for item in data]
if isinstance(data, dict):
evaluated_data = {}
for key, value in data.items():
evaluated_data[key] = self.get_eval_value(value)
return evaluated_data
if isinstance(data, (int, float)):
return data
# data is in string format here
data = "" if data is None else data.strip()
if utils.is_variable(data):
# variable marker: $var
variable_name = utils.parse_variable(data)
value = self.testcase_variables_mapping.get(variable_name)
if value is None:
raise exception.ParamsError(
"%s is not defined in bind variables!" % variable_name)
return value
elif utils.is_functon(data):
# function marker: ${func(1, 2, a=3, b=4)}
            function_meta = utils.parse_function(data)
            func_name = function_meta['func_name']
            args = function_meta.get('args', [])
            kwargs = function_meta.get('kwargs', {})
args = self.get_eval_value(args)
kwargs = self.get_eval_value(kwargs)
return self.testcase_config["functions"][func_name](*args, **kwargs)
else:
return data
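# Minimal usage sketch of the two-level context. Illustrative only: the
# "$var" / "${func(...)}" markers are resolved by ate.utils, so the expected
# output below is an assumption rather than a tested result.
if __name__ == '__main__':
    ctx = Context()
    ctx.bind_functions({'add_one': lambda x: x + 1}, level='testset')
    ctx.register_variables_config([{'num': '${add_one(41)}'}])
    print(ctx._get_evaluated_testcase_variables())  # expected: {'num': 42}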
| [
"[email protected]"
] | |
a6db9d0ebe8e9a8f0ab0a1cacff578441a2234ba | 4fcfc6834f598954e069e9481e4f69d6f7205f3b | /Week1/day_3/Flask_Intro/first_flask_app/server.py | a7693151c6981a66648352d4cf8857f87de47de7 | [] | no_license | sadieBoBadie/jan_2020_python_stack | dadc77a8a76fd4b900bc31ad83ed4680a0802aa1 | b91da3da23ea57a27c086075f4b86c5bfec413d0 | refs/heads/main | 2023-03-02T22:27:46.877290 | 2021-01-26T19:04:57 | 2021-01-26T19:04:57 | 326,852,429 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def puppies():
return "<h1 style='color: red'>Puppies are cute!</h1>"
@app.route('/<animal>/<color>')
@app.route('/<animal>')
@app.route('/<animal>/<color>/<int:num>')
def display_animal(animal, color="blue", num=5):
print(f"Animal: {animal}")
print(f"Color: {color}")
print("Type of the num var: ", type(num))
return render_template('index.html', animal=animal, color=color, num=num)
if __name__=="__main__":
app.run(debug=True)
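# With the stacked routes above, /cat/purple/3 binds animal="cat",
# color="purple", num=3, while /cat falls back to the signature defaults
# color="blue" and num=5; <int:num> converts the last segment to an int.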
| [
"[email protected]"
] | |
3a7c1d7adfb59f00b11ae77e1d37b1885d33f881 | d1ad7bfeb3f9e3724f91458277284f7d0fbe4b2d | /react/003-react-django-justdjango/backend/env/bin/sqlformat | b08eaac3345a9fc3b0a7dbb48e6607276b57395a | [] | no_license | qu4ku/tutorials | 01d2d5a3e8740477d896476d02497d729a833a2b | ced479c5f81c8aff0c4c89d2a572227824445a38 | refs/heads/master | 2023-03-10T20:21:50.590017 | 2023-03-04T21:57:08 | 2023-03-04T21:57:08 | 94,262,493 | 0 | 0 | null | 2023-01-04T21:37:16 | 2017-06-13T22:07:54 | PHP | UTF-8 | Python | false | false | 307 | #!/Users/kamilwroniewicz/_code/_github/_tutorials/react/003-react-django-justdjango/backend/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
a1df8914c35f5e949416165a0782c85926e4e9f7 | 2cf9f165cb4d6e8e9009d74b43020fe2d5c1964f | /chat/migrations/0001_initial.py | a2db3bb2cfb40b358f72ef4113b611bc648018b0 | [] | no_license | jimy1824/chat | 29f5039c6284083b8328502932795bee586dec14 | 627ad4678c6215d37322737b38b3e5eb6d69696f | refs/heads/master | 2023-04-27T15:10:51.316824 | 2019-11-08T05:00:12 | 2019-11-08T05:00:12 | 220,081,959 | 1 | 0 | null | 2023-04-21T20:40:47 | 2019-11-06T20:17:20 | HTML | UTF-8 | Python | false | false | 1,194 | py | # Generated by Django 2.2.6 on 2019-11-06 19:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Chat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('message', models.TextField()),
('receiver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_receiver', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_sender', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| [
"[email protected]"
] | |
3b930951163c7e85c90c1df23c7a762af58fc623 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/pyinstaller/tests/unit/test_modulegraph/testpkg-relimport/pkg/relimport.py | 0e76e39fedf777016e011f2daa0a233827236b57 | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 127 | py | version https://git-lfs.github.com/spec/v1
oid sha256:b1c982ab98ef0425093ad8bafe4c9deb7aad009260b384b762696e8fc4c0e375
size 22
| [
"[email protected]"
] | |
f64c6ffb584cd043d80268c613a23fadf9f3d960 | d0e268862f359bbeec426b00a0c45788f6fb0b4e | /lesson22-优化小实例/main.py | 10fefdf9bbed196814d0bc20e94497752dbfa13d | [] | no_license | jpegbert/PyTorch | f87c2e38572c51842785de5ed1b39bb641402ac6 | 482421c76a093312ffeff7e5af4ecd3ab0cdcf30 | refs/heads/master | 2023-08-27T03:56:26.883297 | 2021-11-08T06:03:30 | 2021-11-08T06:03:30 | 326,677,679 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | import numpy as np
from matplotlib import pyplot as plt
import torch
def himmelblau(x):
return (x[0] ** 2 + x[1] - 11) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2
x = np.arange(-6, 6, 0.1)
y = np.arange(-6, 6, 0.1)
print('x,y range:', x.shape, y.shape)
X, Y = np.meshgrid(x, y)
print('X,Y maps:', X.shape, Y.shape)
Z = himmelblau([X, Y])
fig = plt.figure('himmelblau')
ax = fig.gca(projection='3d')
ax.plot_surface(X, Y, Z)
ax.view_init(60, -30)
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.show()
# [1., 0.], [-4, 0.], [4, 0.]
x = torch.tensor([-4., 0.], requires_grad=True)
print(x)
optimizer = torch.optim.Adam([x], lr=1e-3)
for step in range(20000):
pred = himmelblau(x)
optimizer.zero_grad()
pred.backward()
optimizer.step()
if step % 2000 == 0:
print('step {}: x = {}, f(x) = {}'.format(step, x.tolist(), pred.item()))
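# With this learning rate, Adam converges toward one of Himmelblau's four
# global minima (where f(x) = 0), e.g. (3.0, 2.0) or roughly (-3.78, -3.28),
# depending on the starting point chosen above.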
| [
"[email protected]"
] | |
035504981c5c4ce873430a3439ea302e21244885 | 53c3462ff265b6273f4a4fa17f6d59688f69def0 | /剑指offer/65_hasPath.py | fc4f3bf5633e263578be82adeacb409feea73759 | [] | no_license | 17764591637/jianzhi_offer | b76e69a3ecb2174676da2c8d8d3372a3fc27b5c4 | 27e420ee302d5ab6512ecfdb8d469b043fb7102d | refs/heads/master | 2023-08-03T01:32:51.588472 | 2019-10-13T07:56:21 | 2019-10-13T07:56:21 | 197,692,548 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,300 | py | '''
Design a function that decides whether a matrix contains a path that spells
out all the characters of a given string. The path may start from any cell
and move one cell left, right, up or down at each step. Once the path has
passed through a cell, it may not enter that cell again.
For example, the 3 x 4 matrix
    a b c e
    s f c s
    a d e e
contains a path for the string "bcced" but none for "abcb": after the first
character 'b' takes the cell in row 0, column 1, the path can never re-enter
that cell.
Analysis: backtracking.
This is a classic backtracking problem. Pick any cell in the matrix as the
start of the path. If the i-th character of the string is not the character
ch in the current cell, this cell cannot be the i-th position of the path;
if it is, look for the (i+1)-th character among the neighbouring cells.
Except for cells on the border, every cell has 4 neighbours. Repeat until
every character of the string has found its position in the matrix.
Because of the recursive nature of backtracking, the path behaves like a
stack: when the first n characters have been placed but no neighbour of the
n-th cell matches the (n+1)-th character, we go back to the (n-1)-th
character and re-position the n-th one.
Since the path may not revisit cells, we also keep a marker matrix of the
same size as the character matrix that records which cells are already on
the path. When the cell at (row, col) matches the current character, we try
to place the next one in one of the four neighbours (row, col-1),
(row-1, col), (row, col+1), (row+1, col); if none of them matches, the
current placement is wrong and we backtrack, repeating the process until
the whole string is matched.
'''
class Solution:
def hasPath(self, matrix, rows, cols, path):
# write code here
for i in range(rows):
for j in range(cols):
if matrix[i*cols+j] == path[0]:
#print(i,j)
if self.find(list(matrix),rows,cols,path[1:],i,j):
return True
return False
    def find(self, matrix, rows, cols, path, i, j):
        if not path:
            return True
        saved = matrix[i*cols+j]
        matrix[i*cols+j] = '0'  # mark the current cell as taken ('0' = visited)
        # try all four neighbours; if one direction dead-ends, fall through
        # to the next instead of committing to the first match
        if j+1 < cols and matrix[i*cols+j+1] == path[0] \
                and self.find(matrix, rows, cols, path[1:], i, j+1):  # right
            return True
        if j-1 >= 0 and matrix[i*cols+j-1] == path[0] \
                and self.find(matrix, rows, cols, path[1:], i, j-1):  # left
            return True
        if i+1 < rows and matrix[(i+1)*cols+j] == path[0] \
                and self.find(matrix, rows, cols, path[1:], i+1, j):  # down
            return True
        if i-1 >= 0 and matrix[(i-1)*cols+j] == path[0] \
                and self.find(matrix, rows, cols, path[1:], i-1, j):  # up
            return True
        matrix[i*cols+j] = saved  # restore the cell when backtracking
        return False
s = Solution()
res = s.hasPath(['a','b','c', 'e', 's' ,'f' ,'c', 's', 'a', 'd', 'e', 'e'],3,4,'bcced')
print(res)
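# Counter-example from the problem statement: "abcb" must not be found,
# because the path would have to re-enter the cell already holding 'b'.
res2 = s.hasPath(['a','b','c', 'e', 's' ,'f' ,'c', 's', 'a', 'd', 'e', 'e'], 3, 4, 'abcb')
print(res2)  # expected: False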
| [
"[email protected]"
] | |
eb66983747fd37d5bad2b03c62aa2cb5b9820300 | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /A_Primer_on_Scientific_Programming_with_Python/input/c2f_cml_v3.py | 9d13a354db2390ff9b57049d4ab8c67135d83538 | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 217 | py | import sys
try:
C = float(sys.argv[1])
except:
print 'You failed to provide Celsius degrees as input '\
'on the command line!'
sys.exit(1) # abort
F = 9.0*C/5 + 32
print '%gC is %.1fF' % (C, F)
| [
"[email protected]"
] | |
e06e1100601a1bacb795bb1f1efe4a2e96a3d781 | 221d5405763d1a6ab3c6755583e557c14b9f3742 | /gusregon/gus.py | ca34d58334fd1bd2383ff69dfbb77899f224cc07 | [
"BSD-2-Clause"
] | permissive | tpro/django-gusregon | 4bd7253be9d43345376e36312d763d4653d0bbcd | 75d4f291ae805bd986e1b4cb03b3b94e52a48076 | refs/heads/master | 2021-01-18T18:11:50.746453 | 2015-04-19T11:00:49 | 2015-04-19T11:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | import requests
import json
GUS_API_URL = 'https://wyszukiwarkaregon.stat.gov.pl/wsBIR/UslugaBIRzewnPubl.svc/ajaxEndpoint/'
LOGIN_ENDPOINT = 'Zaloguj'
CAPTCHA_ENDPOINT = 'PobierzCaptcha'
CHECK_CAPTCHA_ENDPOINT = 'SprawdzCaptcha'
SEARCH_ENDPOINT = 'daneSzukaj'
COMPANY_DETAILS_ENDPOINT = 'DanePobierzPelnyRaport'
class GUS(object):
sid = None
report_type = {
'F': 'DaneRaportFizycznaPubl',
'P': 'DaneRaportPrawnaPubl'}
prefix_data = {
'F': 'fiz_',
'P': 'praw_'}
def __init__(self, sid=None):
self.sid = sid
def login(self):
data = {'pKluczUzytkownika': 'aaaaaabbbbbcccccdddd'}
self.sid = self._post(LOGIN_ENDPOINT, data=json.dumps(data))
return self.sid
def get_captcha(self):
return self._post(CAPTCHA_ENDPOINT)
def check_captcha(self, captcha):
data = {'pCaptcha': captcha}
return self._post(
CHECK_CAPTCHA_ENDPOINT, data=json.dumps(data))
def search(self, nip=None, regon=None, krs=None,
detailed=True, no_prefix=True):
if not any([nip, regon, krs]):
raise AttributeError(
'At least one parameter (nip, regon, krs) is required.')
if nip:
search_params = {'Nip': nip}
elif regon:
search_params = {'Regon': regon}
else:
search_params = {'Krs': krs}
data = {'pParametryWyszukiwania': search_params}
basic_info = self._post(
SEARCH_ENDPOINT, data=json.dumps(data))
if not detailed or not basic_info:
return basic_info
basic_info = json.loads(basic_info)[0]
data = {
'pNazwaRaportu': self.report_type.get(basic_info['Typ']),
'pRegon': basic_info['Regon'],
'pSilosID': 1,
}
details = json.loads(self._post(
COMPANY_DETAILS_ENDPOINT, data=json.dumps(data)))[0]
if no_prefix:
return self._remove_prefix(details)
return details
def _post(self, url, **kwargs):
headers = {'Content-Type': 'application/json'}
if self.sid:
headers.update({'sid': self.sid})
url = '%s%s' % (GUS_API_URL, url)
response = requests.post(url, headers=headers, **kwargs)
return json.loads(response.content)['d']
def _remove_prefix(self, data):
data_without_prefix = {}
        for key, value in data.items():  # items() works on both Python 2 and 3
if key.startswith(tuple(self.prefix_data.values())):
key = key[key.find('_') + 1:]
data_without_prefix[key] = value
return data_without_prefix
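# Minimal usage sketch (illustrative; network access to the GUS service is
# assumed and the NIP below is a placeholder, not a real identifier):
#
#   gus = GUS()
#   gus.login()                      # obtains a session id (sid)
#   company = gus.search(nip='1234567890')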
| [
"[email protected]"
] | |
2d46fb8bfbd693468cd059acdc41ca93221da9c6 | c90b3ac3e5ad11cb93d4e6b76b9b9c4a19d0f512 | /.history/copytest_20200502124009.py | 413d6f42f34cf44c8b5ee71050cdd80e9dccb60d | [] | no_license | rbafna6507/passwordstorageproject | 6465585e36c81075856af8d565fe83e358b4a40a | 480c30e358f7902ac0ef5c4e8d9556cb1d6d33f4 | refs/heads/master | 2022-11-25T12:05:02.625968 | 2020-07-27T21:33:38 | 2020-07-27T21:33:38 | 283,021,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | import pickle
import cryptography
from cryptography.fernet import Fernet
def encrypt(message: bytes, key: bytes) -> bytes:
return Fernet(key).encrypt(message)
def decrypt(token: bytes, key: bytes) -> bytes:
return Fernet(key).decrypt(token)
infile = open('jeff.pkl','rb')
z = pickle.load(infile)
key = Fernet.generate_key()
e_userpass = z
username = input("Username: ")
password = input("password: ")
website = input("Website: ")
e_username = encrypt(username.encode(), key)
e_password = encrypt(password.encode(), key)
e_list = [b"Username: " + e_username, b"Password: " + e_password]
e_userpass["Website: " + website] = e_list
outfile = open("jeff.pkl", "wb")
pickle.dump(e_userpass, outfile)
outfile.close()
infile = open('jeff.pkl','rb')
z = pickle.load(infile)
e_userpass = z
# Decrypt and print every stored entry. Each stored value looks like
# b"Username: <token>", so strip the label before calling decrypt().
# Note: the Fernet key is regenerated on every run, so only entries added
# during this run can be decrypted; a real store would persist the key.
for site, creds in e_userpass.items():
    try:
        d_list = [decrypt(c.split(b": ", 1)[1], key).decode() for c in creds]
        print(site, ' : ', d_list)
    except cryptography.fernet.InvalidToken:
        print(site, ' : ', '<encrypted with a previous key>')
 | [
"[email protected]"
] | |
9436483bd9dce7f9bd3c1c1c7e5b4682ce2f8b80 | e357f337ef80487aa6fafacb3e61cd5b56c78b60 | /src/verification/__init__.py | 9c7d02b050afd632c953e658bd2ee2c42fdd8642 | [] | no_license | kaleissin/django-verification | 1537b7e78a8ca07a091bcad5fd014a4544809ba5 | 33f7d0b7d9cc1494269f16b6f07f6077ca974fff | refs/heads/master | 2021-07-15T20:12:43.628199 | 2021-07-05T11:43:39 | 2021-07-05T13:08:50 | 18,205,426 | 12 | 2 | null | 2021-07-05T13:13:16 | 2014-03-28T08:19:41 | Python | UTF-8 | Python | false | false | 60 | py | default_app_config = 'verification.apps.VerificationConfig'
| [
"[email protected]"
] | |
a19137e2bc295d4d4e9c77c15d61e3a9e4d708f9 | ff20661ef00b2db927c78f95a08cd6c40f950ee0 | /inputmorethanone.py | 81994128fa875ec38b52ef7cf8ec19866fc7810f | [] | no_license | Techsrijan/mppython2021 | 57ca26e1acdf5adad2afa692dd5ae23336273603 | 583a991f85e2414c6b8ffe0405f727f3f5d38eee | refs/heads/main | 2023-06-18T22:05:44.602220 | 2021-07-16T00:42:26 | 2021-07-16T00:42:26 | 374,290,977 | 0 | 9 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | '''f=int(input("Enter the first number"))
s=int(input("Enter the Second number"))
'''
f,s=input("Enter two number").split(',')
print("F=",f,"S=",s)
j,k=input("Enter two number").split(' ')
print("j=",j,"k=",k)
print("add=",j+k) | [
"[email protected]"
] | |
ba3177e820dd8a4793158eb218326d48229866ef | bb372428bb90fa80f2e87820b3c8c5ba305dcd4c | /python/bot/eups.py | 0e4812155219126d211b7dcd779287ab6d1ce9ec | [] | no_license | TallJimbo/lsst-bot | 7eb9b7a71a87a1ed416397c193931c80639bd746 | 0843afb2fdd5cc9ba62cf424a7dd73672b10e28f | refs/heads/master | 2021-01-19T05:43:55.451321 | 2016-06-04T00:07:25 | 2016-06-04T00:07:25 | 60,484,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | #!/usr/bin/env python
from __future__ import absolute_import
import eups.table
import os
import logging
__all__ = "get_dependencies"
def get_dependencies(config, path, pkg, recursive=False):
"""Return immediate dependencies from inspecting a table file.
NOTE: recursive=True has not been tested.
"""
e = eups.Eups()
t = eups.table.Table(os.path.join(path, "ups", pkg + ".table"))
dependencies = t.dependencies(e, recursive=recursive)
if recursive:
dependencies.sort(key=lambda x: x[2])
for product, optional, depth in dependencies:
yield product.name, optional
def declare(config, path, pkg, version, tag_only=False):
e = eups.Eups()
if not tag_only:
logging.debug("Declaring {pkg} {version}.".format(pkg=pkg, version=version))
e.declare(productName=pkg, versionName=version, productDir=path)
for tmp in config.eups.tags:
tag = tmp.format(eups=config.eups)
logging.debug("Assigning tag {tag} to {pkg}.".format(pkg=pkg, tag=tag))
e.assignTag(tag, productName=pkg, versionName=version)
def undeclare(config, pkg, version):
e = eups.Eups()
e.undeclare(productName=pkg, versionName=version)
def setup(pkg, version, nodepend=False):
e = eups.Eups(max_depth=(0 if nodepend else -1))
e.setup(productName=pkg, versionName=version)
def tag(pkg, version, tag):
e = eups.Eups()
logging.debug("Assigning tag {tag} to {pkg}.".format(pkg=pkg, tag=tag))
e.assignTag(tag, productName=pkg, versionName=version)
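# Typical flow (illustrative only; "config" is the bot configuration object
# and the package/version strings below are placeholders):
#
#   declare(config, "/path/to/pkg", "afw", "git-abc123")  # declare and tag
#   setup("afw", "git-abc123", nodepend=True)             # setup w/o dependencies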
| [
"[email protected]"
] | |
68870acb9c8c92e4ebbaf015abbd0b6109358e24 | 97aa1181a8305fab0cfc635954c92880460ba189 | /torch/__init__.py | 4442d7e952cc12a7e75937c35b091330e7c69570 | [
"BSD-2-Clause"
] | permissive | zhujiang73/pytorch_mingw | 64973a4ef29cc10b96e5d3f8d294ad2a721ccacb | b0134a0acc937f875b7c4b5f3cef6529711ad336 | refs/heads/master | 2022-11-05T12:10:59.045925 | 2020-08-22T12:10:32 | 2020-08-22T12:10:32 | 123,688,924 | 8 | 4 | NOASSERTION | 2022-10-17T12:30:52 | 2018-03-03T12:15:16 | C++ | UTF-8 | Python | false | false | 14,830 | py |
r"""
The torch package contains data structures for multi-dimensional
tensors and mathematical operations over these are defined.
Additionally, it provides many utilities for efficient serializing of
Tensors and arbitrary types, and other useful utilities.
It has a CUDA counterpart, that enables you to run your tensor computations
on an NVIDIA GPU with compute capability >= 3.0.
"""
import os
import sys
import platform
import ctypes
if sys.version_info < (3,):
raise Exception("Python 2 has reached end-of-life and is no longer supported by PyTorch.")
from ._utils import _import_dotted_name
from ._utils_internal import get_file_path, prepare_multiprocessing_environment, \
USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS
#from .version import __version__
from ._six import string_classes as _string_classes
from typing import Set, Type
__all__ = [
'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
'no_grad', 'enable_grad', 'rand', 'randn',
'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
'lobpcg', '_set_deterministic', '_is_deterministic'
]
################################################################################
# Load the extension module
################################################################################
# See Note [Global dependencies]
def _load_global_deps():
if platform.system() == 'Windows':
return
lib_name = 'libtorch_global_deps' + ('.dylib' if platform.system() == 'Darwin' else '.so')
here = os.path.abspath(__file__)
lib_path = os.path.join(os.path.dirname(here), 'lib', lib_name)
ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv('TORCH_USE_RTLD_GLOBAL')) and \
platform.system() != 'Windows':
# Do it the hard way. You might want to load libtorch with RTLD_GLOBAL in a
# few circumstances:
#
# 1. You're in a build environment (e.g., fbcode) where
# libtorch_global_deps is not available, but you still need
# to get mkl to link in with RTLD_GLOBAL or it will just
# not work.
#
# 2. You're trying to run PyTorch under UBSAN and you need
# to ensure that only one copy of libtorch is loaded, so
# vptr checks work properly
#
# If you're using this setting, you must verify that all the libraries
# you load consistently use the same libstdc++, or you may have
# mysterious segfaults.
#
import os as _dl_flags
if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_LAZY'):
try:
# next try if DLFCN exists
import DLFCN as _dl_flags # type: ignore
except ImportError:
# as a last attempt, use compile-time constants
import torch._dl as _dl_flags # type: ignore
old_flags = sys.getdlopenflags()
sys.setdlopenflags(_dl_flags.RTLD_GLOBAL | _dl_flags.RTLD_LAZY)
from torch._C import *
sys.setdlopenflags(old_flags)
del old_flags
del _dl_flags
else:
# Easy way. You want this most of the time, because it will prevent
# C++ symbols from libtorch clobbering C++ symbols from other
# libraries, leading to mysterious segfaults.
#
# If building in an environment where libtorch_global_deps isn't available
# like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you will
# want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False
#
# See Note [Global dependencies]
if USE_GLOBAL_DEPS:
_load_global_deps()
from torch._C import *
# Appease the type checker; ordinarily this binding is inserted by the
# torch._C module initialization code in C
if False:
import torch._C as _C
__all__ += [name for name in dir(_C)
if name[0] != '_' and
not name.endswith('Base')]
################################################################################
# Define basic utilities
################################################################################
def typename(o):
if isinstance(o, torch.Tensor):
return o.type()
module = ''
class_name = ''
if hasattr(o, '__module__') and o.__module__ != 'builtins' \
and o.__module__ != '__builtin__' and o.__module__ is not None:
module = o.__module__ + '.'
if hasattr(o, '__qualname__'):
class_name = o.__qualname__
elif hasattr(o, '__name__'):
class_name = o.__name__
else:
class_name = o.__class__.__name__
return module + class_name
def is_tensor(obj):
r"""Returns True if `obj` is a PyTorch tensor.
Note that this function is simply doing ``isinstance(obj, Tensor)``.
Using that ``isinstance`` check is better for typechecking with mypy,
and more explicit - so it's recommended to use that instead of
``is_tensor``.
Args:
obj (Object): Object to test
"""
return isinstance(obj, torch.Tensor)
def is_storage(obj):
r"""Returns True if `obj` is a PyTorch storage object.
Args:
obj (Object): Object to test
"""
return type(obj) in _storage_classes
def set_default_tensor_type(t):
r"""Sets the default ``torch.Tensor`` type to floating point tensor type
``t``. This type will also be used as default floating point type for
type inference in :func:`torch.tensor`.
The default floating point tensor type is initially ``torch.FloatTensor``.
Args:
t (type or string): the floating point tensor type or its name
Example::
>>> torch.tensor([1.2, 3]).dtype # initial default for floating point is torch.float32
torch.float32
>>> torch.set_default_tensor_type(torch.DoubleTensor)
>>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
torch.float64
"""
if isinstance(t, _string_classes):
t = _import_dotted_name(t)
_C._set_default_tensor_type(t)
def set_default_dtype(d):
r"""Sets the default floating point dtype to :attr:`d`.
This dtype is:
1. The inferred dtype for python floats in :func:`torch.tensor`.
2. Used to infer dtype for python complex numbers. The default complex dtype is set to
``torch.complex128`` if default floating point dtype is ``torch.float64``,
otherwise it's set to ``torch.complex64``
The default floating point dtype is initially ``torch.float32``.
Args:
d (:class:`torch.dtype`): the floating point dtype to make the default
Example::
>>> # initial default for floating point is torch.float32
>>> torch.tensor([1.2, 3]).dtype
torch.float32
>>> # initial default for floating point is torch.complex64
>>> torch.tensor([1.2, 3j]).dtype
torch.complex64
>>> torch.set_default_dtype(torch.float64)
>>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
torch.float64
>>> torch.tensor([1.2, 3j]).dtype # a new complex tensor
torch.complex128
"""
_C._set_default_dtype(d)
def _set_deterministic(d):
r"""Sets a global flag to force all operations to use a deterministic
implementation if available. If an operation that does not have a
deterministic implementation is called while this setting is True, the
operation will throw a RuntimeError.
Note that deterministic operations tend to have worse performance than
non-deterministic operations.
Args:
d (:class:`bool`): If True, force operations to be deterministic.
If False, allow non-deterministic operations.
.. warning::
This feature is experimental and not complete. The above docstring
represents what the future behavior is intended to be. Right now,
`_set_deterministic` will only affect `torch.bmm` and convolution
operators.
"""
_C._set_deterministic(d)
def _is_deterministic():
r"""Returns True if the global deterministic flag is turned on and
operations are being forced to use a deterministic implementation.
.. warning::
This feature is experimental and not complete. The above docstring
represents what the future behavior is intended to be. Right now,
the global deterministic flag will only affect `torch.bmm` and
convolution operators.
"""
return _C._get_deterministic()
# If you edit these imports, please update torch/__init__.py.in as well
from .random import set_rng_state, get_rng_state, manual_seed, initial_seed, seed
from .serialization import save, load
from ._tensor_str import set_printoptions
################################################################################
# Define Storage and Tensor classes
################################################################################
from .tensor import Tensor
from .storage import _StorageBase
class DoubleStorage(_C.DoubleStorageBase, _StorageBase):
pass
class FloatStorage(_C.FloatStorageBase, _StorageBase):
pass
class HalfStorage(_C.HalfStorageBase, _StorageBase):
pass
class LongStorage(_C.LongStorageBase, _StorageBase):
pass
class IntStorage(_C.IntStorageBase, _StorageBase):
pass
class ShortStorage(_C.ShortStorageBase, _StorageBase):
pass
class CharStorage(_C.CharStorageBase, _StorageBase):
pass
class ByteStorage(_C.ByteStorageBase, _StorageBase):
pass
class BoolStorage(_C.BoolStorageBase, _StorageBase):
pass
class BFloat16Storage(_C.BFloat16StorageBase, _StorageBase):
pass
class ComplexDoubleStorage(_C.ComplexDoubleStorageBase, _StorageBase):
pass
class ComplexFloatStorage(_C.ComplexFloatStorageBase, _StorageBase):
pass
class QUInt8Storage(_C.QUInt8StorageBase, _StorageBase):
pass
class QInt8Storage(_C.QInt8StorageBase, _StorageBase):
pass
class QInt32Storage(_C.QInt32StorageBase, _StorageBase):
pass
_storage_classes = {
DoubleStorage, FloatStorage, LongStorage, IntStorage, ShortStorage,
CharStorage, ByteStorage, HalfStorage, BoolStorage, QUInt8Storage, QInt8Storage,
QInt32Storage, BFloat16Storage, ComplexFloatStorage, ComplexDoubleStorage
}
# The _tensor_classes set is initialized by the call to _C._initialize_tensor_type_bindings()
_tensor_classes: Set[Type] = set()
################################################################################
# Initialize extension
################################################################################
def manager_path():
if platform.system() == 'Windows':
return b""
path = get_file_path('torch', 'bin', 'torch_shm_manager')
prepare_multiprocessing_environment(get_file_path('torch'))
if not os.path.exists(path):
raise RuntimeError("Unable to find torch_shm_manager at " + path)
return path.encode('utf-8')
# Shared memory manager needs to know the exact location of manager executable
_C._initExtension(manager_path())
del manager_path
# Appease the type checker: it can't deal with direct setting of globals().
# Note that we will see "too many" functions when reexporting this way; there
# is not a good way to fix this problem. Perhaps, try to redesign VariableFunctions
# so that this import is good enough
if False:
from torch._C._VariableFunctions import *
for name in dir(_C._VariableFunctions):
if name.startswith('__'):
continue
globals()[name] = getattr(_C._VariableFunctions, name)
__all__.append(name)
################################################################################
# Import interface functions defined in Python
################################################################################
# needs to be after the above ATen bindings so we can overwrite from Python side
from .functional import *
################################################################################
# Remove unnecessary members
################################################################################
del DoubleStorageBase
del FloatStorageBase
del LongStorageBase
del IntStorageBase
del ShortStorageBase
del CharStorageBase
del ByteStorageBase
del BoolStorageBase
del QUInt8StorageBase
del BFloat16StorageBase
del ComplexDoubleStorageBase
del ComplexFloatStorageBase
################################################################################
# Import most common subpackages
################################################################################
import torch.cuda
import torch.autograd
from torch.autograd import no_grad, enable_grad, set_grad_enabled
import torch.futures
import torch.nn
import torch.nn.intrinsic
import torch.nn.quantized
import torch.optim
import torch.multiprocessing
import torch.sparse
import torch.utils.backcompat
import torch.onnx
import torch.jit
import torch.hub
import torch.random
import torch.distributions
import torch.testing
import torch.backends.cuda
import torch.backends.mkl
import torch.backends.mkldnn
import torch.backends.openmp
import torch.backends.quantized
import torch.quantization
import torch.utils.data
import torch.__config__
import torch.__future__
_C._init_names(list(torch._storage_classes))
# attach docstrings to torch and tensor functions
from . import _torch_docs, _tensor_docs, _storage_docs
del _torch_docs, _tensor_docs, _storage_docs
def compiled_with_cxx11_abi():
r"""Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1"""
return _C._GLIBCXX_USE_CXX11_ABI
# Import the ops "namespace"
from torch._ops import ops
from torch._classes import classes
# Import the quasi random sampler
import torch.quasirandom
# If you are seeing this, it means that this call site was not checked if
# the memory format could be preserved, and it was switched to old default
# behaviour of contiguous
legacy_contiguous_format = contiguous_format
# Register fork handler to initialize OpenMP in child processes (see gh-28389)
from torch.multiprocessing._atfork import register_after_fork
register_after_fork(torch.get_num_threads)
del register_after_fork
# Import tools that require fully imported torch (for applying
# torch.jit.script as a decorator, for instance):
from ._lobpcg import lobpcg
# These were previously defined in native_functions.yaml and appeared on the
# `torch` namespace, but we moved them to c10 dispatch to facilitate custom
# class usage. We add these lines here to preserve backward compatbility.
quantized_lstm = torch.ops.aten.quantized_lstm
quantized_gru = torch.ops.aten.quantized_gru
| [
"[email protected]"
] | |
31a96cf391d906b0d3d59fcd37437e16f21f474b | fd326562890d4f1987c384fc7c60374938231222 | /OOP/ExamPrep/Exam10April21/project/decoration/ornament.py | 90d7980c034613da08f4ee857bf726562ac89427 | [] | no_license | miro-lp/SoftUni | cc3b0ff742218c9ceaf93f05c319ccfeed5bc8a4 | 283d9328537919de49f7f6a301e58593bae9ca2a | refs/heads/main | 2023-08-23T21:22:07.856226 | 2021-08-25T15:10:18 | 2021-08-25T15:10:18 | 318,134,101 | 2 | 1 | null | 2021-08-10T12:51:54 | 2020-12-03T09:03:08 | Python | UTF-8 | Python | false | false | 151 | py | from project.decoration.base_decoration import BaseDecoration
class Ornament(BaseDecoration):
def __init__(self):
super().__init__(1, 5)
| [
"[email protected]"
] | |
588948095f2db1f4d431c649e77a76b72ecf54b8 | 68f57fd1dd274be72af6d85762b67bbf8d2ef6d6 | /tests/test_cosine.py | 3ac719652f889a7529befb8bcbf87a328c003cfa | [] | no_license | afcarl/simplecosine | 287cbf40ef8aa2251ea538b7b3c2d28c5b6f2488 | 1ba869198ab3211dd4b0412e80e670308007f687 | refs/heads/master | 2020-03-17T23:56:28.854494 | 2017-06-15T21:33:36 | 2017-06-15T21:33:36 | 134,069,251 | 1 | 0 | null | 2018-05-19T14:29:05 | 2018-05-19T14:29:05 | null | UTF-8 | Python | false | false | 2,909 | py | import unittest
from simplecosine.cosine import CosineSetSimilarity, CosineTextSimilarity
import numpy
import pickle
class TestSetCosineClass(unittest.TestCase):
def setUp(self):
self.ilist = [('a', 'b', 'c'),
['b', 'c', 'd k'],
('d k', 'e', 'f')
]
def test_cosine(self):
cosine = CosineSetSimilarity(self.ilist)
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.378, places=3)
cosine_sim = cosine(('g', 'h', 'd k', 'd k'), s2)
self.assertAlmostEqual(cosine_sim, 0.267, places=3)
def test_cosine_na(self):
cosine = CosineSetSimilarity(self.ilist)
cosine_sim = cosine(self.ilist[0], ())
assert numpy.isnan(cosine_sim)
def test_cosine_identical(self):
cosine = CosineSetSimilarity(self.ilist)
cosine_sim = cosine(self.ilist[0], self.ilist[0])
self.assertAlmostEqual(cosine_sim, 1, places=5)
def test_cosine_cache(self):
cosine = CosineSetSimilarity(self.ilist)
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.378, places=3)
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.378, places=3)
def test_cosine_no_corpus(self):
cosine = CosineSetSimilarity([])
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.667, places=3)
cosine_sim = cosine(('g', 'h', 'd k'), s2)
self.assertAlmostEqual(cosine_sim, 0.333, places=3)
def test_cosine_pickle(self) :
cosine = CosineSetSimilarity(self.ilist)
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
pickle.dumps(cosine)
cosine = CosineSetSimilarity([])
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
pickle.dumps(cosine)
class TestTextCosineClass(unittest.TestCase):
def setUp(self):
self.ilist = ['a b c',
'b c d',
'd e f']
def test_cosine(self):
cosine = CosineTextSimilarity(self.ilist)
s1 = self.ilist[0]
s2 = self.ilist[1]
cosine_sim = cosine(s1, s2)
self.assertAlmostEqual(cosine_sim, 0.378, places=3)
def test_cosine_na(self):
cosine = CosineTextSimilarity(self.ilist)
cosine_sim = cosine(self.ilist[0], '')
assert numpy.isnan(cosine_sim)
def test_cosine_identical(self):
cosine = CosineTextSimilarity(self.ilist)
cosine_sim = cosine(self.ilist[0], self.ilist[0])
self.assertAlmostEqual(cosine_sim, 1, places=5)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
9e8faea111d72f8b2e5ec2a6e79cd03f082cad11 | 8f24e443e42315a81028b648e753c50967c51c78 | /python/ray/train/examples/horovod/horovod_pytorch_example.py | f4d15ae0515b7fad3547711c35c685f4407f2ced | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | simon-mo/ray | d07efdada8d05c6e10417f96e8dfc35f9ad33397 | 1e42e6cd15e2fb96c217cba8484e59ed0ef4b0c8 | refs/heads/master | 2023-03-06T00:09:35.758834 | 2022-12-23T18:46:48 | 2022-12-23T18:46:48 | 122,156,396 | 4 | 2 | Apache-2.0 | 2023-03-04T08:56:56 | 2018-02-20T04:47:06 | Python | UTF-8 | Python | false | false | 8,402 | py | import argparse
from filelock import FileLock
import horovod.torch as hvd
import os
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data.distributed
from torchvision import datasets, transforms
from ray.air import session
from ray.air.config import ScalingConfig
from ray.train.horovod import HorovodTrainer
from ray.train.torch.torch_checkpoint import TorchCheckpoint
import ray.train.torch
def metric_average(val, name):
tensor = torch.tensor(val)
avg_tensor = hvd.allreduce(tensor, name=name)
return avg_tensor.item()
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
        return F.log_softmax(x, dim=1)  # explicit dim avoids the deprecated implicit default
def setup(config):
data_dir = config.get("data_dir", None)
seed = config.get("seed", 42)
batch_size = config.get("batch_size", 64)
use_adasum = config.get("use_adasum", False)
lr = config.get("lr", 0.01)
momentum = config.get("momentum", 0.5)
use_cuda = config.get("use_cuda", False)
# Horovod: initialize library.
hvd.init()
torch.manual_seed(seed)
if use_cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
kwargs = {"pin_memory": True} if use_cuda else {}
data_dir = data_dir or "~/data"
with FileLock(os.path.expanduser("~/.horovod_lock")):
train_dataset = datasets.MNIST(
data_dir,
train=True,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
# Horovod: use DistributedSampler to partition the training data.
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, num_replicas=hvd.size(), rank=hvd.rank()
)
# Note, don't set `num_workers` in DataLoader (not even 1),
# as that will separately start multiple processes (each corresponding to 1 worker)
# to load the data. This is known to cause issues with Ray.
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler, **kwargs
)
model = Net()
# By default, Adasum doesn't need scaling up learning rate.
lr_scaler = hvd.size() if not use_adasum else 1
if use_cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(model.parameters(), lr=lr * lr_scaler, momentum=momentum)
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
op=hvd.Adasum if use_adasum else hvd.Average,
)
return model, optimizer, train_loader, train_sampler
def train_epoch(
model, optimizer, train_sampler, train_loader, epoch, log_interval, use_cuda
):
loss = None
model.train()
# Horovod: set epoch to sampler for shuffling.
train_sampler.set_epoch(epoch)
for batch_idx, (data, target) in enumerate(train_loader):
if use_cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
# Horovod: use train_sampler to determine the number of
# examples in this worker's partition.
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(train_sampler),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
return loss.item() if loss else None
def train_func(config):
num_epochs = config.get("num_epochs", 10)
log_interval = config.get("log_interval", 10)
use_cuda = config.get("use_cuda", False)
save_model_as_dict = config.get("save_model_as_dict", False)
model, optimizer, train_loader, train_sampler = setup(config)
results = []
for epoch in range(num_epochs):
loss = train_epoch(
model, optimizer, train_sampler, train_loader, epoch, log_interval, use_cuda
)
if save_model_as_dict:
checkpoint = TorchCheckpoint.from_state_dict(model.state_dict())
else:
checkpoint = TorchCheckpoint.from_model(model)
results.append(loss)
session.report(dict(loss=loss), checkpoint=checkpoint)
# Only used for testing.
return results
def main(num_workers, use_gpu, kwargs):
trainer = HorovodTrainer(
train_loop_per_worker=train_func,
train_loop_config={
"num_epochs": kwargs["num_epochs"],
"log_interval": kwargs["log_interval"],
"use_cuda": kwargs["use_cuda"],
},
scaling_config=ScalingConfig(num_workers=num_workers, use_gpu=use_gpu),
)
result = trainer.fit()
print(result)
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(
description="PyTorch MNIST Example",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--num-epochs",
type=int,
default=5,
metavar="N",
help="number of epochs to train (default: 10)",
)
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.5,
metavar="M",
help="SGD momentum (default: 0.5)",
)
parser.add_argument(
"--use-gpu", action="store_true", default=False, help="enables CUDA training"
)
parser.add_argument(
"--seed", type=int, default=42, metavar="S", help="random seed (default: 42)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--use-adasum",
action="store_true",
default=False,
help="use adasum algorithm to do reduction",
)
parser.add_argument(
"--num-workers",
type=int,
default=2,
help="Number of Ray workers to use for training.",
)
parser.add_argument(
"--data-dir",
help="location of the training dataset in the local filesystem ("
"will be downloaded if needed)",
)
parser.add_argument(
"--address",
required=False,
type=str,
default=None,
help="Address of Ray cluster.",
)
args = parser.parse_args()
if args.address:
ray.init(args.address)
else:
ray.init()
use_cuda = args.use_gpu if args.use_gpu is not None else False
kwargs = {
"data_dir": args.data_dir,
"seed": args.seed,
"use_cuda": use_cuda,
"batch_size": args.batch_size,
"use_adasum": args.use_adasum if args.use_adasum else False,
"lr": args.lr,
"momentum": args.momentum,
"num_epochs": args.num_epochs,
"log_interval": args.log_interval,
}
main(num_workers=args.num_workers, use_gpu=use_cuda, kwargs=kwargs)
| [
"[email protected]"
] | |
93e2cb9162dfaedfe3a58c9892ccb9936f9405c9 | 9e7d7b4d029554eed0f760a027cd94558b919ae2 | /CHAPTER15/overlaying.py | e320bf396d4410f1a0cc189810fc886ac93deca0 | [] | no_license | pooja1506/AutomateTheBoringStuff_2e | 8247b68a195d5e1976c6474f0e97d947906ffd35 | 5bab9ccdcdb22ee10fe1272c91042be40fd67c17 | refs/heads/master | 2022-04-10T19:21:44.402829 | 2020-04-05T12:10:32 | 2020-04-05T12:10:32 | 249,620,282 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | import PyPDF2
minutesFile = open('meetingminutes.pdf', 'rb')
pdfReader = PyPDF2.PdfFileReader(minutesFile)
minutesFirstPage = pdfReader.getPage(0)
pdfWatermarkReader = PyPDF2.PdfFileReader(open('watermark.pdf', 'rb'))
minutesFirstPage.mergePage(pdfWatermarkReader.getPage(0))
pdfWriter = PyPDF2.PdfFileWriter()
pdfWriter.addPage(minutesFirstPage)
for pageNum in range(1, pdfReader.numPages):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
resultPdfFile = open('watermarkedCover.pdf', 'wb')
pdfWriter.write(resultPdfFile)
minutesFile.close()
resultPdfFile.close() | [
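# Only page 0 receives the watermark; the remaining pages are copied through
# unchanged, and everything is written out to watermarkedCover.pdf.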
"[email protected]"
] | |
da4d9970097abb9879bdaf10f8d859c5287053b0 | 5b8fcb1bf82a7c1ef5b6c2a939b1d1597bc7a24b | /create_json_for_airtable_operator.py | e00238b2f39eae43c6d55eae4974dcf2d194d262 | [] | no_license | katerinekhh/airflow_custom_stuff | 2420c3ee95dab01e5eeeb8248500e253126e5b48 | 43ba78d96770a575ba7ab11a691b101e6d6604af | refs/heads/master | 2022-10-12T13:55:01.916266 | 2020-06-12T13:17:06 | 2020-06-12T13:17:06 | 271,645,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,245 | py | from datetime import datetime
import json
from airflow.utils.decorators import apply_defaults
from airflow.models.baseoperator import BaseOperator
from airflow.hooks.http_hook import HttpHook
class CreateJsonForAirtableOperator(BaseOperator):
@apply_defaults
def __init__(
self,
endpoint: str,
http_conn_id: str,
message_id_filepath: str,
update_filepath: str,
method='GET',
request_params=None,
*args, **kwargs,
):
super().__init__(*args, **kwargs)
self.http_conn_id = http_conn_id
self.request_params = request_params or {}
self.endpoint = endpoint
self.message_id_filepath = message_id_filepath
self.update_filepath = update_filepath
self.hook = HttpHook(
method=method,
http_conn_id=http_conn_id)
def execute(self, context):
response = self.hook.run(self.endpoint, data=self.request_params)
with open(self.message_id_filepath, 'r') as id_file:
message_id = id_file.read()
json_response = json.loads(response.text)
airtable_updates_data = {}
airtable_updates_data['records'] = []
for update in json_response['result']:
update_data_fields = {}
update_data = {}
if update['callback_query']['message']['message_id'] == int(message_id):
chat_id = update['callback_query']['message']['chat']['id']
username = update['callback_query']['from']['username']
                triggered_at = datetime.fromtimestamp(
                    update['callback_query']['message']['date']
                ).isoformat(timespec='milliseconds') + "Z"  # e.g. 2021-01-01T12:00:00.000Z
update_data['chat_id'] = chat_id
update_data['username'] = username
update_data['triggered_at'] = triggered_at
update_data['event_type'] = 'push_go_button'
update_data['reporter_name'] = 'khkaterina'
update_data_fields['fields'] = update_data
airtable_updates_data['records'].append(update_data_fields)
with open(self.update_filepath, 'w') as file:
json.dump(airtable_updates_data, file)
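# Minimal sketch of wiring this operator into a DAG; the task id, connection id
# and file paths below are illustrative, not part of this module:
#
# build_json = CreateJsonForAirtableOperator(
#     task_id="build_airtable_json",
#     endpoint="getUpdates",
#     http_conn_id="telegram_api",
#     message_id_filepath="/tmp/message_id.txt",
#     update_filepath="/tmp/airtable_updates.json",
#     dag=dag,
# )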
| [
"[email protected]"
] | |
ef2911b4133217bc48dbf92e02a62bd1d9b5d171 | e168a16fdd43d3023d16d8a643ccca318a44c327 | /evm/logic/call.py | 42acedd0f1791f1cebd63438077524bdee541b46 | [] | no_license | DavidKnott/py-evm | c589c88af55c121ea375bfdb0a53ecc6a4836119 | 66c47f58a62e995b5ce89e47007c8b03796c80b9 | refs/heads/master | 2021-01-01T04:08:39.921768 | 2017-07-18T13:03:45 | 2017-07-18T13:03:45 | 97,128,228 | 1 | 0 | null | 2017-07-13T13:54:57 | 2017-07-13T13:54:56 | null | UTF-8 | Python | false | false | 7,349 | py | from evm import constants
from evm.opcode import (
Opcode,
)
from evm.utils.address import (
force_bytes_to_address,
)
class BaseCall(Opcode):
def compute_msg_gas(self, computation, gas, to, value):
raise NotImplementedError("Must be implemented by subclasses")
def get_call_params(self, computation):
raise NotImplementedError("Must be implemented by subclasses")
def __call__(self, computation):
computation.gas_meter.consume_gas(
self.gas_cost,
reason=self.mnemonic,
)
(
gas,
value,
to,
sender,
code_address,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
should_transfer_value,
) = self.get_call_params(computation)
computation.extend_memory(memory_input_start_position, memory_input_size)
computation.extend_memory(memory_output_start_position, memory_output_size)
call_data = computation.memory.read(memory_input_start_position, memory_input_size)
#
# Message gas allocation and fees
#
child_msg_gas, child_msg_gas_fee = self.compute_msg_gas(computation, gas, to, value)
computation.gas_meter.consume_gas(child_msg_gas_fee, reason=self.mnemonic)
# Pre-call checks
sender_balance = computation.state_db.get_balance(
computation.msg.storage_address,
)
insufficient_funds = should_transfer_value and sender_balance < value
stack_too_deep = computation.msg.depth + 1 > constants.STACK_DEPTH_LIMIT
if insufficient_funds or stack_too_deep:
if self.logger:
if insufficient_funds:
err_message = "Insufficient Funds: have: {0} | need: {1}".format(
sender_balance,
value,
)
elif stack_too_deep:
err_message = "Stack Limit Reached"
else:
raise Exception("Invariant: Unreachable code path")
self.logger.debug(
"%s failure: %s",
self.mnemonic,
err_message,
)
computation.gas_meter.return_gas(child_msg_gas)
computation.stack.push(0)
else:
if code_address:
code = computation.state_db.get_code(code_address)
else:
code = computation.state_db.get_code(to)
child_msg_kwargs = {
'gas': child_msg_gas,
'value': value,
'to': to,
'data': call_data,
'code': code,
'code_address': code_address,
'should_transfer_value': should_transfer_value,
}
if sender is not None:
child_msg_kwargs['sender'] = sender
child_msg = computation.prepare_child_message(**child_msg_kwargs)
if child_msg.is_create:
child_computation = computation.vm.apply_create_message(child_msg)
else:
child_computation = computation.vm.apply_message(child_msg)
computation.children.append(child_computation)
if child_computation.error:
computation.stack.push(0)
else:
actual_output_size = min(memory_output_size, len(child_computation.output))
computation.gas_meter.return_gas(child_computation.gas_meter.gas_remaining)
computation.memory.write(
memory_output_start_position,
actual_output_size,
child_computation.output[:actual_output_size],
)
computation.stack.push(1)
class Call(BaseCall):
def compute_msg_gas(self, computation, gas, to, value):
account_exists = computation.state_db.account_exists(to)
transfer_gas_fee = constants.GAS_CALLVALUE if value else 0
create_gas_fee = constants.GAS_NEWACCOUNT if not account_exists else 0
total_fee = gas + transfer_gas_fee + create_gas_fee
child_msg_gas = gas + (constants.GAS_CALLSTIPEND if value else 0)
return child_msg_gas, total_fee
def get_call_params(self, computation):
gas = computation.stack.pop(type_hint=constants.UINT256)
to = force_bytes_to_address(computation.stack.pop(type_hint=constants.BYTES))
(
value,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
) = computation.stack.pop(num_items=5, type_hint=constants.UINT256)
return (
gas,
value,
to,
None, # sender
None, # code_address
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
True, # should_transfer_value,
)
class CallCode(BaseCall):
def compute_msg_gas(self, computation, gas, to, value):
transfer_gas_cost = constants.GAS_CALLVALUE if value else 0
total_fee = transfer_gas_cost + gas
child_msg_gas = gas + (constants.GAS_CALLSTIPEND if value else 0)
return child_msg_gas, total_fee
def get_call_params(self, computation):
gas = computation.stack.pop(type_hint=constants.UINT256)
code_address = force_bytes_to_address(computation.stack.pop(type_hint=constants.BYTES))
(
value,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
) = computation.stack.pop(num_items=5, type_hint=constants.UINT256)
to = computation.msg.storage_address
sender = computation.msg.storage_address
return (
gas,
value,
to,
sender,
code_address,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
True, # should_transfer_value,
)
class DelegateCall(CallCode):
def compute_msg_gas(self, computation, gas, to, value):
return gas, gas
def get_call_params(self, computation):
gas = computation.stack.pop(type_hint=constants.UINT256)
code_address = force_bytes_to_address(computation.stack.pop(type_hint=constants.BYTES))
(
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
) = computation.stack.pop(num_items=4, type_hint=constants.UINT256)
to = computation.msg.storage_address
sender = computation.msg.sender
value = computation.msg.value
return (
gas,
value,
to,
sender,
code_address,
memory_input_start_position,
memory_input_size,
memory_output_start_position,
memory_output_size,
False, # should_transfer_value,
)
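# Worked example of the gas rules above (Frontier-era constant values, shown
# for illustration): a value-transferring CALL to a previously empty account
# with gas=10000 charges 10000 + GAS_CALLVALUE (9000) + GAS_NEWACCOUNT (25000)
# up front, while the child message itself runs with
# 10000 + GAS_CALLSTIPEND (2300) gas.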
| [
"[email protected]"
] | |
2bff7ce472c638cc2952ee313e844673778ab37c | 5faecec9b20d262150e48ac9f31c396f840b1f2f | /migrations/0010_auto_20200804_0913.py | f175b678b5857527caa863cd6db136e7bc3d803b | [] | no_license | binkesi/blogsgn | fb767b0d22e3eb1c32ea7ee8fd0796766e3a8600 | 579b374f802a5651d20c3b3f85d8ff6a22476bdd | refs/heads/master | 2022-11-27T23:24:45.574601 | 2020-08-04T10:06:28 | 2020-08-04T10:06:28 | 283,161,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | # Generated by Django 3.0.6 on 2020-08-04 01:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogsgn', '0009_auto_20200804_0653'),
]
operations = [
migrations.AlterField(
model_name='author',
name='nation',
field=models.CharField(choices=[('CH', 'China'), ('US', 'America'), ('UK', 'England'), ('GE', 'German'), ('CA', 'Canada')], max_length=80, verbose_name='Nationality'),
),
]
| [
"[email protected]"
] | |
0b583e86f97c1a537be2b27d6980f3a3dd93df1a | 528c811306faa4a34bf51fca7955b7a24ac2e30c | /Python/Valid Anagram.py | 263508830b33b30fd769bcad02fa5dbf91901f61 | [] | no_license | ganjingcatherine/LeetCode-1 | 1addbd7e4d9254a146601f9d5e28b8becb8235a6 | 488782d3f1e759da2d32b4e82dbf55b96c431244 | refs/heads/master | 2021-05-11T03:15:16.810035 | 2016-02-06T06:19:18 | 2016-02-06T06:19:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | """
Given two strings s and t, write a function to determine if t is an anagram of s.
For example,
s = "anagram", t = "nagaram", return true.
s = "rat", t = "car", return false.
Note:
You may assume the string contains only lowercase alphabets.
"""
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
if len(s) != len(t): return False
table = {}
for i in xrange(len(s)):
if s[i] not in table:
table[s[i]] = 1
else:
table[s[i]] += 1
for i in xrange(len(t)):
if t[i] in table and table[t[i]] > 0:
table[t[i]] -= 1
else:
return False
return True
| [
"[email protected]"
] | |
7713fd10c64850e9770370122883e5b6ea01086f | e2ae96b74289a04a2386294bf51bacad92e2a830 | /city_scrapers_core/spiders/legistar.py | 29c3176db02b4b0851fd939f9f79845a629163c5 | [
"MIT"
] | permissive | will-snavely/city-scrapers-core | 6afa9d78fb1c325420baaae030633b01111f11bb | cb865069e49d09ab251b7f99247df5e13c5d0241 | refs/heads/main | 2022-12-11T21:39:03.307347 | 2020-09-09T13:29:53 | 2020-09-09T13:29:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,265 | py | from datetime import datetime
from typing import Iterable, List, Mapping, Optional, Tuple
from urllib.parse import urlparse
import scrapy
from legistar.events import LegistarEventsScraper
from ..items import Meeting
from .spider import CityScrapersSpider
LINK_TYPES = ["Agenda", "Minutes", "Video", "Summary", "Captions"]
class LegistarSpider(CityScrapersSpider):
"""Subclass of :class:`CityScrapersSpider` that handles processing Legistar sites,
which almost always share the same components and general structure.
Uses the `Legistar events scraper <https://github.com/opencivicdata/python-legistar-scraper/blob/master/legistar/events.py>`_
from the ```python-legistar-scraper`` library <https://github.com/opencivicdata/python-legistar-scraper>`.
Any methods that don't pull the correct values can be replaced.
""" # noqa
link_types = []
def parse(self, response: scrapy.http.Response) -> Iterable[Meeting]:
"""Parse response from the :class:`LegistarEventsScraper`. Ignores the ``scrapy``
:class:`Response` which is still requested to be able to hook into ``scrapy``
broadly.
:param response: Scrapy response to be ignored
:return: Iterable of processed meetings
"""
events = self._call_legistar()
return self.parse_legistar(events)
def parse_legistar(
self, events: Iterable[Tuple[Mapping, Optional[str]]]
) -> Iterable[Meeting]:
"""Method to be implemented by Spider classes that will handle the response from
Legistar. Functions similar to ``parse`` for other Spider classes.
:param events: Iterable consisting of tuples of a dict-like object of scraped
results from legistar and an agenda URL (if available)
:raises NotImplementedError: Must be implemented in subclasses
:return: [description]
"""
raise NotImplementedError("Must implement parse_legistar")
def _call_legistar(
self, since: int = None
) -> Iterable[Tuple[Mapping, Optional[str]]]:
les = LegistarEventsScraper()
les.BASE_URL = self.base_url
les.EVENTSPAGE = f"{self.base_url}/Calendar.aspx"
if not since:
since = datetime.today().year
return les.events(since=since)
def legistar_start(self, item: Mapping) -> datetime:
"""Pulls the start time from a Legistar item
:param item: Scraped item from Legistar
:return: Meeting start datetime
"""
start_date = item.get("Meeting Date")
start_time = item.get("Meeting Time")
if start_date and start_time:
try:
return datetime.strptime(
f"{start_date} {start_time}", "%m/%d/%Y %I:%M %p"
)
except ValueError:
return datetime.strptime(start_date, "%m/%d/%Y")
def legistar_links(self, item: Mapping) -> List[Mapping[str, str]]:
"""Pulls relevant links from a Legistar item
:param item: Scraped item from Legistar
:return: List of meeting links
"""
links = []
for link_type in LINK_TYPES + self.link_types:
if isinstance(item.get(link_type), dict) and item[link_type].get("url"):
links.append({"href": item[link_type]["url"], "title": link_type})
return links
def legistar_source(self, item: Mapping) -> str:
"""Pulls the source URL from a Legistar item. Pulls a specific meeting URL if
available, otherwise defaults to the general Legistar calendar page.
:param item: Scraped item from Legistar
:return: Source URL
"""
default_url = f"{self.base_url}/Calendar.aspx"
if isinstance(item.get("Name"), dict):
return item["Name"].get("url", default_url)
if isinstance(item.get("Meeting Details"), dict):
return item["Meeting Details"].get("url", default_url)
return default_url
@property
def base_url(self) -> str:
"""Property with the Legistar site's base URL
:return: Legistar base URL
"""
parsed_url = urlparse(self.start_urls[0])
return f"{parsed_url.scheme}://{parsed_url.netloc}"
| [
"[email protected]"
] | |
848257d62f49ecdcc747c38384d79aa0afb7700b | 8db1ab4f9a2e47f7e8d69a685837d7e747bf9442 | /cocos2d-x-tool/py_tool/syncResToProject.py | 0773ceebfd0313dd7ab2c0df0f04cec7b688b661 | [] | no_license | tanzuoliang/python | 051d6e46cebd7fdb74a0173aca0ca7a2b3ef5986 | 70f782cf3c72d2b7043727910509eb2d2f2fe065 | refs/heads/master | 2021-10-20T05:36:03.732738 | 2019-02-26T02:37:18 | 2019-02-26T02:37:18 | 111,288,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,595 | py | #!/usr/bin/python
#encoding=utf-8
from myutil.utils import syncDir,checkSVNStatus
import os
fromRoot = '/Users/tanzuoliang/art_resource'
toRoot = "../res/new_ui"
toLanguage = "../res"
ll = [
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ icon','天天坦克/UI 效果图+输出 20170214 优化版/00 icon','icon'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ button','天天坦克/UI 效果图+输出 20170214 优化版/00 button','button'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ wenzi','天天坦克/UI 效果图+输出 20170214 优化版/00 wenzi','wenzi'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 通用','天天坦克/UI 效果图+输出 20170214 优化版/00 通用','common'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 字体','天天坦克/UI 效果图+输出 20170214 优化版/00 字体','fnt'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ BG','天天坦克/UI 效果图+输出 20170214 优化版/00 BG','bg')
]
"""
Language-specific resources
"""
lll = [
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 英文翻译','天天坦克/UI 效果图+输出 20170214 优化版/00 英文翻译','lang_en'),
('天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 翻译原版','天天坦克/UI 效果图+输出 20170214 优化版/00 翻译原版','lang_chs')
]
from myutil.utils import getDirsize
import os
if os.path.exists('../res-new') and getDirsize('../res') < getDirsize('../res-new'):
print "当前res是压缩后的"
else:
os.system('svn up %s'%toLanguage)
for tu in ll:
fromDir = os.path.join(fromRoot, tu[0])
toDir = os.path.join(toRoot, tu[2])
os.system("svn up %s"%fromDir)
fromDir = os.path.join(fromRoot, tu[1])
syncDir(fromDir, toDir,False)
checkSVNStatus(toRoot,[tu[2]])
for tu in lll:
fromDir = os.path.join(fromRoot, tu[0])
toDir = os.path.join(toLanguage, "language_img", tu[2],"res","new_ui")
os.system("svn up %s"%fromDir)
fromDir = os.path.join(fromRoot, tu[1])
if not os.path.exists(toDir):
			os.makedirs(toDir)  # bug fix: os.makedir does not exist in the stdlib
syncDir(fromDir, toDir,False)
checkSVNStatus(os.path.join(toLanguage, "language_img"),[tu[2]])
"""
English novice guide
"""
# os.system("cp %s %s"%(os.path.join(fromRoot,"天天坦克/UI\ 效果图+输出\ 20170214\ 优化版/00\ 英文翻译/Novice\ guide/controlexplain.jpg"),os.path.join(toRoot, "bg/lang_en_controlexplain.jpg")))
os.system("rm -rf %s"%(os.path.join(toLanguage,"language_img/lang_en/res/new_ui/Novice\ guide")))
os.system('svn ci %s -m "同步资源"'%toLanguage) | [
"[email protected]"
] | |
87a0d04e73c54c1e0daef6dcf0e338c6af43be21 | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/abc/abc024/B/answers/111654_Gale.py | b31d17de7f8e5d4c0d019d4cbf95c0c6f7e11513 | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | n, t = map(int, input().split())
a = [int(input()) for i in range(n)]
ans = t
for i in range(1, n):
ans += t
if a[i] <= a[i - 1] + t:
ans = ans - (a[i - 1] + t - a[i])
print(ans)
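# Why this works: each press at a[i] keeps the shower running for t seconds,
# so the answer is the total length of the union of intervals [a[i], a[i]+t);
# the subtraction removes the overlap with the previous interval.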
| [
"[email protected]"
] | |
da60ebdb0c9f8856df41a22fbe7a5925c0a77927 | a4d250ce393012dc251cb955096cb8f284c57439 | /gunion/data/battle.py | 2e86c5bc5fb1e0344791a01e941d7330c554ed7e | [] | no_license | daxingyou/test-2 | b02af312784d06a46e29acd42e756e92afee6ed9 | a16c872ba781855a8c891eff41e8e651cd565ebf | refs/heads/master | 2023-03-16T04:21:23.704482 | 2018-09-28T00:51:23 | 2018-09-28T00:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,188 | py | #coding:utf8
"""
Created on 2016-07-28
@Author: jiangtaoran([email protected])
@Brief : union war
"""
import base64
from utils import logger
from utils import utils
from datalib.data_loader import data_loader
class UnionBattleInfo(object):
"""一场联盟战争
"""
    BATTLE_STAGE_INVALID = 0 #invalid
    BATTLE_STAGE_IDLE = 1 #no war
    BATTLE_STAGE_PREPARE = 2 #preparation stage
    BATTLE_STAGE_FIGHT = 3 #fighting stage
    BATTLE_STAGE_CLOSE = 4 #finished
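    #Typical stage flow (summary of the methods below):
    #IDLE --launch()--> PREPARE --start_fight()--> FIGHT
    #     --close_fight()--> CLOSE --(finish_time passes)--> next war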
__slots__ = [
"id",
"union_id",
"index",
"stage",
"rival_union_id",
"rival_battle_id",
"is_initiator", #是否战争发起方
"launch_time", #战争发起时间
"fight_time", #战争开战时间
"close_time", #战争结束时间
"finish_time", #战争生命周期终止时间,可以开始下一场战争
"is_deployed", #是否已经完成防御部署
"battle_count", #战斗的数量
"score", #胜场积分
"individuals_score", #成员战功之和
"drum",
"attack_level",
"attack_win_count_this_level", #本轮战斗中攻击胜利次数
"attack_lose_count_this_level", #本轮战斗中攻击失败次数
"defend_nodes_level", #防守方的节点level
"record_index",
"accepted_members", #大宝箱领取的记录
"accepted_names",
"accepted_icons",
"reward_items",
"reward_nums",
"accept_times"
]
def __init__(self):
self.id = 0
self.union_id = 0
self.index = 0
self.stage = UnionBattleInfo.BATTLE_STAGE_INVALID
self.rival_union_id = 0
self.rival_battle_id = 0
self.is_initiator = False
self.launch_time = 0
self.fight_time = 0
self.close_time = 0
self.finish_time = 0
self.is_deployed = False
self.battle_count = 0
self.score = 0
self.individuals_score = 0
self.drum = 0
self.attack_level = 1
self.attack_win_count_this_level = 0
self.attack_lose_count_this_level = 0
self.defend_nodes_level = ""
self.record_index = 0
@staticmethod
def generate_id(union_id, index):
id = union_id << 32 | index
return id
@staticmethod
def create(union_id, index, invalid):
battle = UnionBattleInfo()
battle.id = UnionBattleInfo.generate_id(union_id, index)
battle.union_id = union_id
battle.index = index
if invalid:
            battle.stage = UnionBattleInfo.BATTLE_STAGE_INVALID #cannot launch a war
else:
battle.stage = UnionBattleInfo.BATTLE_STAGE_IDLE
battle.rival_union_id = 0
battle.rival_battle_id = 0
battle.is_initiator = False
battle.launch_time = 0
battle.fight_time = 0
battle.close_time = 0
battle.finish_time = 0
battle.is_deployed = False
battle.battle_count = 0
battle.score = 0
battle.individuals_score = 0
battle.drum = 0
battle.attack_level = 1
battle.attack_win_count_this_level = 0
battle.attack_lose_count_this_level = 0
battle.defend_nodes_level = ""
battle.record_index = 0
battle.accepted_members = "" #奖励箱领取
battle.accepted_names = ""
battle.accepted_icons = ""
battle.reward_items = ""
battle.reward_nums = ""
battle.accept_times = ""
return battle
def force_update_fight_time(self, time):
"""强制改变开战时间(内部接口)
"""
assert self.stage == self.BATTLE_STAGE_PREPARE
self.fight_time = time
def force_update_close_time(self, time):
"""强制改变结束战斗时间(内部接口)
"""
assert self.stage == self.BATTLE_STAGE_FIGHT
self.close_time = time
def force_update_finish_time(self, time):
"""强制改变结束时间(内部接口)
"""
assert self.stage == self.BATTLE_STAGE_CLOSE
self.finish_time = time
def is_able_to_join(self):
"""是否可以参战
"""
return self.stage == self.BATTLE_STAGE_IDLE
def is_able_to_deploy(self):
"""是否可以部署防御
"""
return self.stage == self.BATTLE_STAGE_PREPARE
def is_able_to_drum(self):
"""是否可以擂鼓
"""
return self.stage == self.BATTLE_STAGE_FIGHT
def is_at_war(self):
"""是否在交战中
"""
return (self.stage == self.BATTLE_STAGE_PREPARE or
self.stage == self.BATTLE_STAGE_FIGHT or
self.stage == self.BATTLE_STAGE_CLOSE)
def launch(self, now, rival_union_id, rival_battle_id, initiative = True):
"""发起战争
"""
assert self.stage == self.BATTLE_STAGE_IDLE
self.stage = self.BATTLE_STAGE_PREPARE
self.rival_union_id = rival_union_id
self.rival_battle_id = rival_battle_id
self.is_initiator = initiative
self.launch_time = now
        #self.fight_time = utils.get_spec_second(
        #        self.launch_time, "22:30") + utils.SECONDS_OF_DAY #launch time 22:30 next day
        #self.close_time = self.fight_time + utils.SECONDS_OF_DAY #fight time 22:30 next day
        #self.finish_time = utils.get_spec_second(
        #        self.close_time, "05:00" ) + utils.SECONDS_OF_DAY #close time 05:00 next day
        self.fight_time = utils.get_spec_second(self.launch_time, "21:00") #fight time 21:00 same day
        self.close_time = utils.get_spec_second(self.launch_time, "23:00") #close time 23:00 same day
        self.finish_time = utils.get_spec_second(self.launch_time, "05:00"
                ) + utils.SECONDS_OF_DAY #finish time 05:00 next day
self.is_deployed = False
self.accepted_members = ""
self.accepted_names = ""
self.accepted_icons = ""
self.reward_items = ""
self.reward_nums = ""
self.accept_times = ""
def start_fight(self, now):
"""进入开战阶段
"""
assert self.stage == self.BATTLE_STAGE_PREPARE
self.stage = self.BATTLE_STAGE_FIGHT
self.is_deployed = True
self.attack_level = 1
def is_fight_closed(self, now):
"""战斗结算是否结束
"""
return self.launch_time != 0 and now >= self.close_time
def close_fight(self):
"""战争结束
"""
#assert self.stage == self.BATTLE_STAGE_FIGHT
self.stage = self.BATTLE_STAGE_CLOSE
def is_finished(self, now):
"""战争是否结束
"""
return self.launch_time != 0 and now >= self.finish_time and now >= self.close_time
def is_able_to_start(self):
"""是否可以开战
"""
return self.stage == self.BATTLE_STAGE_FIGHT
def beat_drum(self, value = 1):
"""擂鼓
"""
assert value >= 0
self.drum += value
def get_attack_buff_count(self):
"""获取当前攻击 buff 加成
"""
drum_ratio = int(float(
data_loader.UnionConfInfo_dict["attack_buff_count_per_drum"].value))
lose_ratio = int(float(
data_loader.UnionConfInfo_dict["attack_buff_count_per_lose"].value))
return self.drum * drum_ratio + self.attack_lose_count_this_level * lose_ratio
def get_attack_buff_temporary_count(self):
"""获取当前轮次临时攻击 buff 加成
"""
lose_ratio = int(float(
data_loader.UnionConfInfo_dict["attack_buff_count_per_lose"].value))
return self.attack_lose_count_this_level * lose_ratio
def mark_attack_result(self, win):
"""记录攻击结果
"""
if win:
self.attack_win_count_this_level += 1
#else:
# self.attack_lose_count_this_level += 1
        #attack advances to the next round
count = int(float(data_loader.UnionConfInfo_dict["battle_map_node_count"].value))
if self.attack_win_count_this_level >= count:
self.attack_level += 1
self.attack_win_count_this_level = 0
self.attack_lose_count_this_level = 0
def gain_union_score(self, value = 1):
"""增加联盟胜场积分
"""
assert value >= 0
self.score += value
def gain_individuals_score(self, value):
"""增加成员战功点数
"""
assert value >= 0
self.individuals_score += value
def get_next_record_index(self):
"""获取下一个战斗记录 index
"""
self.record_index += 1
return self.record_index
def is_able_to_accept_box(self):
"""是否可以领取大宝箱/
"""
if self.stage != self.BATTLE_STAGE_CLOSE:
return False
level = int(float(data_loader.UnionConfInfo_dict["battle_map_total_level"].value))
count = int(float(data_loader.UnionConfInfo_dict["battle_map_node_count"].value))
if level * count > self.score:
return False
return True
def get_accepted_members(self):
"""获取领取过奖励的成员"""
return utils.split_to_int(self.accepted_members)
def get_reward_record(self):
"""获取奖励领取记录"""
members = utils.split_to_int(self.accepted_members)
names = utils.split_to_string(self.accepted_names)
icons = utils.split_to_int(self.accepted_icons)
items_id = utils.split_to_int(self.reward_items)
items_num = utils.split_to_int(self.reward_nums)
times = utils.split_to_int(self.accept_times)
names = [base64.b64decode(name) for name in names]
return map(None, members, names, icons, items_id, items_num, times)
def add_reward_record(self, user_id, user_name, icon_id, item_id, item_num, now):
"""添加领奖记录"""
members = utils.split_to_int(self.accepted_members)
names = utils.split_to_string(self.accepted_names)
icons = utils.split_to_int(self.accepted_icons)
items_id = utils.split_to_int(self.reward_items)
items_num = utils.split_to_int(self.reward_nums)
times = utils.split_to_int(self.accept_times)
members.append(user_id)
names.append(user_name)
icons.append(icon_id)
items_id.append(item_id)
items_num.append(item_num)
times.append(now)
self.accepted_members = utils.join_to_string(members)
self.accepted_names = utils.join_to_string(names)
self.accepted_icons = utils.join_to_string(icons)
self.reward_items = utils.join_to_string(items_id)
self.reward_nums = utils.join_to_string(items_num)
self.accept_times = utils.join_to_string(times)
| [
"[email protected]"
] | |
626be54fe2c402a3a685abc6d8479c10ea8a75aa | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /CalibMuon/RPCCalibration/python/l1MuonOutputModule_cfi.py | 6dbdc357f06e53ed7641a5fc49576123b5f1a25e | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 419 | py | import FWCore.ParameterSet.Config as cms
from CalibMuon.RPCCalibration.l1Muon_EventContent_cff import *
L1MuonEventContent = cms.OutputModule("PoolOutputModule",
l1Muon_EventContent,
l1MuonEventSelection,
datasets = cms.untracked.PSet(
filterName = cms.untracked.string('l1Muon_Filter'),
dataTier = cms.untracked.string('USER')
),
fileName = cms.untracked.string('l1Muon.root')
)
| [
"[email protected]"
] | |
3230d5448ef48ac2a50e98f9791b15a0ed770f9f | 0147677b611e40ac695ba07f914264b3470a7401 | /src/mac_address_info.py | 4ad9f15c04cbbf5909df457315f089a8c5f1a0cb | [] | no_license | mblomdahl/sniffer | a2aed3ee37bb9a39d3c13ad8455ce7c7a2fc58c7 | 9101c59f958bb94fe1443fd90e95d333a02b785f | refs/heads/master | 2021-01-24T00:23:30.318623 | 2015-08-14T12:56:33 | 2015-08-14T12:56:33 | 41,627,533 | 0 | 0 | null | 2015-08-30T12:11:01 | 2015-08-30T12:11:01 | null | UTF-8 | Python | false | false | 5,190 | py | import json
import urllib2
import os
class MacAddressInfo:
def __init__(self):
self.mac_address = ""
self.company = ""
self.address1 = ""
self.address2 = ""
self.address3 = ""
self.country = ""
class MacAddressStorage:
def __init__(self):
self.data = [] # creates a new empty list
def mac_address_lookup_from_internet(self, mac_address):
try:
print "Load from Internet %s" % mac_address
# Set the request URL http://www.macvendorlookup.com/api/v2/08-86-3B-D4-90-C0
url = 'http://www.macvendorlookup.com/api/v2/' + mac_address
# Send the GET request
response = urllib2.urlopen(url)
resp = response.read()
            mac_object = MacAddressInfo()  # bug fix: instantiate; assigning the bare class would mutate shared class attributes
data = []
if resp:
# Interpret the JSON response
#data = json.loads(resp.decode('utf8'))
data = json.loads(resp)
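                # Illustrative shape of the lookup response consumed below
                # (values are made up):
                # [{"company": "Apple, Inc.", "addressL1": "1 Infinite Loop",
                #   "addressL2": "", "addressL3": "Cupertino CA 95014", "country": "US"}]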
mac_object.mac_address = mac_address
for company in data:
mac_object.company = company['company']
for address1 in data:
mac_object.address1 = address1['addressL1']
for address2 in data:
mac_object.address2 = address2['addressL2']
for address3 in data:
mac_object.address3 = address3['addressL3']
for country in data:
mac_object.country = country['country']
else:
mac_object.mac_address = mac_address
mac_object.company = ""
mac_object.address1 = ""
mac_object.address2 = ""
mac_object.address3 = ""
mac_object.country = ""
return mac_object
except :
print "Unexpected error:", url, resp
return None
def mac_address_lookup_from_cache(self, mac_address):
try:
self.load_data_from_file()
count = len( self.data["mac addresses"] )
for index in range(count):
if self.data["mac addresses"][index]["macaddress"] == mac_address:
                    mac_object = MacAddressInfo()  # bug fix: instantiate (was the bare class)
mac_object.mac_address = mac_address
mac_object.company = self.data["mac addresses"][index]["company"]
mac_object.address1 = self.data["mac addresses"][index]["address1"]
mac_object.address2 = self.data["mac addresses"][index]["address2"]
mac_object.address3 = self.data["mac addresses"][index]["address3"]
mac_object.country = self.data["mac addresses"][index]["country"]
return mac_object
return None
except :
print "mac_address_lookup_from_cache error:"
return None
def mac_address_lookup(self, mac_address):
try:
mac_object = self.mac_address_lookup_from_cache(mac_address)
if mac_object is None :
mac_object = self.mac_address_lookup_from_internet(mac_address)
if mac_object is not None :
#self.load_data_from_file()
print mac_address
self.data["mac addresses"].append( {"macaddress":mac_address, "company":mac_object.company, "address1":mac_object.address1, "address2":mac_object.address2, "address3":mac_object.address3, "country":mac_object.country} )
self.store_data_to_file()
else :
return None
return mac_object
except :
print "mac_address_lookup error:"
return None
def load_data_from_file(self):
if len( self.data ) == 0:
if os.path.exists("/home/pi/sniffer/mac_addresses.json"):
file_handel = open('/home/pi/sniffer/mac_addresses.json', 'r')
self.data = json.load(file_handel)
#print "Load"
else:
#file_handel = open('/home/pi/sniffer/mac_addresses.json', 'w')
self.data.append( {"mac addresses":[]} )
#print "new"
def store_data_to_file(self):
file_handel = open('/home/pi/sniffer/mac_addresses.json', 'w')
json.dump(self.data, file_handel, sort_keys=True, indent=2)
#file_handel.write('\n')
if __name__ == '__main__':
storage = MacAddressStorage()
mac_object = MacAddressInfo()
#mac_object = storage.mac_address_lookup("08:86:3B:D4:90:C0")
#mac_object = storage.mac_address_lookup("6C:F3:73:E6:0A:11")
mac_object = storage.mac_address_lookup("9C:6C:15:97:76:04")
#print storage.mac_address_lookup("08-86-3B-D4-90-C0").mac_address
if mac_object :
print mac_object.mac_address
print mac_object.company
print mac_object.address1
print mac_object.address2
print mac_object.address3
print mac_object.country
else :
print "Error"
| [
"="
] | = |
b54fd0bc290b3f5a82c4cad6ff829f7b399573f4 | ded81a7568fe04f3227562cc5f67ffc675617cc0 | /cheer_app/migrations/0002_comment.py | a7803e53c60185ed5d941b24bfcce9f91293cac8 | [] | no_license | shin04/cheer | 3e220afc1fb0a4329ff7c16bd4823da1c09ee0a9 | da39bbc584350c0ac89c23dbbfaf1c96ab9148fd | refs/heads/master | 2020-07-02T16:07:44.280390 | 2020-05-20T11:13:03 | 2020-05-20T11:13:03 | 183,242,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | # Generated by Django 2.2 on 2019-08-05 04:29
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('cheer_app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved_comment', models.BooleanField(default=False)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='cheer_app.Post')),
],
),
]
| [
"[email protected]"
] | |
4d6647ad50459cf616a4eaaa782651b3b18edf2d | 1af1f89eb9a178b95d1ba023b209b7538fb151f0 | /Algorithms/146. LRU Cache.py | 09989dad11e2256fc8b9ce1d4d8a754a15563be9 | [] | no_license | 0xtinyuk/LeetCode | 77d690161cc52738e63a4c4b6595a6012fa5c21e | 08bc96a0fc2b672282cda348c833c02218c356f1 | refs/heads/master | 2023-02-21T16:58:39.881908 | 2021-01-25T08:00:13 | 2021-01-25T08:00:13 | 292,037,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | from collections import OrderedDict
class LRUCache():
def __init__(self, capacity):
self.memory = OrderedDict()
self.capacity = capacity
def get(self, key):
if key not in self.memory:
return - 1
self.memory.move_to_end(key)
return self.memory[key]
def put(self, key, value):
if key in self.memory:
self.memory.move_to_end(key)
self.memory[key] = value
if len(self.memory) > self.capacity:
self.memory.popitem(last = False)
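# Illustrative behaviour of a capacity-2 cache:
#   cache = LRUCache(2)
#   cache.put(1, 1); cache.put(2, 2)
#   cache.get(1)      # -> 1, key 1 becomes most recently used
#   cache.put(3, 3)   # evicts key 2 (least recently used)
#   cache.get(2)      # -> -1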
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
| [
"[email protected]"
] | |
9f0d1351e1fd94cb4f0674d16df5403e9b562544 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-7279.py | 7b346f67e4f794d398658cee0c9138683fa18c0b | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,612 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
40c87a8686a9baae4fa8aeedbe59625b342a5fff | 485384a2e1faa338d3372a8c000ccab6f6891cfa | /CGAT/Mali.py | 6337aeb47d18672ec25b144ac94a69699dfb9d3a | [
"BSD-2-Clause"
] | permissive | wangzhennan14/cgat | 30c8ec4f8d1e14413c5d8193352348137d60a611 | 38ff24edc2a1f8b0da5716aea4fbf6a9328342b0 | refs/heads/master | 2021-01-18T15:43:02.544097 | 2017-03-16T15:27:11 | 2017-03-16T15:27:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44,924 | py | '''
Mali.py - Tools for multiple alignments
=======================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Code
----
'''
import re
import string
import math
import copy
import random
class AlignedString:
mGapChars = ("-", ".")
mGapChar = "-"
def __init__(self, identifier, fr, to, s, is_mangled=False):
self.mId = identifier
self.mFrom = fr
self.mTo = to
self.mString = s
self.mIsMangled = is_mangled
def __len__(self):
return len(self.mString)
def maskColumn(self, column, mask_char="x"):
        self.maskColumns([column, ], mask_char=mask_char)
def maskColumns(self, columns, mask_char="x"):
s = list(self.mString)
for c in columns:
if s[c] not in self.mGapChars:
s[c] = mask_char
self.mString = "".join(s)
def mapColumns(self, columns, map_function):
s = list(self.mString)
for c in columns:
if s[c] not in self.mGapChars:
s[c] = map_function(s[c])
self.mString = "".join(s)
def takeColumns(self, columns):
"""take columns.
not implemented yet: updating of residue numbers.
"""
s = []
for c in columns:
s.append(self.mString[c])
self.mString = "".join(s)
def truncate(self, first, last):
"""truncate aligned string.
not implemented yet: updating of residue numbers.
"""
self.mString = self.mString[first:last]
def insertColumns(self, position, num_columns, char="-"):
"""insert num_columns columns at position."""
self.mString = self.mString[:position] + \
char * num_columns + self.mString[position:]
def getIdentifier(self):
'''return demangled identifier.'''
if self.mIsMangled:
return self.mId.rsplit("_", 1)[0]
else:
return self.mId
def getResidueNumber(self, position):
x = self.mFrom
for c in self.mString[:position]:
if c not in self.mGapChars:
x += 1
return x
def getSequence(self):
"""return sequence without gaps."""
return re.sub("[%s]" % "".join(self.mGapChars), "", self.mString)
def getSegments(self, transitions, gap_chars="-."):
"""return segments of alignment according to
transitions.
Transitions is a list of residue numbers.
"""
segments = []
pos = self.mFrom
first_x = 0
for x in range(len(self.mString)):
char = self.mString[x]
if char not in gap_chars:
if pos in transitions:
if first_x != x:
segments.append((first_x, x))
first_x = x
pos += 1
# add last segment, unless it has already been added
# because it was part of the transitions
if first_x != x:
segments.append((first_x, len(self.mString)))
elif pos in transitions:
segments.append((first_x, len(self.mString)))
return segments
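
    # Example: AlignedString("s", 0, 4, "AB-CD").getSegments([2]) yields
    # [(0, 3), (3, 5)]: the string is split before residue number 2 (the
    # "C"), with the gap column attached to the preceding segment.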
def threadSequence(self, new_sequence, map_old2new):
"""thread a new sequence onto this sequence replacing
characters in this sequence with those in new sequence.
``map_old2new`` lists the positions that should be replaced.
It maps non-gap positions in old to those in new. Non-gap
positions than are not mapped are set to :attr:`gapchar`.
"""
s = list(self.mString)
c = 0
for x in range(len(s)):
m = map_old2new.mapRowToCol(c)
if m >= 0:
s[c] = new_sequence[m]
else:
s[c] = self.mGapChar
if s[c] not in self.mGapChars:
c += 1
self.mString = "".join(s)
class MaliData:
def __init__(self, line, gap_chars="-.", mask_chars="Nn"):
self.mNGaps = len(re.sub("[^%s]" % gap_chars, "", line))
self.mNMasked = len(re.sub("[^%s]" % mask_chars, "", line))
self.mNAll = len(line)
self.mNChars = self.mNAll - self.mNMasked - self.mNGaps
def __str__(self):
return "%i\t%i\t%i\t%i" % (self.mNAll, self.mNChars,
self.mNGaps, self.mNMasked)
class Mali:
mGapChars = ("-", ".")
mGapPattern = re.compile("[.-]")
# character used for gaps
mGapChar = "-"
# admissable gap characters
mGapChars = ("-", ".")
mMaskChar = "X"
def __init__(self):
self.mIdentifiers = []
self.mMali = {}
self.mLength = 0
self.mAnnotations = {}
self.mName = None
# set to false, if ranges shall not be output.
self.mWriteRanges = True
def __contains__(self, key):
return key in self.mMali
def __getitem__(self, key):
return self.mMali[key].mString
def __len__(self):
return len(self.mIdentifiers)
def __delitem__(self, key):
del self.mMali[key]
self.mIdentifiers.remove(key)
def getClone(self):
return copy.deepcopy(self)
def iteritems(self):
return iter(self.mMali.items())
def items(self):
return list(self.mMali.items())
def values(self):
return list(self.mMali.values())
def keys(self):
return list(self.mMali.keys())
def getName(self):
return self.mName
def setName(self, name):
self.mName = name
def getIdentifiers(self):
return self.mIdentifiers
def getLength(self):
"""deprecated."""
return self.getNumSequences()
def getNumSequences(self):
return self.__len__()
def getNumColumns(self):
if self.mIdentifiers:
return len(self.mMali[self.mIdentifiers[0]])
else:
return 0
def getWidth(self):
"""deprecated."""
return self.getNumColumns()
def rename(self, old_name, new_name):
"""rename an entry."""
if old_name not in self.mMali:
raise KeyError("%s not in mali" % old_name)
self.mIdentifiers[self.mIdentifiers.index(old_name)] = new_name
self.mMali[new_name] = self.mMali[old_name]
del self.mMali[old_name]
def isEmpty(self):
return len(self.mMali) == 0
def getSequence(self, key):
return self.mMali[key]
def getResidueNumber(self, key, position):
"""return residue number in sequence key at position position."""
return self.mMali[key].getResidueNumber(position)
def setSequence(self, key, sequence):
# TODO: updating of residue numbers
self.mMali[key].mString = sequence
def getEntry(self, key):
return self.mMali[key]
def addEntry(self, s):
"""add an aligned string object."""
if s.mId in list(self.mMali.keys()):
raise KeyError("id %s already in mali" % s.mId)
self.mIdentifiers.append(s.mId)
self.mMali[s.mId] = s
def addSequence(self, id, fr, to, sequence):
if to < 0:
to = self.countCharacters(sequence)
s = AlignedString(id, fr, to, sequence)
self.addEntry(s)
def countCharacters(self, row):
return len(row) - len(self.mGapPattern.findall(row))
def deleteEntry(self, identifier):
if identifier not in self.mMali:
raise KeyError("identifier %s not in mali." % identifier)
del self.mMali[identifier]
self.mIdentifiers.remove(identifier)
def getColumns(self):
"""return mali in column orientation."""
args = [self.mMali[x].mString for x in self.mIdentifiers]
return ["".join(x) for x in zip(*args)]
def getConsensus(self, mark_with_gaps=False):
"""return consensus string.
The consensus string returns the most frequent character per column
that is not a gap.
If mark_with_gaps is set to True, positions with any gap characater are
set to gaps.
"""
columns = self.getColumns()
seq = []
for x in range(len(columns)):
s = columns[x]
counts = [(a, s.count(a))
for a in set(list(s)).difference(set(self.mGapChar))]
            if mark_with_gaps and self.mGapChar in columns[x]:
                seq.append(self.mGapChar)
            elif not counts:
                # column consists only of gap characters
                seq.append(self.mGapChar)
            else:
                counts.sort(key=lambda x: -x[1])
                seq.append(counts[0][0])
return "".join(seq)
def readFromFile(self, infile, format="fasta"):
"""read multiple alignment from file in various format."""
self.mMali = {}
self.mIdentifiers = []
        pattern_parse_ranges = re.compile(r"(\S+)/(\d+)-(\d+)")
# read profiles - a profile possibly consists of several
# entries per file so treat it differently
if format.lower() == "profile":
while 1:
line = infile.readline()
if not line:
return False
if line[0] != "#":
break
if line[0] != ">":
raise "expected '>' at as first character in line %s" % line
try:
self.mName, length, width = re.match(
">profile=(\S+) length=(\d+) width=(\d+)", line).groups()
except AttributeError:
raise "could not parse header line %s" % line
width = int(width)
for x in range(0, width):
id = "seq%i" % x
self.mIdentifiers.append(id)
line = infile.readline()
if not line:
raise "expected %i sequences, only got %i" % (width, x)
self.mMali[id] = AlignedString(
id, 0, self.countCharacters(line[:-1]), line[:-1])
return True
if isinstance(infile, list) or \
isinstance(infile, tuple):
lines = infile
else:
lines = infile.readlines()
if format not in ("stockholm"):
# save comments
self.mComments = [x for x in lines if x[0] == "#"]
lines = [x for x in lines if x[0] != "#"]
else:
self.mComments = []
# remove empty lines
lines = [x for x in lines if x.strip()]
if not lines:
raise AttributeError("empty alignment")
def getId(id, s):
x = pattern_parse_ranges.match(id)
if x:
id, fr, to = x.groups()
fr, to = int(fr) - 1, int(to)
else:
fr, to = 0, self.countCharacters(s)
self.mWriteRanges = False
return id, fr, to
#######################################################################
if format.lower() == "plain":
for line in lines:
if not line.strip():
continue
data = line[:-1].split("\t")
id = data[3]
xid = id
x = 0
while xid in self.mMali:
xid = id + "-" + str(x)
x += 1
self.addEntry(
AlignedString(xid,
int(data[0]) - 1,
int(data[2]),
data[1]))
#######################################################################
elif format.lower() == "fasta":
pattern_identifier = "\S+"
id = None
fragments = []
for line in lines:
if line[0] == ">":
if id:
s = re.sub("\s", "", string.join(fragments, ""))
id, fr, to = getId(id, s)
self.addEntry(AlignedString(id, fr, to, s))
id = re.search(
"^(%s)" % pattern_identifier, line[1:-1]).group(0)
fragments = []
continue
fragments.append(line[:-1])
s = re.sub("\s", "", string.join(fragments, ""))
id, fr, to = getId(id, s)
self.addEntry(AlignedString(id, fr, to, s))
#######################################################################
elif format.lower() == "phylip":
nsequences, nchars = re.split("\s+", lines[0][:-1].strip())
nsequences = int(nsequences)
for line in lines[1:]:
l = line[:-1].strip()
if not l:
continue
id, sequence = re.match("(\S+)\s+(.*)", l).groups()
sequence = re.sub("\s", "", sequence)
if id not in self.mMali:
self.mIdentifiers.append(id)
self.mMali[id] = []
self.mMali[id].append(sequence)
for id, frags in list(self.mMali.items()):
s = "".join(frags)
fr, to = 0, self.countCharacters(s)
self.mMali[id] = AlignedString(id, fr, to, s)
#######################################################################
elif format.lower() == "clustal":
# skip header line
del lines[0]
fragments = {}
# prune lines
lines = [x.strip() for x in lines]
# remove empty lines
lines = [x for x in lines if len(x[:-1]) > 0]
for line in lines:
# remove consensus lines
if line[0] in ("*", ":"):
continue
data = re.split("\s+", line)
if len(data) != 2:
raise ValueError("parsing error in line %s" % line)
id, fragment = data
if id not in fragments:
fragments[id] = []
self.mIdentifiers.append(id)
fragments[id].append(fragment)
for id, f in list(fragments.items()):
s = re.sub("\s", "", string.join(f, ""))
self.mMali[id] = AlignedString(
id, 0, self.countCharacters(s), s)
elif format.lower() == "stockholm":
# skip header line
assert lines[0].startswith(
"# STOCKHOLM"), "file is not in stockholm format"
del lines[0]
fragments = {}
annotations = {}
# prune lines
lines = [x.strip() for x in lines]
# remove empty lines
lines = [x for x in lines if len(x[:-1]) > 0]
for line in lines:
data = re.split("\s+", line)
if data[0] == "//":
break
if line[0] == '#':
if data[0] == "#=GC":
id, fragment = data[1:3]
else:
self.mComments.append(line)
continue
if id not in annotations:
annotations[id] = []
annotations[id].append(fragment)
else:
if len(data) > 2:
raise ValueError("parsing error in line %s" % line)
elif len(data) == 1:
# treat empty alignments/lines
id = data[0]
fragment = ""
else:
id, fragment = data
if id not in fragments:
fragments[id] = []
self.mIdentifiers.append(id)
fragments[id].append(fragment)
n = []
for id in self.mIdentifiers:
f = fragments[id]
s = re.sub("\s", "", string.join(f, ""))
x = pattern_parse_ranges.match(id)
if x:
id, fr, to = x.groups()
fr, to = int(fr) - 1, int(to)
else:
fr, to = 0, self.countCharacters(s)
n.append(id)
self.mMali[id] = AlignedString(id, fr, to, s)
self.mIdentifiers = n
for id, f in list(annotations.items()):
s = re.sub("\s", "", string.join(f, ""))
annotations[id] = s
self.mAnnotations = annotations
else:
raise "unknown alignment format %s" % format
if len(self.mMali) == 0:
self.mLength = 0
else:
self.mLength = min(
[len(x.mString) for x in list(self.mMali.values())])
def writeToFile(self, outfile, write_ranges=True, format="plain",
options=None):
"""write alignment to file.
If options is given, these lines are output into the multiple
alignment.
"""
if format == "plain-fasta":
format = "fasta"
write_ranges = False
write_ranges = write_ranges and self.mWriteRanges
if format == "plain":
for identifier in self.mIdentifiers:
m = self.mMali[identifier]
outfile.write("%i\t%s\t%i\t%s\n" % (
m.mFrom + 1, m.mString, m.mTo, m.getIdentifier()))
elif format == "fasta":
for identifier in self.mIdentifiers:
m = self.mMali[identifier]
if write_ranges:
outfile.write(
">%s/%i-%i\n%s\n" % (m.getIdentifier(), m.mFrom + 1, m.mTo, m.mString))
else:
outfile.write(">%s\n%s\n" % (identifier, m.mString))
elif format == "stockholm":
outfile.write("# STOCKHOLM 1.0\n")
if options:
for o in options:
outfile.write("%s\n" % o)
# calculate offset:
max_l = 0
for identifier in self.mIdentifiers:
m = self.mMali[identifier]
id = m.getIdentifier()
# tab does not work as separator
if m.mTo and write_ranges:
x = "%s/%i-%i" % (id, m.mFrom + 1, m.mTo)
else:
x = "%s" % (id)
max_l = max(max_l, len(x))
for identifier in list(self.mAnnotations.keys()):
x = "#=GC %s" % identifier
max_l = max(max_l, len(x))
format = "%-" + str(max_l) + "s %s\n"
for identifier in self.mIdentifiers:
m = self.mMali[identifier]
id = m.getIdentifier()
# tab does not work as separator
if m.mTo and write_ranges:
x = "%s/%i-%i" % (id, m.mFrom + 1, m.mTo)
else:
x = "%s" % (id)
outfile.write(format % (x, m.mString))
for identifier, value in list(self.mAnnotations.items()):
x = "#=GC %s" % identifier
outfile.write(format % (x, value))
outfile.write("//\n")
elif format == "phylip":
outfile.write("%i %i\n" % (self.getLength(), self.getWidth()))
for identifier in self.mIdentifiers:
outfile.write("%s %s\n" %
(identifier, self.mMali[identifier].mString))
elif format.lower() == "profile":
if self.mName:
name = self.mName
else:
name = ",".join(self.mIdentifiers)
outfile.write(">profile=%s length=%i width=%i\n" %
(name, self.getWidth(), self.getLength()))
for identifier in self.mIdentifiers:
outfile.write("%s\n" % (self.mMali[identifier].mString))
elif format == "nexus":
# nexus formatted output - MrBayes conformant.
outfile.write("#NEXUS\n")
outfile.write("begin data;\n")
outfile.write(" dimensions ntax=%i nchar=%i;\n" %
(self.getLength(), self.getWidth()))
outfile.write(
" format datatype=dna interleave=no gap=%s;\n" % (self.mGapChar))
outfile.write(" matrix\n")
max_len = max([len(x) for x in self.mIdentifiers])
format = " %-" + str(max_len) + "s %s\n"
for identifier in self.mIdentifiers:
outfile.write(
format % (identifier, self.mMali[identifier].mString))
outfile.write(" ;\n")
outfile.write("end;\n")
else:
raise "unknown alignment format %s" % format
def removeUnalignedEnds(self):
"""remove unaligned ends in the multiple alignment.
unaligned ends correspond to lower-case characters.
"""
pattern_start = re.compile("^([- .a-z]+)")
pattern_unaligned = re.compile("[a-z]")
for s in list(self.mMali.values()):
first = pattern_start.match(s.mString)
if first:
first = first.groups()[0]
nchars = len(pattern_unaligned.findall(first))
s.mFrom += nchars
s.mString = self.mGapChar * len(first) + s.mString[len(first):]
# search from the back end by reversing. This is much faster than
# using $ from the back.
last = pattern_start.match(s.mString[::-1])
if last:
last = last.groups()[0]
nchars = len(pattern_unaligned.findall(last))
s.mTo -= nchars
                l = len(s) - len(last)
                s.mString = s.mString[:l] + self.mGapChar * len(last)
def upperCase(self):
"""set all characters to upper case."""
for k, s in list(self.mMali.items()):
s.mString = s.mString.upper()
def lowerCase(self):
"""set all characters to lower case."""
for k, s in list(self.mMali.items()):
s.mString = s.mString.lower()
def removeEndGaps(self):
"""remove end gaps.
end gaps do not include any characters and thus
the alignment coordinates won't change.
"""
pattern_start_gaps = re.compile("^([- ]+)")
min_from = self.mLength
max_to = 0
for s in list(self.mMali.values()):
first = pattern_start_gaps.match(s.mString)
if first:
first = first.groups()[0]
min_from = min(min_from, len(first))
# search from the back end by reversing. This is much faster than
# using $ from the back.
last = pattern_start_gaps.search(s.mString[::-1])
if last:
last = last.groups()[0]
max_to = max(max_to, len(s) - len(last))
for s in list(self.mMali.values()):
s.mString = s.mString[min_from:max_to]
        self.mLength = min([len(x.mString) for x in list(self.mMali.values())])
def insertColumns(self, position, num_gaps, keep_fixed=None, char="-"):
"""insert gaps at position into multiple alignment.
if keep_constant is a list of identifiers, those are kept constant,
instead, gaps are added to the end.
"""
last_pos = min(self.getWidth(), position + num_gaps)
for id, seq in list(self.mMali.items()):
if keep_fixed and id in keep_fixed:
seq.insertColumns(last_pos, num_gaps, char)
else:
seq.insertColumns(position, num_gaps, char)
def removeGaps(self,
allowed_gaps=0,
minimum_gaps=1,
frame=1):
"""remove gappy columns.
allowed_gaps: number of gaps allowed for column to be kept
minimum_gaps: number of gaps for column to be removed
set minimum_gaps to the number of sequences to remove columns
with all gaps.
If frame is > 1 (3 most likely), then a whole codon is removed
as soon as there is one column to be removed.
"""
self.removePattern(
match_function=lambda x: x in self.mGapChars,
allowed_matches=allowed_gaps,
minimum_matches=minimum_gaps,
delete_frame=frame)
def removePattern(self,
match_function,
allowed_matches=0,
minimum_matches=1,
delete_frame=1,
search_frame=1):
"""remove columns (or group of columns), that match a certain pattern.
allowed_matches: number of matches allowed so that column is still kept
minimum_matches: number of matches required for column to be removed
set minimum_matches to the number of sequences to remove columns
with all gaps.
Patterns are matches in search_frame. For example, if frame is
3, whole codons are supplied to match_function.
delete_frame specifies the frame for deletion. If it is set to 3,
codons are removed if already one column matches.
Example: remove all columns that contain at least one stop-codon:
removePattern( lambda x: x.upper() in ("TAG", "TAA", "TGA"),
allowed_matches = 0,
minimum_matches = 1,
search_frame = 3,
delete_frame = 3)
"""
nmatches = [0] * self.getWidth()
for s in [x.mString for x in list(self.mMali.values())]:
for x in range(0, len(s), search_frame):
segment = s[x:x + search_frame]
if match_function(segment):
nmatches[x] += 1
columns = []
delete_columns = []
for x in range(len(nmatches)):
if nmatches[x] >= allowed_matches and nmatches[x] < minimum_matches:
columns.append(x)
else:
delete_columns.append(x)
if delete_frame != 1:
s = set(columns)
for x in delete_columns:
start = int(math.floor(float(x) / delete_frame) * delete_frame)
end = start + delete_frame
for c in range(start, end):
if c in s:
s.remove(c)
columns = list(s)
columns.sort()
self.takeColumns(columns)
def removeEmptySequences(self):
"""remove sequences that are completely empty.
"""
new_ids = []
for id in self.mIdentifiers:
if self.countCharacters(self.mMali[id].mString) == 0:
del self.mMali[id]
continue
new_ids.append(id)
self.mIdentifiers = new_ids
def upper(self):
"""convert all characters in mali to uppercase."""
for s in list(self.mMali.values()):
s.mString = s.mString.upper()
def lower(self):
"""convert all characters in mali to lowercase."""
for s in list(self.mMali.values()):
s.mString = s.mString.lower()
def shiftAlignment(self, map_id2offset):
"""shift alignment by offset."""
for identifier, m in list(self.mMali.items()):
if identifier in map_id2offset:
o = map_id2offset[identifier]
m.mFrom += o
m.mTo += o
def markCodons(self, mode="case"):
"""mark codons.
"""
for identifier, m in list(self.mMali.items()):
s = m.mString
if len(s) % 3 != 0:
raise "sequence %s not divisible by 3" % (m.mId)
is_upper = True
sequence = []
for x in range(0, len(s), 3):
if is_upper:
sequence.append(s[x:x + 3].upper())
is_upper = False
else:
sequence.append(s[x:x + 3].lower())
is_upper = True
m.mString = "".join(sequence)
def markTransitions(self, map_id2transitions, mode="case"):
"""mark transitions in the multiple alignment.
if mode == case, then upper/lower case is used for the transitions
Otherwise, a character given by mode is inserted.
"""
if mode in ("case", "keep-odd", "keep-even"):
# check, if the whole alignment needs to be masked/marked:
if "mali" in map_id2transitions:
transitions = map_id2transitions["mali"]
for identifier, s in list(self.mMali.items()):
new_chars = []
is_upper = True
is_first = False
for c in range(len(s)):
if c in transitions:
is_first = True
if is_upper:
is_upper = False
else:
is_upper = True
x = s.mString[c]
if mode == "case":
                            if x in string.ascii_lowercase:
                                x = self.mMaskChar
                            if is_upper:
                                x = x.upper()
                            else:
                                x = x.lower()
elif mode == "keep-even":
if is_upper:
x = self.mGapChar
elif mode == "keep-odd":
if not is_upper:
x = self.mGapChar
new_chars.append(x)
s.mString = "".join(new_chars)
# now do individual sequences
for identifier, s in list(self.mMali.items()):
if identifier not in map_id2transitions:
continue
new_chars = []
c = s.mFrom
is_upper = True
transitions = map_id2transitions[identifier]
for x in s.mString:
if x in self.mGapChars:
pass
else:
if c in map_id2transitions[identifier]:
if is_upper:
is_upper = False
else:
is_upper = True
c += 1
                    if x in string.ascii_lowercase:
                        x = self.mMaskChar
                if mode == "case":
                    if is_upper:
                        x = x.upper()
                    else:
                        x = x.lower()
elif mode == "keep-even":
if is_upper:
x = self.mGapChar
elif mode == "keep-odd":
if not is_upper:
x = self.mGapChar
new_chars.append(x)
s.mString = "".join(new_chars)
else:
raise ValueError("character insertion not implemented yet")
def buildColumnMap(self, other, join_field=None):
"""build map of columns in other to this."""
if not join_field:
join_field = other.mIdentifiers[0]
if join_field not in other.mMali or \
join_field not in self.mMali:
raise "line %s not in both alignments." % (join_field)
this_seq = self.mMali[join_field]
other_seq = other.mMali[join_field]
if this_seq.mFrom != other_seq.mFrom or \
this_seq.mTo != other_seq.mTo:
raise ValueError("residue ranges for sequence %s doe not correspond." % (
join_field))
map_this2other = []
this_seq = this_seq.mString.upper()
other_seq = other_seq.mString.upper()
# position in other
o = 0
for c in this_seq:
if c in self.mGapChars:
map_this2other.append(None)
else:
while other_seq[o] != c:
o += 1
map_this2other.append(o)
o += 1
return map_this2other
def shuffle(self, frame=1):
"""shuffle multiple alignment.
The frame determines the block size for shuffling. Use 3 for codons in
a multiple alignment without frame-shifts.
"""
columns = list(range(self.getNumColumns() // frame))
random.shuffle(columns)
if frame > 1:
cc = []
for x in columns:
cc += [x * frame + y for y in range(frame)]
columns = cc
self.takeColumns(columns)
def propagateMasks(self, min_chars=1, mask_char="x"):
"""propagate masked characters to all rows of a multiple alignment
within a column.
If there is at least min_chars in a mali column, that are masks,
propagate the masks to all other rows.
"""
masks_per_column = {}
for identifier, s in list(self.mMali.items()):
r = s.mString.lower()
for x in range(len(r)):
if r[x] == mask_char:
if x not in masks_per_column:
masks_per_column[x] = 0
masks_per_column[x] += 1
columns_to_mask = []
for c, n in list(masks_per_column.items()):
if n >= min_chars:
columns_to_mask.append(c)
columns_to_mask.sort()
self.maskColumns(columns_to_mask, mask_char=mask_char)
def propagateTransitions(self, min_chars=1):
"""propagate lower case in a column to all residues.
"""
columns_to_change = set()
for identifier, s in list(self.mMali.items()):
r = s.mString
for x in range(len(r)):
                if r[x] in string.ascii_lowercase:
columns_to_change.add(x)
columns_to_change = list(columns_to_change)
columns_to_change.sort()
        self.mapColumns(columns_to_change, str.lower)
def takeColumns(self, columns):
"""restrict alignments to certain columns."""
for identifier, s in list(self.mMali.items()):
s.takeColumns(columns)
for key, anno in list(self.mAnnotations.items()):
self.mAnnotations[key] = "".join([anno[c] for c in columns])
def maskColumns(self, columns, mask_char="x"):
"""mask columns in a multiple alignment."""
for identifier, s in list(self.mMali.items()):
s.maskColumns(columns, mask_char=mask_char)
def mapColumns(self, columns, map_function):
"""apply map_function to all residues in columns."""
for identifier, s in list(self.mMali.items()):
s.mapColumns(columns, map_function)
def recount(self, reset_first=False):
"""recount residue in alignments."""
for id, seq in list(self.mMali.items()):
if reset_first:
seq.mFrom = 0
seq.mTo = seq.mFrom + self.countCharacters(seq.mString)
def maskColumn(self, column, mask_char="x"):
"""mask a column."""
for identifier, s in list(self.mMali.items()):
s.maskColumn(column, mask_char)
def copyAnnotations(self, other):
"""copy annotations from annother mali."""
map_this2other = self.buildColumnMap(other)
ncols = self.getWidth()
for key, annotation in list(other.mAnnotations.items()):
a = []
for x in range(ncols):
m = map_this2other[x]
if m is not None:
a.append(annotation[m])
else:
a.append(self.mGapChar)
self.mAnnotations[key] = "".join(a)
def getAnnotation(self, key):
"""return annotation associated with key."""
return self.mAnnotations[key]
def setAnnotation(self, key, value):
"""set annotation associated with key to value."""
self.mAnnotations[key] = value
def addAnnotation(self, key, annotation):
"""add annotation."""
xkey = key
n = 0
        while xkey in self.mAnnotations:
            xkey = "%s_%i" % (key, n)
            n += 1
        self.mAnnotations[xkey] = re.sub(r"\s", "", annotation)
def truncate(self, first, last):
"""truncate alignment within range."""
for key, value in list(self.mMali.items()):
value.truncate(first, last)
def mapIdentifiers(self, map_old2new=None, pattern_identifier="ID%06i"):
"""map identifiers in multiple aligment.
if map_old2new is not given, a new map is created (map_new2old)
"""
new_identifiers = []
if map_old2new is not None:
for id in self.mIdentifiers:
new_id = map_old2new[id]
new_identifiers.append(new_id)
entry = self.mMali[id]
del self.mMali[id]
# entry.mId = new_id
self.mMali[new_id] = entry
else:
map_old2new = {}
n = 1
for id in self.mIdentifiers:
new_id = pattern_identifier % n
new_identifiers.append(new_id)
entry = self.mMali[id]
del self.mMali[id]
# entry.mId = new_id
self.mMali[new_id] = entry
map_old2new[new_id] = id
n += 1
self.mIdentifiers = new_identifiers
return map_old2new
def getAlphabet(self):
"""get alphabet from the multiple alignment.
Alphabet is "na", if more than 90% of characaters are "actgxn",
otherwise it is "aa".
"""
s = "".join([x.mString for x in list(self.values())]).lower()
s = re.sub("[%s]" % "".join(self.mGapChars), "", s)
ss = re.sub("[acgtxn]", "", s)
if float(len(ss)) < (len(s) * 0.1):
return "na"
else:
return "aa"
def checkLength(self):
"""check lengths of aligned strings.
Return false if they are inconsistent."""
l = None
for s in list(self.mMali.values()):
if l is None:
l = len(s.mString)
else:
if l != len(s.mString):
return False
return True
def clipByAnnotation(self, key, chars=""):
"""restrict alignment to positions where annotation identified by key
in chars.
if chars is empty, nothing is clipped.
"""
if key not in self.mAnnotations:
return
if chars == "":
return
if self.getNumColumns() == 0:
return
anno = self.mAnnotations[key]
columns = []
for x in range(len(anno)):
if anno[x] in chars:
columns.append(x)
self.takeColumns(columns)
def apply(self, f):
"""apply function f to every row in the multiple alignment."""
for s in list(self.mMali.values()):
f(s)
def filter(self, f):
'''filter multiple alignment using function *f*.
The function *f* should return True for entries that should be
kept and False for those that should be removed.
'''
ids, vals = [], []
for id in self.mIdentifiers:
s = self.mMali[id]
if f(s):
vals.append(s)
ids.append(id)
self.mIdentifiers = ids
        self.mMali = dict(zip(ids, vals))
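
# Minimal usage sketch (hypothetical file names):
#
#   mali = Mali()
#   with open("input.fasta") as infile:
#       mali.readFromFile(infile, format="fasta")
#   mali.removeGaps(minimum_gaps=mali.getNumSequences())
#   with open("output.sto", "w") as outfile:
#       mali.writeToFile(outfile, format="stockholm")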
class SequenceCollection(Mali):
"""reads in a sequence collection, but permits several entries per id.
Note that this might cause problems with interleaved formats like
phylips or clustal.
This mapping is achieved by appending a numeric suffix. The suffix
is retained for the life-time of the object, but not output to a
file.
"""
def __init__(self):
Mali.__init__(self)
def addEntry(self, s):
"""add an aligned string object."""
id = s.mId
if id in list(self.mMali.keys()):
x = 1
while "%s_%i" % (id, x) in list(self.mMali.keys()):
x += 1
id = "%s_%i" % (id, x)
self.mIdentifiers.append(id)
s.mIsMangled = True
self.mMali[id] = s
def readFromFile(self, infile, format="fasta"):
"""read multiple alignment from file in various format."""
self.mMali = {}
self.mIdentifiers = []
        pattern_parse_ranges = re.compile(r"(\S+)/(\d+)-(\d+)")
if isinstance(infile, list) or \
isinstance(infile, tuple):
lines = infile
else:
lines = infile.readlines()
if format not in ("stockholm"):
# save comments
self.mComments = [x for x in lines if x[0] == "#"]
lines = [x for x in lines if x[0] != "#"]
else:
self.mComments = []
def getId(id, s):
x = pattern_parse_ranges.match(id)
if x:
id, fr, to = x.groups()
fr, to = int(fr) - 1, int(to)
else:
fr, to = 0, self.countCharacters(s)
self.mWriteRanges = False
return id, fr, to
#######################################################################
if format.lower() == "plain":
for line in lines:
if not line.strip():
continue
data = line[:-1].split("\t")
id = data[3]
xid = id
x = 0
while xid in self.mMali:
xid = id + "-" + str(x)
x += 1
self.addEntry(
AlignedString(xid,
int(data[0]) - 1,
int(data[2]),
data[1]))
#######################################################################
elif format.lower() == "fasta":
pattern_identifier = "\S+"
id = None
fragments = []
for line in lines:
if line[0] == ">":
if id:
s = re.sub("\s", "", string.join(fragments, ""))
id, fr, to = getId(id, s)
self.addEntry(AlignedString(id, fr, to, s))
id = re.search(
"^(%s)" % pattern_identifier, line[1:-1]).group(0)
fragments = []
continue
fragments.append(line[:-1])
s = re.sub("\s", "", string.join(fragments, ""))
id, fr, to = getId(id, s)
self.addEntry(AlignedString(id, fr, to, s))
#######################################################################
elif format.lower() == "phylip":
raise ValueError("phylip not implemented")
#######################################################################
elif format.lower() == "clustal":
raise ValueError("clustal not implemented")
elif format.lower() == "stockholm":
raise ValueError("stockholm not implemented")
if len(self.mMali) == 0:
self.mLength = 0
else:
self.mLength = min(
[len(x.mString) for x in list(self.mMali.values())])
def convertMali2Alignlib(mali):
'''convert a multiple alignment of type :class:`Mali`
into an alignlib_lite.py_multiple alignment object.
'''
import alignlib_lite
m = alignlib_lite.py_makeMultipleAlignment()
for identifier in mali.getIdentifiers():
a = alignlib_lite.py_makeAlignatum(mali[identifier])
m.add(a)
return m
def convertAlignlib2Mali(mali, identifiers=None, seqs=None):
"""convert a multiple alignment into an alignlib_lite.py_multiple
alignment object."""
m = Mali()
if not identifiers:
identifiers = ["%i" % x for x in range(mali.getNumSequences())]
if seqs is None:
# old style MultipleAlignment
for x in range(mali.getNumSequences()):
a = mali.getRow(x)
m.addSequence(
identifiers[x], a.getFrom(), a.getTo(), a.getString())
else:
import alignlib_lite
output = alignlib_lite.py_MultAlignmentFormatPlain(mali, seqs)
for x in range(mali.getNumSequences()):
a = output.mData[x]
m.addSequence(
identifiers[x], a.getFrom(), a.getTo(), a.getString())
return m
| [
"[email protected]"
] | |
215d0564daeceb18cdbfe7df3305df4cf9aaddc4 | ddea930392ac5360b21e9043b620e703a9ccb31c | /tfx/components/example_gen/csv_example_gen/component.py | e98fab352364bc59a5a175075c9b90dce53af5c7 | [
"Apache-2.0"
] | permissive | Ark-kun/tfx | 9c82b688776c80b2435bbb6154476526e8525ec8 | f685f0387bd145316f43ceb484e64f893e749dcb | refs/heads/master | 2021-07-25T05:58:15.168607 | 2020-05-22T01:07:44 | 2020-05-22T01:08:18 | 180,868,735 | 0 | 0 | Apache-2.0 | 2019-04-11T20:01:57 | 2019-04-11T20:01:57 | null | UTF-8 | Python | false | false | 3,690 | py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX CsvExampleGen component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, Optional, Text, Union
from tfx import types
from tfx.components.base import executor_spec
from tfx.components.example_gen import component
from tfx.components.example_gen.csv_example_gen import executor
from tfx.proto import example_gen_pb2
class CsvExampleGen(component.FileBasedExampleGen): # pylint: disable=protected-access
"""Official TFX CsvExampleGen component.
The csv examplegen component takes csv data, and generates train
    and eval examples for downstream components.
"""
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
input: types.Channel = None, # pylint: disable=redefined-builtin
input_config: Optional[Union[example_gen_pb2.Input, Dict[Text,
Any]]] = None,
output_config: Optional[Union[example_gen_pb2.Output, Dict[Text,
Any]]] = None,
example_artifacts: Optional[types.Channel] = None,
input_base: Optional[types.Channel] = None,
instance_name: Optional[Text] = None,
enable_cache: Optional[bool] = None):
"""Construct a CsvExampleGen component.
Args:
input: A Channel of type `standard_artifacts.ExternalArtifact`, which
includes one artifact whose uri is an external directory containing csv
files (required).
input_config: An example_gen_pb2.Input instance, providing input
configuration. If unset, the files under input_base will be treated as a
single split. If any field is provided as a RuntimeParameter,
input_config should be constructed as a dict with the same field names
as Input proto message.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
size 2:1. If any field is provided as a RuntimeParameter,
output_config should be constructed as a dict with the same field names
as Output proto message.
example_artifacts: Optional channel of 'ExamplesPath' for output train and
eval examples.
input_base: Backwards compatibility alias for the 'input' argument.
instance_name: Optional unique instance name. Necessary if multiple
CsvExampleGen components are declared in the same pipeline.
enable_cache: Optional boolean to indicate if cache is enabled for the
CsvExampleGen component. If not specified, defaults to the value
specified for pipeline's enable_cache parameter.
"""
super(CsvExampleGen, self).__init__(
input=input,
input_config=input_config,
output_config=output_config,
example_artifacts=example_artifacts,
input_base=input_base,
instance_name=instance_name,
enable_cache=enable_cache)
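
# Minimal usage sketch (hypothetical path; ``external_input`` is assumed to
# come from tfx.utils.dsl_utils in TFX releases of this vintage):
#
#   from tfx.utils.dsl_utils import external_input
#
#   examples = external_input("/data/csv_dir")
#   example_gen = CsvExampleGen(input=examples)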
| [
"[email protected]"
] | |
5e8efd9eb59f40d86c42c63a6d9310545e0a1134 | 51f2492a5c207e3664de8f6b2d54bb93e313ca63 | /atcoder/abc102/b.py | 4be8dec8f9d480d7b0af81ef662a21f1f1ef5c4f | [
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | abeaumont/competitive-programming | 23c5aabd587d7bb15a61efd3428838cb934233dd | a24c9b89941a59d344b51dc1010de66522b1a0dd | refs/heads/master | 2023-09-01T09:50:58.267361 | 2023-07-31T18:00:10 | 2023-07-31T18:00:10 | 117,589,708 | 618 | 262 | WTFPL | 2023-07-12T17:36:20 | 2018-01-15T20:00:56 | C++ | UTF-8 | Python | false | false | 158 | py | #!/usr/bin/env python3
# https://abc102.contest.atcoder.jp/tasks/abc102_b
n = int(input())
a = [int(x) for x in input().split()]
a.sort()
print(a[-1] - a[0])
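
# For example, input "3" and "1 5 3" prints 4 (maximum minus minimum).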
| [
"[email protected]"
] | |
d22f9e180410bcb47f4308eb442280a1d6958408 | b3197b795911a2ebdd3308f39d0e7be4b4626a44 | /homework.4/4.task1.py | a81fcfb717c8ca4018adb4ef5c82f3125501d029 | [] | no_license | VPetrashchu/python-course | 9e2af9582f1600201c6f28681ead7426501a82b6 | d188c3f42f7fd70aad1535e0141e7ff5fddd1d8f | refs/heads/master | 2023-02-23T09:41:56.079047 | 2021-01-31T20:12:08 | 2021-01-31T20:12:08 | 317,589,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | number = int(input('Enter number: '))
fact = 1
for i in range(1, number + 1):
fact = fact * i
print('Factorial is: {}'.format(fact))
# while ( number > 0):
# fact = fact * number
# number = number - 1
# print('Factorial is: {}'.format(fact))
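
# For example, entering 5 prints: Factorial is: 120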
| [
"="
] | = |
97d915750244d1397fea6975d202218d1ad853f4 | 29f4de72b9aadaba277b4adb5e5cee5d8dd71f1e | /projection_data/make_ai.py | 49ce8d7363987cb88c00c23533048229bdb00207 | [] | no_license | fgassert/aqueduct_atlas | 87be4e1fbe9686cf06ff9c65257deabc617344e9 | d00cd78ef3122aeda6eb563d0913baf73a9bb80e | refs/heads/master | 2021-01-21T21:48:26.821562 | 2016-04-21T22:02:58 | 2016-04-21T22:02:58 | 15,684,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,658 | py |
import arcpy as ap
AI_RES = 600
def render_fields(prefix, polygons, mxd_template, layer_template, map_fields, map_layer_labels, map_values, ai_res=AI_RES):
mxd = ap.mapping.MapDocument(mxd_template)
df = ap.mapping.ListDataFrames(mxd)[0]
if map_layer_labels is None:
map_layer_labels = map_fields
if len(map_fields)!=len(map_layer_labels):
print "ERROR: Labels != fields"
if mxd.relativePaths == False:
print "ERROR: RelativePaths == False"
if type(map_fields) == str:
map_fields = [map_fields]
grpLyr = ap.mapping.ListLayers(mxd,"indicators")[0]
for i in range(len(map_fields)):
print "dissolving %s" % map_fields[i]
dissolved = "dis_%s_%s" % (map_layer_labels[i])
if not ap.Exists(dissolved):
ap.Dissolve_management(polygons,dissolved,map_fields[i])
else:
print "%s exists, skipping" % dissolved
lyr = ap.mapping.Layer(layer_template)
lyr.name = map_layer_labels[i]
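        # NOTE: WORKSPACE is assumed to be defined elsewhere in this module
        # (the file geodatabase holding the dissolved feature classes).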
lyr.replaceDataSource(WORKSPACE, "FILEGDB_WORKSPACE", dissolved, True)
lyr.symbology.valueField = map_fields[i]
lyr.symbology.classValues = map_values[map_fields[i]]
if grpLyr.isGroupLayer:
ap.mapping.AddLayerToGroup(df,grpLyr,lyr)
else:
ap.mapping.AddLayer(df, lyr)
outfile = "bin/%s%s_%s.ai"%(prefix, map_layer_labels[i])
print "exporting %s" % outfile
ap.mapping.ExportToAI(mxd, outfile,resolution=ai_res)
ap.mapping.RemoveLayer(df, ap.mapping.ListLayers(mxd,lyr.name)[0])
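
# Minimal usage sketch (hypothetical arguments):
#
#   render_fields("aq_", "basin_polygons", "template.mxd", "template.lyr",
#                 map_fields=["BWS_cat"], map_layer_labels=["water_stress"],
#                 map_values={"BWS_cat": [1, 2, 3, 4, 5]})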
| [
"[email protected]"
] | |
a7c2ff64776197033f6935ebd084216784ca1b4f | 1521d32e3a2747054eea03df3195ca0fd52cfe71 | /src/python/zensols/garmdown/__init__.py | 1ed5dd6e2c05b14c7469f2b9a750d47327855ca1 | [
"MIT"
] | permissive | garmin-data/garmdown | 533c525512914b97cbf42a919d670feb59c3269a | 42509ddcc11bd7469e3a80d648fabd155657a074 | refs/heads/master | 2023-07-05T09:07:36.476348 | 2022-02-28T19:36:01 | 2022-02-28T19:36:01 | 191,933,069 | 15 | 6 | MIT | 2023-02-15T23:23:44 | 2019-06-14T11:37:45 | Python | UTF-8 | Python | false | false | 212 | py | from .domain import *
from .fetcher import *
from .persist import Persister
from .sheets import SheetUpdater
from .backup import *
from .reporter import *
from .mng import *
from .cli import *
from .app import *
| [
"[email protected]"
] | |
4442d3be186c0780c78d903f7110f0e29096dfb6 | 8cbf335c5a39f2bbf1912b937ea4c3a31ab76f53 | /kakuro.py | 3869cab2399a04e10f9778aee78dd0fa41a9b26b | [] | no_license | louisabraham/kakuro.py | e72e5a0dd4d1fc8b43bb8b1004ce7b46e5bf88bf | 28ab8e5b066773a0f27f9eff6629391d21b167fc | refs/heads/master | 2023-08-13T12:28:18.538669 | 2021-10-14T21:28:19 | 2021-10-14T21:28:19 | 417,281,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,969 | py | from functools import lru_cache, partial
from collections import defaultdict
from set_cover import solve as solve_set_cover
def encode(pattern, cols, lines):
grid = [[c == "0" for c in line] for line in pattern.split()]
n = len(grid)
constraints = []
# build constraints on lines
vars = []
cur = 0
for i in range(n):
for j in range(n):
if grid[i][j]:
vars.append((i, j))
if (j == n - 1 or not grid[i][j]) and vars:
constraints.append((lines[cur], vars))
cur += 1
vars = []
# build constraints on columns
vars = []
cur = 0
for j in range(n):
for i in range(n):
if grid[i][j]:
vars.append((i, j))
if (i == n - 1 or not grid[i][j]) and vars:
constraints.append((cols[cur], vars))
cur += 1
vars = []
# map variables to constraints
var_to_cons = defaultdict(list)
for c, (_, vars) in enumerate(constraints):
for var in vars:
var_to_cons[var].append(c)
Y = {}
for i in range(n):
for j in range(n):
if not grid[i][j]:
continue
for x in range(1, 10):
# each cell has exactly one value
Y[i, j, x] = [("pos", i, j)]
for c in var_to_cons[i, j]:
# each value can be used at most once
Y[i, j, x].append(("con", c, x))
# add the "complement" values
for c, (tot, vars) in enumerate(constraints):
for t in decomp(45 - tot, 9 - len(vars)):
Y[c, t] = [("con", c)]
for x in t:
Y[c, t].append(("con", c, x))
# build X from Y
X = defaultdict(set)
for y, l in Y.items():
for x in l:
X[x].add(y)
return n, X, Y
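
# In the exact-cover instance built above, every ("pos", i, j) element forces
# exactly one digit per open cell, ("con", c) forces exactly one complement
# per run, and ("con", c, x) guarantees digit x is used at most once per run -
# either in a cell or in the run's complement, which also pins the run's sum.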
@lru_cache(None)
def decomp(n, k, mini=1):
    if k == 0:
        # a 9-cell run has an empty complement: valid only when n == 0
        return [()] if n == 0 else []
    if n < mini:
        return []
if k == 1:
return [(n,)] if n < 10 else []
ans = []
for x in range(mini, 10):
for t in decomp(n - x, k - 1, mini=x + 1):
ans.append((x,) + t)
return ans
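
# For example, decomp(10, 2) returns [(1, 9), (2, 8), (3, 7), (4, 6)]: all
# strictly increasing pairs of distinct digits 1-9 summing to 10.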
def pp_sol(n, sol):
grid = [[0 for _ in range(n)] for _ in range(n)]
for x in sol:
if len(x) == 3:
i, j, x = x
grid[i][j] = x
return "\n".join("".join(str(x) for x in line) for line in grid)
def solve(pattern, cols, lines):
n, X, Y = encode(pattern, cols, lines)
yield from map(partial(pp_sol, n), solve_set_cover(X, Y))
if __name__ == "__main__":
pattern = """
0000X000
00000000
00000000
X0000000
0000000X
00000000
00000000
000X0000
"""
cols = [10, 13, 38, 39, 31, 28, 36, 39, 12, 10]
lines = [14, 8, 38, 36, 35, 35, 37, 36, 6, 11]
print(next(solve(pattern, cols, lines)))
| [
"[email protected]"
] | |
7788d6d2554c64b729e9701d0fe4596e17cccfe8 | 5f22ddbd3eeb99709e43e7b9a7958c9987c7efa4 | /__competitions/2014/11_03_w12/01.py | d50b3c914cc79f47bca1e6cd9529281c8b5f817c | [] | no_license | salvador-dali/algorithms_general | 04950bd823fc354adc58a4f23b7d2f3d39664798 | aeee3356e2488c6fab08741b1ac26e8bd5e4ac0d | refs/heads/master | 2020-12-14T06:24:10.466601 | 2016-07-17T06:00:17 | 2016-07-17T06:00:17 | 47,397,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | # https://www.hackerrank.com/contests/w12/challenges/priyanka-and-toys
# sort numbers and use greedy solution to find which one overlap
def toBuy(arr):
arr.sort()
num, maxPrice = 0, -1
for i in arr:
if i > maxPrice:
num += 1
maxPrice = i + 4
return num
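
# For example, prices [1, 5, 10, 12] need 2 purchases: the unit bought at
# price 1 covers prices 1-5, and the one at price 10 covers 10-14.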
input()
print toBuy(list(map(int, raw_input().split())))
| [
"[email protected]"
] |