# ==== 314casso/estate-agent :: /realestate/utils/lib.py ====
# -*- coding: utf-8 -*-
from estatebase.models import LayoutFeature
_capital_letters = {
u'А': u'A',
u'Б': u'B',
u'В': u'V',
u'Г': u'G',
u'Д': u'D',
u'Е': u'E',
u'Ё': u'E',
u'Ж': u'Zh',
u'З': u'Z',
u'И': u'I',
u'Й': u'Y',
u'К': u'K',
u'Л': u'L',
u'М': u'M',
u'Н': u'N',
u'О': u'O',
u'П': u'P',
u'Р': u'R',
u'С': u'S',
u'Т': u'T',
u'У': u'U',
u'Ф': u'F',
u'Х': u'H',
u'Ц': u'Ts',
u'Ч': u'Ch',
u'Ш': u'Sh',
u'Щ': u'Sch',
u'Ъ': u'',
u'Ы': u'Y',
u'Ь': u'',
u'Э': u'E',
u'Ю': u'Yu',
u'Я': u'Ya',}
_lower_case_letters = {
u'а': u'a',
u'б': u'b',
u'в': u'v',
u'г': u'g',
u'д': u'd',
u'е': u'e',
u'ё': u'e',
u'ж': u'zh',
u'з': u'z',
u'и': u'i',
u'й': u'y',
u'к': u'k',
u'л': u'l',
u'м': u'm',
u'н': u'n',
u'о': u'o',
u'п': u'p',
u'р': u'r',
u'с': u's',
u'т': u't',
u'у': u'u',
u'ф': u'f',
u'х': u'h',
u'ц': u'ts',
u'ч': u'ch',
u'ш': u'sh',
u'щ': u'sch',
u'ъ': u'',
u'ы': u'y',
u'ь': u'',
u'э': u'e',
u'ю': u'yu',
u'я': u'ya',}
def transliterate(string):
capital_letters = _capital_letters
lower_case_letters = _lower_case_letters
len_str = len(string)
translit_string = u""
for index, char in enumerate(string, 1):
repl = lower_case_letters.get(char)
if repl:
translit_string += repl
continue
repl = capital_letters.get(char)
if repl:
if len_str > index:
if string[index] not in lower_case_letters:
repl = repl.upper()
else:
repl = repl.upper()
else:
repl = ''
translit_string += repl
return translit_string
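
# Illustrative checks (not part of the original file):
#   transliterate(u'Привет') -> u'Privet'
#   transliterate(u'ЩИ') -> u'SCHI'  (a capital followed by a capital is fully upper-cased)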
for st in LayoutFeature.objects.all().order_by('name'):
result = '%s = %s' % (transliterate(st.name), st.pk)
    print(result.upper())
# lst = list(WallConstrucion.objects.values_list('name', flat=True).order_by('id'))
# print(', '.join(lst))

# ==== nyucel/blm2010 :: /vize/130401064.py (Unlicense) ====
# Süleyman Baltacı - 130401064
# -*- coding: utf-8 -*-
import numpy as np
def RMSE(pred, target):
err = np.subtract(target, pred)
return (np.mean(err**2))**0.5
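
# Illustrative check (not in the original): RMSE(pred=[1, 2], target=[1, 4])
# = sqrt(mean([0, 4])) = sqrt(2) ~= 1.414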
# open the data file
f = open("veriler.txt")
# read the data; drop a blank line if there is one
data = f.readlines()
if "\n" in data: data.remove("\n")
# store the data in the numpy array y; x starts from 0
y = np.array(data, dtype=int)
x = np.array([i for i in range(len(y))], dtype=int)
# open the results file
f_sonuc = open("sonuclar.txt","w+")
f_sonuc.write("When a single polynomial is defined over the whole data set:\n\n")
## When a single polynomial is fitted over the whole data set:
RMSE_list = [0]*6
for i in range(6):
    # poly : the interpolating polynomial
    poly = np.poly1d(np.polyfit(x, y, i+1))
    f_sonuc.write(f"Polynomial degree: {i+1} \n")
    f_sonuc.write(f"Coefficients: {poly.coeffs} \n")
    # compute the RMSE
RMSE_list[i] = RMSE(poly(x), y)
f_sonuc.write(f"RMSE: {RMSE_list[i]:.3f} \n\n")
# find the degree of the best-fitting polynomial and write it together with its RMSE
eniyi_derece = np.argmin(RMSE_list)+1
f_sonuc.write(f"The degree-{eniyi_derece} polynomial gives the lowest error.\n")
f_sonuc.write(f"RMSE: {RMSE_list[eniyi_derece-1]:.3f} \n\n\n")
## When the data is split into chunks of ten and a polynomial is fitted to each:
f_sonuc.write("When a separate polynomial is found for each chunk of ten:\n\n")
# compute how many separate polynomials are needed:
onluk_sayisi = int((len(x)/10)) + 1
for i in range(onluk_sayisi):
    # find the index range of the interval to be fitted and select its x and y data:
i_min = i*10
i_max = min(i*10+9, len(x)-1)
x_curr = x[i_min:i_max+1:]
y_curr = y[i_min:i_max+1:]
    # define lists to hold the fitted polynomial of each degree and its RMSE
poly_lst =[]
RMSE_list = []
    # if the interval to be fitted contains fewer than 7 data points,
    # polynomials of degree at most (number of points in the interval) - 1 are tried
    for j in range(min(i_max-i_min, 6)):
        # fit a polynomial of degree j+1, store it in poly_lst and compute its RMSE
poly_lst.append(np.poly1d(np.polyfit(x_curr, y_curr, j+1)))
RMSE_list.append(RMSE(poly_lst[j](x_curr), y_curr))
    # find the polynomial degree that gives the best result and write out the result
eniyi_derece = np.argmin(RMSE_list) + 1
f_sonuc.write(f"x : [ {x[i_min]} {x[i_max]} ]\n")
f_sonuc.write(f"Polinom derecesi: {eniyi_derece}, ")
f_sonuc.write(f"RMSE: {RMSE_list[eniyi_derece-1]:.3f} \n\n")
f_sonuc.close()
f.close()

# ==== profmcdan/real-estate-api :: /users/schema.py ====
from django.contrib.auth import get_user_model
import graphene
from graphene_django import DjangoObjectType
from graphql import GraphQLError
class UserType(DjangoObjectType):
class Meta:
model = get_user_model()
class CreateUser(graphene.Mutation):
user = graphene.Field(UserType)
class Arguments:
email = graphene.String(required=True)
password = graphene.String(required=True)
firstname = graphene.String(required=True)
lastname = graphene.String(required=True)
def mutate(self, info, email, firstname, lastname, password):
user = get_user_model().objects.create_user(email=email, password=password,
firstname=firstname, lastname=lastname)
return CreateUser(user=user)
class Mutation(graphene.ObjectType):
create_user = CreateUser.Field()
class Query(graphene.ObjectType):
users = graphene.List(UserType)
me = graphene.Field(UserType)
def resolve_users(self, info):
return get_user_model().objects.all()
def resolve_me(self, info):
user = info.context.user or None
if user.is_anonymous:
raise GraphQLError('Authentication required')
return user
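
# Illustrative GraphQL operations this schema accepts (assumes graphene's
# default camelCasing; the e-mail value is hypothetical, not from the original):
#
# mutation {
#   createUser(email: "[email protected]", password: "s3cret",
#              firstname: "Ada", lastname: "Lovelace") {
#     user { email }
#   }
# }
#
# query { me { email } }   # requires an authenticated request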

# ==== y-du/module-update-service :: /update/manager.py (Apache-2.0) ====
"""
Copyright 2020 Yann Dumont
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ("Manager", )
from .logger import getLogger
from .configuration import mu_conf, EnvVars
from .util import ModuleState, getDelay
import requests
import time
logger = getLogger(__name__.split(".", 1)[-1])
class Manager:
def __get(self, url):
response = requests.get(url=url)
if not response.status_code == 200:
raise RuntimeError(response.status_code)
return response.json()
def __getRemoteModules(self, url, mod_ids):
modules = dict()
for mod_id in mod_ids:
try:
modules[mod_id] = self.__get("{}/{}".format(url, mod_id))
except Exception as ex:
logger.error("can't retrieve module '{}' from registry - {}".format(mod_id, ex))
return modules
def __mergeConfigs(self, old: dict, new: dict):
for key in old:
for k, v in old[key]["service_configs"].items():
if k in new[key]["service_configs"] and not v == new[key]["service_configs"][k]:
logger.debug("found user value for '{}'".format(k))
new[key]["service_configs"][k] = v
def run(self):
try:
while True:
time.sleep(getDelay())
try:
local_mods = self.__get("{}/{}".format(mu_conf.MM.url, mu_conf.MM.api))
remote_mods = self.__getRemoteModules("{}/{}".format(mu_conf.MR.url, mu_conf.MR.api), local_mods.keys())
pending = list()
for mod_id in set(local_mods) & set(remote_mods):
logger.info("checking '{}' ...".format(local_mods[mod_id]["name"]))
if not local_mods[mod_id]["hash"] == remote_mods[mod_id]["hash"]:
pending.append(mod_id)
logger.info("update pending for '{}' ...".format(local_mods[mod_id]["name"]))
for mod_id in pending:
logger.info("merging configs for '{}' ...".format(local_mods[mod_id]["name"]))
configs = self.__get("{}/{}/{}".format(mu_conf.CS.url, mu_conf.CS.api, mod_id))
self.__mergeConfigs(configs, remote_mods[mod_id]["services"])
for mod_id in pending:
logger.info("updating '{}' ...".format(local_mods[mod_id]["name"]))
requests.patch(url="{}/{}/{}".format(mu_conf.MM.url, mu_conf.MM.api, mod_id), json={"state": ModuleState.inactive})
while True:
response = self.__get("{}/{}/{}".format(mu_conf.MM.url, mu_conf.MM.api, mod_id))
if response["state"] == ModuleState.inactive:
break
time.sleep(1)
remote_mods[mod_id]["id"] = mod_id
requests.post(url="{}/{}".format(mu_conf.MM.url, mu_conf.MM.api), json=remote_mods[mod_id])
requests.patch(url="{}/{}/{}".format(mu_conf.MM.url, mu_conf.MM.api, mod_id), json={"state": ModuleState.active})
logger.info("update for '{}' successful".format(local_mods[mod_id]["name"]))
except Exception as ex:
logger.exception("error during update:".format(ex))
finally:
pass

# ==== mparno/fenics-web :: /docs/dolfin/1.0.beta/python/source/demo/undocumented/curl-curl/python/demo_curl-curl.py ====
r""" Eddy current phenomena in a low-conductivity body can be
described using electric vector potential and curl-curl operator:
\nabla \times \nabla \times T = - \frac{\partial B}{\partial t}
Electric vector potential defined as:
\nabla \times T = J
Boundary condition:
J_n = 0,
T_t=T_w=0, \frac{\partial T_n}{\partial n} = 0
which is naturally fulfilled for zero Dirichlet BC with Nedelec (edge)
elements.
"""
# Copyright (C) 2009 Bartosz Sawicki
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Anders Logg 2011
#
# First added: 2009-04-02
# Last changed: 2011-06-28
from dolfin import *
# Create mesh
mesh = UnitSphere(8)
# Define function spaces
PN = FunctionSpace(mesh, "Nedelec 1st kind H(curl)", 1)
P1 = VectorFunctionSpace(mesh, "CG", 1)
# Define test and trial functions
v0 = TestFunction(PN)
u0 = TrialFunction(PN)
v1 = TestFunction(P1)
u1 = TrialFunction(P1)
# Define functions
dbdt = Expression(("0.0", "0.0", "1.0"), degree=1)
zero = Expression(("0.0", "0.0", "0.0"), degree=1)
T = Function(PN)
J = Function(P1)
# Dirichlet boundary
class DirichletBoundary(SubDomain):
def inside(self, x, on_boundary):
return on_boundary
# Boundary condition
bc = DirichletBC(PN, zero, DirichletBoundary())
# Solve eddy currents equation (using potential T)
solve(inner(curl(v0), curl(u0))*dx == -inner(v0, dbdt)*dx, T, bc)
# Solve density equation
solve(inner(v1, u1)*dx == dot(v1, curl(T))*dx, J)
# Plot solution
plot(J)
file=File("current_density.pvd")
file << J
# Hold plot
interactive()

# ==== benjimin/digitalearthau :: /integration_tests/conftest.py ====
import itertools
import logging
import os
from contextlib import contextmanager
from pathlib import Path
import pytest
import shutil
import yaml
import digitalearthau
from datacube.config import LocalConfig
from datacube.index._api import Index
from datacube.index.postgres import PostgresDb
from datacube.index.postgres import _dynamic
from datacube.index.postgres.tables import _core
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
INTEGRATION_DEFAULT_CONFIG_PATH = Path(__file__).parent.joinpath('deaintegration.conf')
INTEGRATION_TEST_DATA = Path(__file__).parent / 'data'
PROJECT_ROOT = Path(__file__).parents[1]
DEA_MD_TYPES = digitalearthau.CONFIG_DIR / 'metadata-types.yaml'
DEA_PRODUCTS_DIR = digitalearthau.CONFIG_DIR / 'products'
def load_yaml_file(path):
with path.open() as f:
return list(yaml.load_all(f, Loader=SafeLoader))
@pytest.fixture
def integration_test_data(tmpdir):
d = tmpdir.join('integration_data')
shutil.copytree(str(INTEGRATION_TEST_DATA), str(d))
return Path(str(d))
@pytest.fixture
def dea_index(index: Index):
"""
An index initialised with DEA config (products)
"""
# Add DEA metadata types, products. They'll be validated etc.
for md_type_def in load_yaml_file(DEA_MD_TYPES):
index.metadata_types.add(index.metadata_types.from_doc(md_type_def))
for product_file in DEA_PRODUCTS_DIR.glob('*.yaml'):
for product_def in load_yaml_file(product_file):
index.products.add_document(product_def)
return index
@pytest.fixture
def datasets(dea_index):
# Add test datasets, collection definitions.
pass
@pytest.fixture
def integration_config_paths():
return (
str(INTEGRATION_DEFAULT_CONFIG_PATH),
os.path.expanduser('~/.datacube_integration.conf')
)
@pytest.fixture
def global_integration_cli_args(integration_config_paths):
"""
The first arguments to pass to a cli command for integration test configuration.
"""
# List of a config files in order.
return list(itertools.chain(*(('--config_file', f) for f in integration_config_paths)))
@pytest.fixture
def local_config(integration_config_paths):
return LocalConfig.find(integration_config_paths)
@pytest.fixture()
def db(local_config):
db = PostgresDb.from_config(local_config, application_name='dea-test-run', validate_connection=False)
# Drop and recreate tables so our tests have a clean db.
with db.connect() as connection:
_core.drop_db(connection._connection)
remove_dynamic_indexes()
# Disable informational messages since we're doing this on every test run.
with _increase_logging(_core._LOG) as _:
_core.ensure_db(db._engine)
# We don't need informational create/drop messages for every config change.
_dynamic._LOG.setLevel(logging.WARN)
yield db
db.close()
@contextmanager
def _increase_logging(log, level=logging.WARN):
previous_level = log.getEffectiveLevel()
log.setLevel(level)
yield
log.setLevel(previous_level)
def remove_dynamic_indexes():
"""
Clear any dynamically created indexes from the schema.
"""
# Our normal indexes start with "ix_", dynamic indexes with "dix_"
for table in _core.METADATA.tables.values():
table.indexes.intersection_update([i for i in table.indexes if not i.name.startswith('dix_')])
@pytest.fixture
def index(db):
"""
:type db: datacube.index.postgres._api.PostgresDb
"""
return Index(db)

# ==== chrismcguire/fleet-py :: /fleet/deploy/template_manager.py (MIT) ====
__author__ = 'sukrit'
from pkg_resources import resource_string
BUNDLED_TEMPLATE_PREFIX = "bundled://"
RAW_TEMPLATE_PREFIX = "raw://"
def fetch_template(template_url):
    if template_url.startswith('http://') or \
            template_url.startswith('https://'):
        # remote templates are not handled here; fall through (returns None)
        pass
if template_url.startswith(BUNDLED_TEMPLATE_PREFIX) and \
len(template_url) > len(BUNDLED_TEMPLATE_PREFIX):
template_file = template_url[len(BUNDLED_TEMPLATE_PREFIX):] +\
".service"
template = resource_string(__name__, '../../templates/'+template_file)
return template
if template_url.startswith(RAW_TEMPLATE_PREFIX) and \
len(template_url) > len(RAW_TEMPLATE_PREFIX):
return template_url[len(RAW_TEMPLATE_PREFIX):]
def fetch_bundled_template_url(group='default', template_type='app'):
template_url = '{}{}-{}'.format(BUNDLED_TEMPLATE_PREFIX, group,
template_type)
return template_url
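
# Illustrative usage sketch (assumes a bundled unit file exists at
# templates/default-app.service relative to this package; not in the original):
# url = fetch_bundled_template_url()       # -> "bundled://default-app"
# unit = fetch_template(url)               # -> contents of default-app.service
# raw = fetch_template("raw://[Unit] ...") # -> "[Unit] ..."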

# ==== HodardCodeclub/SoftwareDevelopment :: /fossir/core/db/sqlalchemy/__init__.py ====
# This file is part of fossir.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# fossir is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# fossir is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fossir; if not, see <http://www.gnu.org/licenses/>.
from .core import db
from .custom import *

# ==== charan2108/pythonprojectsNew :: /listsFolder/sort.py ====
cars = ['ferrari', 'ford', 'benz', 'audi', 'lamb']
print(cars)
cars.sort()
print(cars)
# reverse (descending) order
cars = ['ferrari', 'ford', 'benz', 'audi', 'lamb']
print(cars)
cars.sort(reverse=True)
print(cars)

# ==== lfunderburk/Effective-Instructions :: /instruction_env/Lib/site-packages/sphinx/theming.py (MIT) ====
"""
sphinx.theming
~~~~~~~~~~~~~~
Theming support for HTML builders.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import configparser
import os
import shutil
import tempfile
from os import path
from typing import Any, Dict, List
from zipfile import ZipFile
import pkg_resources
from sphinx import package_dir
from sphinx.errors import ThemeError
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.osutil import ensuredir
if False:
# For type annotation
from sphinx.application import Sphinx
logger = logging.getLogger(__name__)
NODEFAULT = object()
THEMECONF = 'theme.conf'
def extract_zip(filename: str, targetdir: str) -> None:
"""Extract zip file to target directory."""
ensuredir(targetdir)
with ZipFile(filename) as archive:
for name in archive.namelist():
if name.endswith('/'):
continue
entry = path.join(targetdir, name)
ensuredir(path.dirname(entry))
with open(path.join(entry), 'wb') as fp:
fp.write(archive.read(name))
class Theme:
"""A Theme is a set of HTML templates and configurations.
This class supports both theme directory and theme archive (zipped theme)."""
def __init__(self, name: str, theme_path: str, factory: "HTMLThemeFactory") -> None:
self.name = name
self.base = None
self.rootdir = None
if path.isdir(theme_path):
# already a directory, do nothing
self.rootdir = None
self.themedir = theme_path
else:
# extract the theme to a temp directory
self.rootdir = tempfile.mkdtemp('sxt')
self.themedir = path.join(self.rootdir, name)
extract_zip(theme_path, self.themedir)
self.config = configparser.RawConfigParser()
self.config.read(path.join(self.themedir, THEMECONF))
try:
inherit = self.config.get('theme', 'inherit')
except configparser.NoSectionError as exc:
raise ThemeError(__('theme %r doesn\'t have "theme" setting') % name) from exc
except configparser.NoOptionError as exc:
raise ThemeError(__('theme %r doesn\'t have "inherit" setting') % name) from exc
if inherit != 'none':
try:
self.base = factory.create(inherit)
except ThemeError as exc:
raise ThemeError(__('no theme named %r found, inherited by %r') %
(inherit, name)) from exc
def get_theme_dirs(self) -> List[str]:
"""Return a list of theme directories, beginning with this theme's,
then the base theme's, then that one's base theme's, etc.
"""
if self.base is None:
return [self.themedir]
else:
return [self.themedir] + self.base.get_theme_dirs()
def get_config(self, section: str, name: str, default: Any = NODEFAULT) -> Any:
"""Return the value for a theme configuration setting, searching the
base theme chain.
"""
try:
return self.config.get(section, name)
except (configparser.NoOptionError, configparser.NoSectionError) as exc:
if self.base:
return self.base.get_config(section, name, default)
if default is NODEFAULT:
raise ThemeError(__('setting %s.%s occurs in none of the '
'searched theme configs') % (section, name)) from exc
else:
return default
def get_options(self, overrides: Dict[str, Any] = {}) -> Dict[str, Any]:
"""Return a dictionary of theme options and their values."""
if self.base:
options = self.base.get_options()
else:
options = {}
try:
options.update(self.config.items('options'))
except configparser.NoSectionError:
pass
for option, value in overrides.items():
if option not in options:
logger.warning(__('unsupported theme option %r given') % option)
else:
options[option] = value
return options
def cleanup(self) -> None:
"""Remove temporary directories."""
if self.rootdir:
try:
shutil.rmtree(self.rootdir)
except Exception:
pass
if self.base:
self.base.cleanup()
def is_archived_theme(filename: str) -> bool:
"""Check the specified file is an archived theme file or not."""
try:
with ZipFile(filename) as f:
return THEMECONF in f.namelist()
except Exception:
return False
class HTMLThemeFactory:
"""A factory class for HTML Themes."""
def __init__(self, app: "Sphinx") -> None:
self.app = app
self.themes = app.html_themes
self.load_builtin_themes()
if getattr(app.config, 'html_theme_path', None):
self.load_additional_themes(app.config.html_theme_path)
def load_builtin_themes(self) -> None:
"""Load built-in themes."""
themes = self.find_themes(path.join(package_dir, 'themes'))
for name, theme in themes.items():
self.themes[name] = theme
def load_additional_themes(self, theme_paths: str) -> None:
"""Load additional themes placed at specified directories."""
for theme_path in theme_paths:
abs_theme_path = path.abspath(path.join(self.app.confdir, theme_path))
themes = self.find_themes(abs_theme_path)
for name, theme in themes.items():
self.themes[name] = theme
def load_extra_theme(self, name: str) -> None:
"""Try to load a theme having specifed name."""
if name == 'alabaster':
self.load_alabaster_theme()
elif name == 'sphinx_rtd_theme':
self.load_sphinx_rtd_theme()
else:
self.load_external_theme(name)
def load_alabaster_theme(self) -> None:
"""Load alabaster theme."""
import alabaster
self.themes['alabaster'] = path.join(alabaster.get_path(), 'alabaster')
def load_sphinx_rtd_theme(self) -> None:
"""Load sphinx_rtd_theme theme (if exists)."""
try:
import sphinx_rtd_theme
theme_path = sphinx_rtd_theme.get_html_theme_path()
self.themes['sphinx_rtd_theme'] = path.join(theme_path, 'sphinx_rtd_theme')
except ImportError:
pass
def load_external_theme(self, name: str) -> None:
"""Try to load a theme using entry_points.
Sphinx refers to ``sphinx_themes`` entry_points.
"""
        # look up new-style entry_points first
entry_points = pkg_resources.iter_entry_points('sphinx.html_themes', name)
try:
entry_point = next(entry_points)
self.app.registry.load_extension(self.app, entry_point.module_name)
return
except StopIteration:
pass
def find_themes(self, theme_path: str) -> Dict[str, str]:
"""Search themes from specified directory."""
themes = {} # type: Dict[str, str]
if not path.isdir(theme_path):
return themes
for entry in os.listdir(theme_path):
pathname = path.join(theme_path, entry)
if path.isfile(pathname) and entry.lower().endswith('.zip'):
if is_archived_theme(pathname):
name = entry[:-4]
themes[name] = pathname
else:
logger.warning(__('file %r on theme path is not a valid '
'zipfile or contains no theme'), entry)
else:
if path.isfile(path.join(pathname, THEMECONF)):
themes[entry] = pathname
return themes
def create(self, name: str) -> Theme:
"""Create an instance of theme."""
if name not in self.themes:
self.load_extra_theme(name)
if name not in self.themes:
if name == 'sphinx_rtd_theme':
raise ThemeError(__('sphinx_rtd_theme is no longer a hard dependency '
'since version 1.4.0. Please install it manually.'
'(pip install sphinx_rtd_theme)'))
else:
raise ThemeError(__('no theme named %r found '
'(missing theme.conf?)') % name)
return Theme(name, self.themes[name], factory=self)

# ==== IfDougelseSa/cursoPython :: /modulo_counter.py ====
"""
Collections module - Counter
Collections -> High-performance container datatypes
Counter -> Takes an iterable as a parameter and creates a Collections Counter object, which behaves
like a dictionary whose keys are the elements of the iterable passed in and whose values are the
number of occurrences of each element.
# Using the counter
from collections import Counter
# Example 1
# Any iterable can be used; here we use a list.
lista = [1, 1, 2, 2, 3, 3, 3, 1 ,1 ,1 ,2 ,2 ,4 ,4 ,4 , 5, 5, 3 , 55, 33, 66 , 55, 44, 33, 22, 2, 22, 33]
res = Counter(lista)
print(type(res))
print(res)
# Counter({1: 5, 2: 5, 3: 4, 4: 3, 33: 3, 5: 2, 55: 2, 22: 2, 66: 1, 44: 1})
# Note that for each element of the list, Counter created a key and stored the number
# of occurrences as its value.
# Example 2
print(Counter('Geek University'))
Counter({'e': 3, 'i': 2, 'G': 1, 'k': 1, ' ': 1, 'U': 1, 'n': 1, 'v': 1, 'r': 1, 's': 1, 't': 1, 'y': 1})
"""
from collections import Counter
# Example 3
texto = """ Lorem Ipsum is simply dummy text of the printing and typesetting industry.
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s,
when an unknown printer took a galley of type and scrambled it to make a type specimen book"""
palavras = texto.split()
# print(palavras)
res = Counter(palavras)
print(res)
# Finding the 5 words with the most occurrences in the text.
print(res.most_common(5))
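
# Illustrative extra (not in the original): Counter objects also support arithmetic,
# e.g. Counter('aab') + Counter('abc') == Counter({'a': 3, 'b': 2, 'c': 1}).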

# ==== alyago/django-web :: /web-serpng/code/serpng/routers.py ====
# Copyright (c) 2012, Simply Hired, Inc. All rights reserved.
"""Database Router"""
class SHRouter(object):
def db_for_read(self, model, **hints):
if model.__module__.startswith("autocomplete.models"):
return 'autocomplete'
        else:
            return None
class AllowSyncDBRouter(object):
def allow_syncdb(self, db, model):
if db == 'default' and model.__module__.startswith('django.contrib.sessions.models'):
return True
if db == 'resume' and (model.__module__.startswith("serpng.resume.models") or
model.__module__.startswith('south.')):
return True
return False
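
# Illustrative wiring (an assumption, not in the original file): in Django
# settings these routers would be enabled with something like
# DATABASE_ROUTERS = ['serpng.routers.SHRouter', 'serpng.routers.AllowSyncDBRouter']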

# ==== BerenMillidge/GPs :: /Gaussian_Process.py ====
from __future__ import division
import numpy as np
from kernels import *
default_kernel = Kernel("exponential_kernel", [0.01])
class GP:
def __init__(self, dataprovider, kernel = default_kernel):
self.dataprovider = dataprovider
self.kernel = kernel
def one_d_prior(self,data_vector):
N = len(data_vector)
means = np.zeros(N)
cov = np.zeros([N,N])
for i in xrange(N):
for j in xrange(N):
cov[i][j] = self.kernel.calculate_kernel_value(data_vector[i], data_vector[j])
#draw from the distribution and return
return np.random.multivariate_normal(means,cov)
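
# Illustrative usage sketch (assumes the kernels module's exponential kernel
# yields a valid covariance; dataprovider is unused by one_d_prior):
# gp = GP(dataprovider=None)
# sample = gp.one_d_prior(np.linspace(0, 1, 50))  # one draw from the GP prior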

# ==== AtilaCosta87/Python :: /cursopython/pythonteste/aula21ambiente02.py (MIT) ====
def soma(a=0, b=0, c=0):
"""
    -> Adds three values and shows the result on screen.
    :param a: The first value
    :param b: The second value
    :param c: The third value
    Function created by Átila Costa in a lesson from the CursoemVideo channel
"""
s = a + b + c
    print(f'The sum is {s}', end='')
#soma(3, 2, 5)
#soma(3, 2)
#soma(3)
#soma()
#soma(3, 3, 5, 8) -> raises an error, since the function takes at most 3 parameters
#soma(3, 3, 5)
#soma(b=4, c=2)
soma(c=3, a=2)

# ==== Gilles86/flogiston :: /setup.py ====
#!/usr/bin/env python
#from distutils.core import setup
from setuptools import setup
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.add_subpackage('flogiston')
return config
def main():
from numpy.distutils.core import setup
setup(name='flogiston',
version='0.1',
description='Link fMRI data to cognitive models like the LBA model',
author='Gilles de Hollander',
author_email='[email protected]',
url='http://www.gillesdehollander.nl',
packages=['flogiston'],
configuration=configuration
)
if __name__ == '__main__':
main()

# ==== hbradlow/cs164 :: /tests/correct2/test-typing05.py ====
x = "Hello"
y = ", world"
z = x+y

# ==== gabriellaec/desoft-analise-exercicios :: /backup/user_151/ch22_2020_09_09_18_55_45_398877.py ====
def reduction(x, y):
f = round((365 * x * y * 10) / (60 * 24), 0)
return f
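# Illustrative reading of the formula (assumed intent): each cigarette costs
# 10 minutes of life, so x cigarettes/day over y years is 365*x*y*10 minutes,
# and dividing by 60*24 converts that to whole days.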
a = int(input('cigarettes per day '))
b = int(input('for how many years '))
c = reduction(a, b)
print(c)

# ==== coriverchen/Steganalysis :: /YeNet-Tensorflow/testSRM.py ====
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 8 14:33:16 2019
@author: Lee
"""
import numpy as np
SRM_Kernels = np.load('SRM_Kernels.npy')
print(SRM_Kernels[:1])

# ==== travc/bcbio-nextgen :: /bcbio/variation/validate.py (MIT) ====
"""Perform validation of final calls against known reference materials.
Automates the process of checking pipeline results against known valid calls
to identify discordant variants. This provides a baseline for ensuring the
validity of pipeline updates and algorithm changes.
"""
import collections
import csv
import os
import shutil
import subprocess
import time
from pysam import VariantFile
import toolz as tz
import yaml
from bcbio import broad, utils
from bcbio.bam import callable
from bcbio.distributed.transaction import file_transaction
from bcbio.heterogeneity import bubbletree
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import validateplot, vcfutils, multi, naming
# ## Individual sample comparisons
def _get_validate(data):
"""Retrieve items to validate, from single samples or from combined joint calls.
"""
if data.get("vrn_file") and "validate" in data["config"]["algorithm"]:
return data
elif "group_orig" in data:
for sub in multi.get_orig_items(data):
if "validate" in sub["config"]["algorithm"]:
sub_val = utils.deepish_copy(sub)
sub_val["vrn_file"] = data["vrn_file"]
return sub_val
return None
def normalize_input_path(x, data):
"""Normalize path for input files, handling relative paths.
Looks for non-absolute paths in local and fastq directories
"""
if x is None:
return None
elif os.path.isabs(x):
return os.path.normpath(x)
else:
for d in [data["dirs"].get("fastq"), data["dirs"].get("work")]:
if d:
cur_x = os.path.normpath(os.path.join(d, x))
if os.path.exists(cur_x):
return cur_x
raise IOError("Could not find validation file %s" % x)
def _gunzip(f, data):
if f is None:
return None
elif f.endswith(".gz"):
out_file = f.replace(".gz", "")
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = "gunzip -c {f} > {tx_out_file}"
do.run(cmd.format(**locals()), "gunzip input file")
return out_file
else:
return f
def _get_caller(data):
callers = [tz.get_in(["config", "algorithm", "jointcaller"], data),
tz.get_in(["config", "algorithm", "variantcaller"], data),
"precalled"]
return [c for c in callers if c][0]
def compare_to_rm(data):
"""Compare final variant calls against reference materials of known calls.
"""
toval_data = _get_validate(data)
if toval_data:
if isinstance(toval_data["vrn_file"], (list, tuple)):
raise NotImplementedError("Multiple input files for validation: %s" % toval_data["vrn_file"])
else:
vrn_file = os.path.abspath(toval_data["vrn_file"])
rm_file = normalize_input_path(toval_data["config"]["algorithm"]["validate"], toval_data)
rm_interval_file = _gunzip(normalize_input_path(toval_data["config"]["algorithm"].get("validate_regions"),
toval_data),
toval_data)
caller = _get_caller(toval_data)
sample = dd.get_sample_name(toval_data)
base_dir = utils.safe_makedir(os.path.join(toval_data["dirs"]["work"], "validate", sample, caller))
rm_file = naming.handle_synonyms(rm_file, dd.get_ref_file(data), data["genome_build"], base_dir, data)
rm_interval_file = (naming.handle_synonyms(rm_interval_file, dd.get_ref_file(data),
data["genome_build"], base_dir, data)
if rm_interval_file else None)
vmethod = tz.get_in(["config", "algorithm", "validate_method"], data, "rtg")
if vmethod == "rtg":
eval_files = _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data)
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
elif vmethod == "bcbio.variation":
data["validate"] = _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir,
sample, caller, toval_data)
return [[data]]
# ## Real Time Genomics vcfeval
def _get_sample_and_caller(data):
return [tz.get_in(["metadata", "validate_sample"], data) or dd.get_sample_name(data),
_get_caller(data)]
def _rtg_add_summary_file(eval_files, base_dir, data):
"""Parse output TP FP and FN files to generate metrics for plotting.
"""
out_file = os.path.join(base_dir, "validate-summary.csv")
if not utils.file_uptodate(out_file, eval_files["tp"]):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["sample", "caller", "vtype", "metric", "value"])
base = _get_sample_and_caller(data)
for metric in ["tp", "fp", "fn"]:
for vtype, bcftools_types in [("SNPs", "snps"), ("Indels", "indels,mnps,other")]:
in_file = eval_files[metric]
cmd = ("bcftools view --types {bcftools_types} {in_file} | grep -v ^# | wc -l")
count = int(subprocess.check_output(cmd.format(**locals()), shell=True))
writer.writerow(base + [vtype, metric, count])
eval_files["summary"] = out_file
return eval_files
def _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, data):
"""Run evaluation of a caller against the truth set using rtg vcfeval.
"""
out_dir = os.path.join(base_dir, "rtg")
if not utils.file_exists(os.path.join(out_dir, "done")):
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
if not rm_file.endswith(".vcf.gz") or not os.path.exists(rm_file + ".tbi"):
rm_file = vcfutils.bgzip_and_index(rm_file, data["config"], out_dir=base_dir)
if len(vcfutils.get_samples(vrn_file)) > 1:
base, ext = utils.splitext_plus(vrn_file)
sample_file = os.path.join(base_dir, "%s-%s%s" % (base, dd.get_sample_name(data), ext))
vrn_file = vcfutils.select_sample(vrn_file, dd.get_sample_name(data), sample_file, data["config"])
if not vrn_file.endswith(".vcf.gz") or not os.path.exists(vrn_file + ".tbi"):
vrn_file = vcfutils.bgzip_and_index(vrn_file, data["config"], out_dir=base_dir)
interval_bed = _get_merged_intervals(rm_interval_file, base_dir, data)
ref_dir, ref_filebase = os.path.split(dd.get_ref_file(data))
rtg_ref = os.path.normpath(os.path.join(ref_dir, os.path.pardir, "rtg",
"%s.sdf" % (os.path.splitext(ref_filebase)[0])))
assert os.path.exists(rtg_ref), ("Did not find rtg indexed reference file for validation:\n%s\n"
"Run bcbio_nextgen.py upgrade --data --aligners rtg" % rtg_ref)
cmd = ["rtg", "vcfeval", "-b", rm_file, "--bed-regions", interval_bed,
"-c", vrn_file, "-t", rtg_ref, "-o", out_dir]
do.run(cmd, "Validate calls using rtg vcfeval", data)
return {"tp": os.path.join(out_dir, "tp.vcf.gz"),
"fp": os.path.join(out_dir, "fp.vcf.gz"),
"fn": os.path.join(out_dir, "fn.vcf.gz")}
def _get_merged_intervals(rm_interval_file, base_dir, data):
"""Retrieve intervals to run validation on, merging reference and callable BED files.
"""
a_intervals = get_analysis_intervals(data)
if a_intervals:
final_intervals = shared.remove_lcr_regions(a_intervals, [data])
if rm_interval_file:
sample, caller = _get_sample_and_caller(data)
combo_intervals = os.path.join(base_dir, "%s-%s-%s-wrm.bed" %
(utils.splitext_plus(os.path.basename(final_intervals))[0],
sample, caller))
if not utils.file_uptodate(combo_intervals, final_intervals):
with file_transaction(data, combo_intervals) as tx_out_file:
with utils.chdir(os.path.dirname(tx_out_file)):
# Copy files locally to avoid issues on shared filesystems
# where BEDtools has trouble accessing the same base
# files from multiple locations
a = os.path.basename(final_intervals)
b = os.path.basename(rm_interval_file)
try:
shutil.copyfile(final_intervals, a)
except IOError:
time.sleep(60)
shutil.copyfile(final_intervals, a)
try:
shutil.copyfile(rm_interval_file, b)
except IOError:
time.sleep(60)
shutil.copyfile(rm_interval_file, b)
cmd = ("bedtools intersect -nonamecheck -a {a} -b {b} > {tx_out_file}")
do.run(cmd.format(**locals()), "Intersect callable intervals for rtg vcfeval")
final_intervals = combo_intervals
else:
assert rm_interval_file, "No intervals to subset analysis with"
final_intervals = shared.remove_lcr_regions(rm_interval_file, [data])
return final_intervals
def get_analysis_intervals(data):
"""Retrieve analysis regions for the current variant calling pipeline.
"""
if data.get("ensemble_bed"):
return data["ensemble_bed"]
elif data.get("align_bam"):
return callable.sample_callable_bed(data["align_bam"], dd.get_ref_file(data), data)
elif data.get("work_bam"):
return callable.sample_callable_bed(data["work_bam"], dd.get_ref_file(data), data)
elif data.get("work_bam_callable"):
return callable.sample_callable_bed(data["work_bam_callable"], dd.get_ref_file(data), data)
else:
for key in ["callable_regions", "variant_regions"]:
intervals = data["config"]["algorithm"].get(key)
if intervals:
return intervals
# ## bcbio.variation comparison -- deprecated approach
def _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir, sample, caller, data):
"""Run validation of a caller against the truth set using bcbio.variation.
"""
val_config_file = _create_validate_config_file(vrn_file, rm_file, rm_interval_file,
base_dir, data)
work_dir = os.path.join(base_dir, "work")
out = {"summary": os.path.join(work_dir, "validate-summary.csv"),
"grading": os.path.join(work_dir, "validate-grading.yaml"),
"discordant": os.path.join(work_dir, "%s-eval-ref-discordance-annotate.vcf" % sample)}
if not utils.file_exists(out["discordant"]) or not utils.file_exists(out["grading"]):
bcbio_variation_comparison(val_config_file, base_dir, data)
out["concordant"] = filter(os.path.exists,
[os.path.join(work_dir, "%s-%s-concordance.vcf" % (sample, x))
for x in ["eval-ref", "ref-eval"]])[0]
return out
def bcbio_variation_comparison(config_file, base_dir, data):
"""Run a variant comparison using the bcbio.variation toolkit, given an input configuration.
"""
tmp_dir = utils.safe_makedir(os.path.join(base_dir, "tmp"))
bv_jar = config_utils.get_jar("bcbio.variation",
config_utils.get_program("bcbio_variation",
data["config"], "dir"))
resources = config_utils.get_resources("bcbio_variation", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])
cmd = ["java"] + jvm_opts + broad.get_default_jvm_opts(tmp_dir) + \
["-jar", bv_jar, "variant-compare", config_file]
do.run(cmd, "Comparing variant calls using bcbio.variation", data)
def _create_validate_config_file(vrn_file, rm_file, rm_interval_file,
base_dir, data):
config_dir = utils.safe_makedir(os.path.join(base_dir, "config"))
config_file = os.path.join(config_dir, "validate.yaml")
if not utils.file_uptodate(config_file, vrn_file):
with file_transaction(data, config_file) as tx_config_file:
with open(tx_config_file, "w") as out_handle:
out = _create_validate_config(vrn_file, rm_file, rm_interval_file,
base_dir, data)
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
return config_file
def _create_validate_config(vrn_file, rm_file, rm_interval_file, base_dir, data):
"""Create a bcbio.variation configuration input for validation.
"""
ref_call = {"file": str(rm_file), "name": "ref", "type": "grading-ref",
"fix-sample-header": True, "remove-refcalls": True}
a_intervals = get_analysis_intervals(data)
if a_intervals:
a_intervals = shared.remove_lcr_regions(a_intervals, [data])
if rm_interval_file:
ref_call["intervals"] = rm_interval_file
eval_call = {"file": vrn_file, "name": "eval", "remove-refcalls": True}
exp = {"sample": data["name"][-1],
"ref": dd.get_ref_file(data),
"approach": "grade",
"calls": [ref_call, eval_call]}
if a_intervals:
exp["intervals"] = os.path.abspath(a_intervals)
if data.get("align_bam"):
exp["align"] = data["align_bam"]
elif data.get("work_bam"):
exp["align"] = data["work_bam"]
return {"dir": {"base": base_dir, "out": "work", "prep": "work/prep"},
"experiments": [exp]}
# ## Summarize comparisons
def _flatten_grading(stats):
vtypes = ["snp", "indel"]
cat = "concordant"
for vtype in vtypes:
yield vtype, cat, stats[cat][cat].get(vtype, 0)
for vtype in vtypes:
for vclass, vitems in sorted(stats["discordant"].get(vtype, {}).iteritems()):
for vreason, val in sorted(vitems.iteritems()):
yield vtype, "discordant-%s-%s" % (vclass, vreason), val
yield vtype, "discordant-%s-total" % vclass, sum(vitems.itervalues())
def _has_grading_info(samples):
for data in (x[0] for x in samples):
for variant in data.get("variants", []):
if variant.get("validate"):
return True
return False
def _group_validate_samples(samples):
extras = []
validated = collections.defaultdict(list)
for data in (x[0] for x in samples):
is_v = False
for variant in data.get("variants", []):
if variant.get("validate"):
is_v = True
if is_v:
for batch_key in (["metadata", "validate_batch"], ["metadata", "batch"],
["description"]):
vname = tz.get_in(batch_key, data)
if vname:
break
if isinstance(vname, (list, tuple)):
vname = vname[0]
validated[vname].append(data)
else:
extras.append([data])
return validated, extras
def summarize_grading(samples):
"""Provide summaries of grading results across all samples.
"""
if not _has_grading_info(samples):
return samples
validate_dir = utils.safe_makedir(os.path.join(samples[0][0]["dirs"]["work"], "validate"))
header = ["sample", "caller", "variant.type", "category", "value"]
validated, out = _group_validate_samples(samples)
for vname, vitems in validated.iteritems():
out_csv = os.path.join(validate_dir, "grading-summary-%s.csv" % vname)
with open(out_csv, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(header)
plot_data = []
plot_files = []
for data in vitems:
for variant in data.get("variants", []):
if variant.get("validate"):
variant["validate"]["grading_summary"] = out_csv
if tz.get_in(["validate", "grading"], variant):
for row in _get_validate_plotdata_yaml(variant, data):
writer.writerow(row)
plot_data.append(row)
else:
plot_files.append(variant["validate"]["summary"])
if plot_files:
plots = validateplot.classifyplot_from_plotfiles(plot_files, out_csv)
elif plot_data:
plots = validateplot.create(plot_data, header, 0, data["config"],
os.path.splitext(out_csv)[0])
else:
plots = None
for data in vitems:
for variant in data.get("variants", []):
if variant.get("validate"):
variant["validate"]["grading_plots"] = plots
out.append([data])
return out
def _get_validate_plotdata_yaml(variant, data):
"""Retrieve validation plot data from grading YAML file (old style).
"""
with open(variant["validate"]["grading"]) as in_handle:
grade_stats = yaml.load(in_handle)
for sample_stats in grade_stats:
sample = sample_stats["sample"]
for vtype, cat, val in _flatten_grading(sample_stats):
yield [sample, variant.get("variantcaller", ""),
vtype, cat, val]
# ## Summarize by frequency
def freq_summary(val_file, call_file, truth_file, target_name):
"""Summarize true and false positive calls by variant type and frequency.
Resolve differences in true/false calls based on output from hap.py:
https://github.com/sequencing/hap.py
"""
out_file = "%s-freqs.csv" % utils.splitext_plus(val_file)[0]
truth_freqs = _read_truth_freqs(truth_file)
call_freqs = _read_call_freqs(call_file, target_name)
with VariantFile(val_file) as val_in:
with open(out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["vtype", "valclass", "freq"])
for rec in val_in:
call_type = _classify_rec(rec)
val_type = _get_validation_status(rec)
key = _get_key(rec)
freq = truth_freqs.get(key, call_freqs.get(key, 0.0))
writer.writerow([call_type, val_type, freq])
return out_file
def _get_key(rec):
return (rec.contig, rec.pos, rec.ref, rec.alts[0])
def _classify_rec(rec):
"""Determine class of variant in the record.
"""
if max([len(x) for x in rec.alleles]) == 1:
return "snp"
else:
return "indel"
def _get_validation_status(rec):
"""Retrieve the status of the validation, supporting hap.py output
"""
return rec.info["type"]
def _read_call_freqs(in_file, sample_name):
"""Identify frequencies for calls in the input file.
"""
out = {}
with VariantFile(in_file) as call_in:
for rec in call_in:
if rec.filter.keys() == ["PASS"]:
for name, sample in rec.samples.items():
if name == sample_name:
alt, depth = bubbletree.sample_alt_and_depth(sample)
if depth > 0:
out[_get_key(rec)] = float(alt) / float(depth)
return out
def _read_truth_freqs(in_file):
"""Read frequency of calls from truth VCF.
Currently handles DREAM data, needs generalization for other datasets.
"""
out = {}
with VariantFile(in_file) as bcf_in:
for rec in bcf_in:
freq = float(rec.info.get("VAF", 1.0))
out[_get_key(rec)] = freq
    return out

# ==== brice291/nn6 :: /nn6/__init__.py (MIT) ====
import numpy as np
from numpy.linalg import norm
# partial derivative of function f with respect to variable k: df / dk
def df(f, p, k, step=0.01):
    p1 = p.copy()
    p1[k] = p[k]+step
    return (f(p1) - f(p)) / step
# gradient of function f at point p
def grad(f, p, step=0.01):
    gp = p.copy()
    for k in range(len(p)):
        gp[k] = df(f, p, k, step)
    return gp
# find the minimum of a function using gradient descent
def gd(f, p0, step=0.001):
    p = p0.copy()
    while (True):
        gp = grad(f, p) # compute the gradient gp
        glen = norm(gp) # norm = length of the gradient (step size)
        print('p=', p, 'f(p)=', f(p), 'glen=', glen)
        if glen < 0.00001: # if the step is already very small, stop!
            break
        # gstep = np.mul(gp, -1 * step) # gstep = a small step against the gradient direction
        gstep = np.multiply(gp, -1*step)
        p += gstep # take a small step in the gstep direction
    return p # return the lowest point!
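
# Illustrative usage (not in the original module): minimize the quadratic
# f(p) = p[0]**2 + p[1]**2, whose minimum is at (0, 0):
# def f(p):
#     return p[0]**2 + p[1]**2
# print(gd(f, np.array([1.0, 3.0])))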

# ==== triroakenshield/ezdxf :: /examples/transform_ucs.py (MIT) ====
# Copyright (c) 2020-2021 Manfred Moitzi
# License: MIT License
import pathlib
import math
import ezdxf
from ezdxf import zoom
from ezdxf.math import UCS, Vec3
OUTDIR = pathlib.Path("~/Desktop/Outbox").expanduser()
NARROW = "OpenSansCondensed-Light"
X_COUNT = 7
Y_COUNT = 7
DX = 2
DY = 2
def add_circle(msp, ucs):
msp.add_circle(
center=(0, 0),
radius=0.5,
dxfattribs={
"color": 6,
},
).transform(ucs.matrix)
def add_ocs_circle(msp, ucs):
msp.add_circle(
center=(0, 0, 0.5),
radius=0.25,
dxfattribs={
"color": 6,
"extrusion": (1, 0, 0),
},
).transform(ucs.matrix)
def add_ellipse(msp, ucs):
msp.add_ellipse(
center=(0, 0),
major_axis=(0.5, 0, 0),
ratio=0.5,
start_param=0,
end_param=math.pi,
dxfattribs={
"color": 1,
},
).transform(ucs.matrix)
def add_ocs_arc(msp, ucs):
msp.add_arc(
center=(0, 0, 0.5),
radius=0.25,
start_angle=0,
end_angle=90,
dxfattribs={
"color": 4,
"extrusion": (-1, 0, 0),
},
).transform(ucs.matrix)
def add_solid(msp, ucs):
msp.add_solid(
[(-0.25, -0.15), (0.25, -0.15), (0, -0.5)], dxfattribs={"color": 2}
).transform(ucs.matrix)
def add_trace(msp, ucs):
msp.add_trace(
[(-0.25, 0.15), (0.25, 0.15), (0, 0.5)], dxfattribs={"color": 7}
).transform(ucs.matrix)
def add_3dface(msp, ucs):
msp.add_3dface(
[(0, 0, 0), (0.5, 0.5, 0), (0.5, 0.5, 0.5), (0, 0, 0.5)],
dxfattribs={"color": 8},
).transform(ucs.matrix)
def add_lwpolyline(msp, ucs):
msp.add_lwpolyline(
[(0, 0, 0), (0.3, 0, 1), (0.3, 0.3, 0), (0, 0.3, 0)],
format="xyb",
dxfattribs={"color": 6},
).transform(ucs.matrix)
def add_text(msp, ucs):
msp.add_text(
"TEXT",
dxfattribs={
"color": 4,
"style": NARROW,
"height": 0.2,
},
).set_align("MIDDLE_CENTER").transform(ucs.matrix)
def add_mtext(msp, ucs):
# It is always better to use text_direction instead of a rotation angle,
# which works only for extrusion == (0, 0, 1)
msp.add_mtext(
"MTEXT",
dxfattribs={
"color": 5,
"style": NARROW,
"char_height": 0.2,
"insert": (0, 0),
"rotation": 90,
"attachment_point": 4,
},
).transform(ucs.matrix)
def scene1(filename):
doc = ezdxf.new("R2010", setup=True)
msp = doc.modelspace()
ucs = UCS()
angle = math.pi / 12 # 15 degree
for ix in range(X_COUNT):
for iy in range(Y_COUNT):
ucs.moveto((ix * DX, iy * DY, 0))
ucs.render_axis(msp, length=1)
add_circle(msp, ucs)
# add_ocs_circle(msp, ucs)
# add_ocs_arc(msp, ucs)
# add_text(msp, ucs)
add_mtext(msp, ucs)
add_ellipse(msp, ucs)
# add_solid(msp, ucs)
add_trace(msp, ucs)
# add_3dface(msp, ucs)
# add_lwpolyline(msp, ucs)
ucs = ucs.rotate_local_z(angle)
ucs = UCS().rotate_local_x(ix * angle)
zoom.extents(msp)
doc.saveas(filename)
def add_excentric_text(msp, ucs, location, text):
text = msp.add_mtext(
text,
dxfattribs={
"color": 5,
"style": NARROW,
"char_height": 0.2,
"insert": location,
"attachment_point": 5,
},
)
text.transform(ucs.matrix)
msp.add_line(
start=(0, 0, 0), end=(location.x, 0, 0), dxfattribs={"color": 1}
).transform(ucs.matrix)
msp.add_line(
start=(location.x, 0, 0),
end=(location.x, location.y, 0),
dxfattribs={"color": 3},
).transform(ucs.matrix)
msp.add_line(
start=(location.x, location.y, 0),
end=(location.x, location.y, location.z),
dxfattribs={"color": 5},
).transform(ucs.matrix)
def scene2(filename):
doc = ezdxf.new("R2010", setup=True)
msp = doc.modelspace()
delta = 6
for z in range(-2, 3):
for y in range(-2, 3):
for x in range(-2, 3):
cx = x * delta
cy = y * delta
cz = z * delta
ucs = (
UCS(origin=(cx, cy, cz))
.rotate_local_z(math.radians(45))
.rotate_local_x(math.radians(30))
)
add_excentric_text(
msp,
ucs,
location=Vec3(1, 2, 3),
text=f"Hallo\n(x={cx}, y={cy}, z={cz})",
)
zoom.extents(msp)
doc.saveas(filename)
if __name__ == "__main__":
scene1(OUTDIR / "transform_scene_1.dxf")
scene2(OUTDIR / "transform_scene_2.dxf")
| [
"[email protected]"
] | |
a76426fce9150b66d25e45f27cf70a5f4ef87ffc | 99e44f844d78de330391f2b17bbf2e293bf24b1b | /pytorch/torch/optim/lr_scheduler.py | 69c0da4e36cb9c95170818a997cced0fb0e7c5d2 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | permissive | raghavnauhria/whatmt | be10d57bcd6134dd5714d0c4058abd56a1b35a13 | c20483a437c82936cb0fb8080925e37b9c4bba87 | refs/heads/master | 2022-12-04T05:39:24.601698 | 2019-07-22T09:43:30 | 2019-07-22T09:43:30 | 193,026,689 | 0 | 1 | MIT | 2022-11-28T17:50:19 | 2019-06-21T03:48:20 | C++ | UTF-8 | Python | false | false | 32,251 | py | import types
import math
from torch._six import inf
from functools import partial, wraps
import warnings
from bisect import bisect_right
from .optimizer import Optimizer
class _LRScheduler(object):
def __init__(self, optimizer, last_epoch=-1):
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
if last_epoch == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
last_epoch = 0
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
self.last_epoch = last_epoch
# Following https://github.com/pytorch/pytorch/issues/20124
# We would like to ensure that `lr_scheduler.step()` is called after
# `optimizer.step()`
def with_counter(func, opt):
@wraps(func)
def wrapper(*args, **kwargs):
opt._step_count += 1
return func(*args, **kwargs)
wrapper._with_counter = True
return wrapper
self.optimizer.step = with_counter(self.optimizer.step, self.optimizer)
self.optimizer._step_count = 0
self._step_count = 0
self.step(last_epoch)
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_lr(self):
raise NotImplementedError
def step(self, epoch=None):
# Raise a warning if old pattern is detected
# https://github.com/pytorch/pytorch/issues/20124
if self._step_count == 1:
if not hasattr(self.optimizer.step, "_with_counter"):
warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
"initialization. Please, make sure to call `optimizer.step()` before "
"`lr_scheduler.step()`. See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
# Just check if there were two first lr_scheduler.step() calls before optimizer.step()
elif self.optimizer._step_count < 1:
warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
"In PyTorch 1.1.0 and later, you should call them in the opposite order: "
"`optimizer.step()` before `lr_scheduler.step()`. Failure to do this "
"will result in PyTorch skipping the first value of the learning rate schedule."
"See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
self._step_count += 1
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
class LambdaLR(_LRScheduler):
"""Sets the learning rate of each parameter group to the initial lr
times a given function. When last_epoch=-1, sets initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
lr_lambda (function or list): A function which computes a multiplicative
factor given an integer parameter epoch, or a list of such
functions, one for each group in optimizer.param_groups.
last_epoch (int): The index of last epoch. Default: -1.
Example:
>>> # Assuming optimizer has two groups.
>>> lambda1 = lambda epoch: epoch // 30
>>> lambda2 = lambda epoch: 0.95 ** epoch
>>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2])
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
"""
def __init__(self, optimizer, lr_lambda, last_epoch=-1):
self.optimizer = optimizer
if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple):
self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
else:
if len(lr_lambda) != len(optimizer.param_groups):
raise ValueError("Expected {} lr_lambdas, but got {}".format(
len(optimizer.param_groups), len(lr_lambda)))
self.lr_lambdas = list(lr_lambda)
self.last_epoch = last_epoch
super(LambdaLR, self).__init__(optimizer, last_epoch)
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
The learning rate lambda functions will only be saved if they are callable objects
and not if they are functions or lambdas.
"""
state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')}
state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas)
for idx, fn in enumerate(self.lr_lambdas):
if not isinstance(fn, types.FunctionType):
state_dict['lr_lambdas'][idx] = fn.__dict__.copy()
return state_dict
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
lr_lambdas = state_dict.pop('lr_lambdas')
self.__dict__.update(state_dict)
for idx, fn in enumerate(lr_lambdas):
if fn is not None:
self.lr_lambdas[idx].__dict__.update(fn)
def get_lr(self):
return [base_lr * lmbda(self.last_epoch)
for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs)]
class StepLR(_LRScheduler):
"""Sets the learning rate of each parameter group to the initial lr
decayed by gamma every step_size epochs. When last_epoch=-1, sets
initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
step_size (int): Period of learning rate decay.
gamma (float): Multiplicative factor of learning rate decay.
Default: 0.1.
last_epoch (int): The index of last epoch. Default: -1.
Example:
>>> # Assuming optimizer uses lr = 0.05 for all groups
>>> # lr = 0.05 if epoch < 30
>>> # lr = 0.005 if 30 <= epoch < 60
>>> # lr = 0.0005 if 60 <= epoch < 90
>>> # ...
>>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1)
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
"""
def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=-1):
self.step_size = step_size
self.gamma = gamma
super(StepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
return [base_lr * self.gamma ** (self.last_epoch // self.step_size)
for base_lr in self.base_lrs]
class MultiStepLR(_LRScheduler):
"""Set the learning rate of each parameter group to the initial lr decayed
by gamma once the number of epoch reaches one of the milestones. When
last_epoch=-1, sets initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
milestones (list): List of epoch indices. Must be increasing.
gamma (float): Multiplicative factor of learning rate decay.
Default: 0.1.
last_epoch (int): The index of last epoch. Default: -1.
Example:
>>> # Assuming optimizer uses lr = 0.05 for all groups
>>> # lr = 0.05 if epoch < 30
>>> # lr = 0.005 if 30 <= epoch < 80
>>> # lr = 0.0005 if epoch >= 80
>>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1)
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
"""
def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1):
if not list(milestones) == sorted(milestones):
raise ValueError('Milestones should be a list of'
' increasing integers. Got {}', milestones)
self.milestones = milestones
self.gamma = gamma
super(MultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
return [base_lr * self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs]
class ExponentialLR(_LRScheduler):
"""Set the learning rate of each parameter group to the initial lr decayed
by gamma every epoch. When last_epoch=-1, sets initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
gamma (float): Multiplicative factor of learning rate decay.
last_epoch (int): The index of last epoch. Default: -1.
"""
def __init__(self, optimizer, gamma, last_epoch=-1):
self.gamma = gamma
super(ExponentialLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
return [base_lr * self.gamma ** self.last_epoch
for base_lr in self.base_lrs]
class CosineAnnealingLR(_LRScheduler):
r"""Set the learning rate of each parameter group using a cosine annealing
schedule, where :math:`\eta_{max}` is set to the initial lr and
:math:`T_{cur}` is the number of epochs since the last restart in SGDR:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
\cos(\frac{T_{cur}}{T_{max}}\pi))
When last_epoch=-1, sets initial lr as lr.
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only
implements the cosine annealing part of SGDR, and not the restarts.
Args:
optimizer (Optimizer): Wrapped optimizer.
T_max (int): Maximum number of iterations.
eta_min (float): Minimum learning rate. Default: 0.
last_epoch (int): The index of last epoch. Default: -1.
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""
def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):
self.T_max = T_max
self.eta_min = eta_min
super(CosineAnnealingLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
return [self.eta_min + (base_lr - self.eta_min) *
(1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2
for base_lr in self.base_lrs]
class ReduceLROnPlateau(object):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This scheduler reads a metrics
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Args:
optimizer (Optimizer): Wrapped optimizer.
mode (str): One of `min`, `max`. In `min` mode, lr will
be reduced when the quantity monitored has stopped
decreasing; in `max` mode it will be reduced when the
quantity monitored has stopped increasing. Default: 'min'.
factor (float): Factor by which the learning rate will be
reduced. new_lr = lr * factor. Default: 0.1.
patience (int): Number of epochs with no improvement after
which learning rate will be reduced. For example, if
`patience = 2`, then we will ignore the first 2 epochs
with no improvement, and will only decrease the LR after the
3rd epoch if the loss still hasn't improved then.
Default: 10.
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
threshold (float): Threshold for measuring the new optimum,
to only focus on significant changes. Default: 1e-4.
threshold_mode (str): One of `rel`, `abs`. In `rel` mode,
dynamic_threshold = best * ( 1 + threshold ) in 'max'
mode or best * ( 1 - threshold ) in `min` mode.
In `abs` mode, dynamic_threshold = best + threshold in
`max` mode or best - threshold in `min` mode. Default: 'rel'.
cooldown (int): Number of epochs to wait before resuming
normal operation after lr has been reduced. Default: 0.
min_lr (float or list): A scalar or a list of scalars. A
lower bound on the learning rate of all param groups
or each group respectively. Default: 0.
eps (float): Minimal decay applied to lr. If the difference
between new and old lr is smaller than eps, the update is
ignored. Default: 1e-8.
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = ReduceLROnPlateau(optimizer, 'min')
>>> for epoch in range(10):
>>> train(...)
>>> val_loss = validate(...)
>>> # Note that step should be called after validate()
>>> scheduler.step(val_loss)
"""
def __init__(self, optimizer, mode='min', factor=0.1, patience=10,
verbose=False, threshold=1e-4, threshold_mode='rel',
cooldown=0, min_lr=0, eps=1e-8):
if factor >= 1.0:
raise ValueError('Factor should be < 1.0.')
self.factor = factor
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
if isinstance(min_lr, list) or isinstance(min_lr, tuple):
if len(min_lr) != len(optimizer.param_groups):
raise ValueError("expected {} min_lrs, got {}".format(
len(optimizer.param_groups), len(min_lr)))
self.min_lrs = list(min_lr)
else:
self.min_lrs = [min_lr] * len(optimizer.param_groups)
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0
self.mode = mode
self.threshold = threshold
self.threshold_mode = threshold_mode
self.best = None
self.num_bad_epochs = None
self.mode_worse = None # the worse value for the chosen mode
self.is_better = None
self.eps = eps
self.last_epoch = -1
self._init_is_better(mode=mode, threshold=threshold,
threshold_mode=threshold_mode)
self._reset()
def _reset(self):
"""Resets num_bad_epochs counter and cooldown counter."""
self.best = self.mode_worse
self.cooldown_counter = 0
self.num_bad_epochs = 0
def step(self, metrics, epoch=None):
# convert `metrics` to float, in case it's a zero-dim Tensor
current = float(metrics)
if epoch is None:
epoch = self.last_epoch = self.last_epoch + 1
self.last_epoch = epoch
if self.is_better(current, self.best):
self.best = current
self.num_bad_epochs = 0
else:
self.num_bad_epochs += 1
if self.in_cooldown:
self.cooldown_counter -= 1
self.num_bad_epochs = 0 # ignore any bad epochs in cooldown
if self.num_bad_epochs > self.patience:
self._reduce_lr(epoch)
self.cooldown_counter = self.cooldown
self.num_bad_epochs = 0
def _reduce_lr(self, epoch):
for i, param_group in enumerate(self.optimizer.param_groups):
old_lr = float(param_group['lr'])
new_lr = max(old_lr * self.factor, self.min_lrs[i])
if old_lr - new_lr > self.eps:
param_group['lr'] = new_lr
if self.verbose:
print('Epoch {:5d}: reducing learning rate'
' of group {} to {:.4e}.'.format(epoch, i, new_lr))
@property
def in_cooldown(self):
return self.cooldown_counter > 0
def _cmp(self, mode, threshold_mode, threshold, a, best):
if mode == 'min' and threshold_mode == 'rel':
rel_epsilon = 1. - threshold
return a < best * rel_epsilon
elif mode == 'min' and threshold_mode == 'abs':
return a < best - threshold
elif mode == 'max' and threshold_mode == 'rel':
rel_epsilon = threshold + 1.
return a > best * rel_epsilon
else: # mode == 'max' and epsilon_mode == 'abs':
return a > best + threshold
def _init_is_better(self, mode, threshold, threshold_mode):
if mode not in {'min', 'max'}:
raise ValueError('mode ' + mode + ' is unknown!')
if threshold_mode not in {'rel', 'abs'}:
raise ValueError('threshold mode ' + threshold_mode + ' is unknown!')
if mode == 'min':
self.mode_worse = inf
else: # mode == 'max':
self.mode_worse = -inf
self.is_better = partial(self._cmp, mode, threshold_mode, threshold)
def state_dict(self):
return {key: value for key, value in self.__dict__.items() if key not in {'optimizer', 'is_better'}}
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
self._init_is_better(mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode)
class CyclicLR(_LRScheduler):
"""Sets the learning rate of each parameter group according to
cyclical learning rate policy (CLR). The policy cycles the learning
rate between two boundaries with a constant frequency, as detailed in
the paper `Cyclical Learning Rates for Training Neural Networks`_.
The distance between the two boundaries can be scaled on a per-iteration
or per-cycle basis.
Cyclical learning rate policy changes the learning rate after every batch.
`step` should be called after a batch has been used for training.
This class has three built-in policies, as put forth in the paper:
"triangular":
A basic triangular cycle w/ no amplitude scaling.
"triangular2":
A basic triangular cycle that scales initial amplitude by half each cycle.
"exp_range":
A cycle that scales initial amplitude by gamma**(cycle iterations) at each
cycle iteration.
This implementation was adapted from the github repo: `bckenstler/CLR`_
Args:
optimizer (Optimizer): Wrapped optimizer.
base_lr (float or list): Initial learning rate which is the
lower boundary in the cycle for each parameter group.
max_lr (float or list): Upper learning rate boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (max_lr - base_lr).
The lr at any cycle is the sum of base_lr
and some scaling of the amplitude; therefore
max_lr may not actually be reached depending on
scaling function.
step_size_up (int): Number of training iterations in the
increasing half of a cycle. Default: 2000
step_size_down (int): Number of training iterations in the
decreasing half of a cycle. If step_size_down is None,
it is set to step_size_up. Default: None
mode (str): One of {triangular, triangular2, exp_range}.
Values correspond to policies detailed above.
If scale_fn is not None, this argument is ignored.
Default: 'triangular'
gamma (float): Constant in 'exp_range' scaling function:
gamma**(cycle iterations)
Default: 1.0
scale_fn (function): Custom scaling policy defined by a single
argument lambda function, where
0 <= scale_fn(x) <= 1 for all x >= 0.
If specified, then 'mode' is ignored.
Default: None
scale_mode (str): {'cycle', 'iterations'}.
Defines whether scale_fn is evaluated on
cycle number or cycle iterations (training
iterations since start of cycle).
Default: 'cycle'
cycle_momentum (bool): If ``True``, momentum is cycled inversely
to learning rate between 'base_momentum' and 'max_momentum'.
Default: True
base_momentum (float or list): Lower momentum boundaries in the cycle
for each parameter group. Note that momentum is cycled inversely
to learning rate; at the peak of a cycle, momentum is
'base_momentum' and learning rate is 'max_lr'.
Default: 0.8
max_momentum (float or list): Upper momentum boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (max_momentum - base_momentum).
The momentum at any cycle is the difference of max_momentum
and some scaling of the amplitude; therefore
base_momentum may not actually be reached depending on
scaling function. Note that momentum is cycled inversely
to learning rate; at the start of a cycle, momentum is 'max_momentum'
and learning rate is 'base_lr'
Default: 0.9
last_epoch (int): The index of the last batch. This parameter is used when
resuming a training job. Since `step()` should be invoked after each
batch instead of after each epoch, this number represents the total
number of *batches* computed, not the total number of epochs computed.
When last_epoch=-1, the schedule is started from the beginning.
Default: -1
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1)
>>> data_loader = torch.utils.data.DataLoader(...)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> scheduler.step()
.. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186
.. _bckenstler/CLR: https://github.com/bckenstler/CLR
"""
def __init__(self,
optimizer,
base_lr,
max_lr,
step_size_up=2000,
step_size_down=None,
mode='triangular',
gamma=1.,
scale_fn=None,
scale_mode='cycle',
cycle_momentum=True,
base_momentum=0.8,
max_momentum=0.9,
last_epoch=-1):
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
base_lrs = self._format_param('base_lr', optimizer, base_lr)
if last_epoch == -1:
for lr, group in zip(base_lrs, optimizer.param_groups):
group['lr'] = lr
self.max_lrs = self._format_param('max_lr', optimizer, max_lr)
step_size_up = float(step_size_up)
step_size_down = float(step_size_down) if step_size_down is not None else step_size_up
self.total_size = step_size_up + step_size_down
self.step_ratio = step_size_up / self.total_size
if mode not in ['triangular', 'triangular2', 'exp_range'] \
and scale_fn is None:
raise ValueError('mode is invalid and scale_fn is None')
self.mode = mode
self.gamma = gamma
if scale_fn is None:
if self.mode == 'triangular':
self.scale_fn = self._triangular_scale_fn
self.scale_mode = 'cycle'
elif self.mode == 'triangular2':
self.scale_fn = self._triangular2_scale_fn
self.scale_mode = 'cycle'
elif self.mode == 'exp_range':
self.scale_fn = self._exp_range_scale_fn
self.scale_mode = 'iterations'
else:
self.scale_fn = scale_fn
self.scale_mode = scale_mode
self.cycle_momentum = cycle_momentum
if cycle_momentum:
if 'momentum' not in optimizer.defaults:
raise ValueError('optimizer must support momentum with `cycle_momentum` option enabled')
base_momentums = self._format_param('base_momentum', optimizer, base_momentum)
if last_epoch == -1:
for momentum, group in zip(base_momentums, optimizer.param_groups):
group['momentum'] = momentum
self.base_momentums = list(map(lambda group: group['momentum'], optimizer.param_groups))
self.max_momentums = self._format_param('max_momentum', optimizer, max_momentum)
super(CyclicLR, self).__init__(optimizer, last_epoch)
def _format_param(self, name, optimizer, param):
"""Return correctly formatted lr/momentum for each param group."""
if isinstance(param, (list, tuple)):
if len(param) != len(optimizer.param_groups):
raise ValueError("expected {} values for {}, got {}".format(
len(optimizer.param_groups), name, len(param)))
return param
else:
return [param] * len(optimizer.param_groups)
def _triangular_scale_fn(self, x):
return 1.
def _triangular2_scale_fn(self, x):
return 1 / (2. ** (x - 1))
def _exp_range_scale_fn(self, x):
return self.gamma**(x)
def get_lr(self):
"""Calculates the learning rate at batch index. This function treats
`self.last_epoch` as the last batch index.
If `self.cycle_momentum` is ``True``, this function has a side effect of
updating the optimizer's momentum.
"""
cycle = math.floor(1 + self.last_epoch / self.total_size)
x = 1. + self.last_epoch / self.total_size - cycle
if x <= self.step_ratio:
scale_factor = x / self.step_ratio
else:
scale_factor = (x - 1) / (self.step_ratio - 1)
lrs = []
for base_lr, max_lr in zip(self.base_lrs, self.max_lrs):
base_height = (max_lr - base_lr) * scale_factor
if self.scale_mode == 'cycle':
lr = base_lr + base_height * self.scale_fn(cycle)
else:
lr = base_lr + base_height * self.scale_fn(self.last_epoch)
lrs.append(lr)
if self.cycle_momentum:
momentums = []
for base_momentum, max_momentum in zip(self.base_momentums, self.max_momentums):
base_height = (max_momentum - base_momentum) * scale_factor
if self.scale_mode == 'cycle':
momentum = max_momentum - base_height * self.scale_fn(cycle)
else:
momentum = max_momentum - base_height * self.scale_fn(self.last_epoch)
momentums.append(momentum)
for param_group, momentum in zip(self.optimizer.param_groups, momentums):
param_group['momentum'] = momentum
return lrs
class CosineAnnealingWarmRestarts(_LRScheduler):
r"""Set the learning rate of each parameter group using a cosine annealing
schedule, where :math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}`
is the number of epochs since the last restart and :math:`T_{i}` is the number
of epochs between two warm restarts in SGDR:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
\cos(\frac{T_{cur}}{T_{i}}\pi))
When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`.
When :math:`T_{cur}=0`(after restart), set :math:`\eta_t=\eta_{max}`.
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_.
Args:
optimizer (Optimizer): Wrapped optimizer.
T_0 (int): Number of iterations for the first restart.
T_mult (int, optional): A factor increases :math:`T_{i}` after a restart. Default: 1.
eta_min (float, optional): Minimum learning rate. Default: 0.
last_epoch (int, optional): The index of last epoch. Default: -1.
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""
def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1):
if T_0 <= 0 or not isinstance(T_0, int):
raise ValueError("Expected positive integer T_0, but got {}".format(T_0))
if T_mult < 1 or not isinstance(T_mult, int):
raise ValueError("Expected integer T_mult >= 1, but got {}".format(T_mult))
self.T_0 = T_0
self.T_i = T_0
self.T_mult = T_mult
self.eta_min = eta_min
super(CosineAnnealingWarmRestarts, self).__init__(optimizer, last_epoch)
self.T_cur = last_epoch
def get_lr(self):
return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2
for base_lr in self.base_lrs]
def step(self, epoch=None):
"""Step could be called after every batch update
Example:
>>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
>>> iters = len(dataloader)
>>> for epoch in range(20):
>>> for i, sample in enumerate(dataloader):
>>> inputs, labels = sample['inputs'], sample['labels']
>>> scheduler.step(epoch + i / iters)
>>> optimizer.zero_grad()
>>> outputs = net(inputs)
>>> loss = criterion(outputs, labels)
>>> loss.backward()
>>> optimizer.step()
This function can be called in an interleaved way.
Example:
>>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
>>> for epoch in range(20):
>>> scheduler.step()
>>> scheduler.step(26)
>>> scheduler.step() # scheduler.step(27), instead of scheduler(20)
"""
if epoch is None:
epoch = self.last_epoch + 1
self.T_cur = self.T_cur + 1
if self.T_cur >= self.T_i:
self.T_cur = self.T_cur - self.T_i
self.T_i = self.T_i * self.T_mult
else:
if epoch < 0:
raise ValueError("Expected non-negative epoch, but got {}".format(epoch))
if epoch >= self.T_0:
if self.T_mult == 1:
self.T_cur = epoch % self.T_0
else:
n = int(math.log((epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult))
self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / (self.T_mult - 1)
self.T_i = self.T_0 * self.T_mult ** (n)
else:
self.T_i = self.T_0
self.T_cur = epoch
self.last_epoch = math.floor(epoch)
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
| [
"[email protected]"
] | |
35d2c662fbb29329f46cc039d12df65fcc2e402e | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_337/ch78_2020_04_12_01_52_05_952994.py | 618d2e41901705a68a61d1e4af17bec9060603e5 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | nome = input('Nome?')
atletas = {}
while nome != 'sair':
    a = float(input('aceleração?'))
    atletas[nome] = a
    nome = input('Nome?')
# time to cover 100 m from rest at constant acceleration a:
# t = sqrt(2 * 100 / a) = sqrt(200 / a)
dic = {}
for k in atletas:
    dic[k] = (200 / atletas[k]) ** (1 / 2)
# the winner is the athlete with the smallest time
vence = min(dic.values())
for q in dic:
    if dic[q] == vence:
        atleta = q
"[email protected]"
] | |
6da4091fd8ac8ad313d1b7259bd84c569f0a6e08 | 920f0fbb7064f2017ff62da372eaf79ddcc9035b | /lc_ladder/company/amzn/oa/Search_A_2D_Matrix.py | 617d0c9cea04baca4bd662f4ceb49c9887b4a548 | [] | no_license | JenZhen/LC | b29a1c45d8c905680c7b4ad0017516b3dca80cc4 | 85219de95e41551fce5af816b66643495fe51e01 | refs/heads/master | 2021-06-03T10:03:02.901376 | 2020-08-05T19:44:48 | 2020-08-05T19:44:48 | 104,683,578 | 3 | 1 | null | 2020-08-05T19:44:50 | 2017-09-24T23:30:35 | Python | UTF-8 | Python | false | false | 1,980 | py | #! /usr/local/bin/python3
# Requirement
# Example
# Write an efficient algorithm that searches for a value in an m x n matrix.
#
# This matrix has the following properties:
#
# Integers in each row are sorted from left to right.
# The first integer of each row is greater than the last integer of the previous row.
# Example
# Consider the following matrix:
#
# [
#     [1, 3, 5, 7],
#     [10, 11, 16, 20],
#     [23, 30, 34, 50]
# ]
# Given target = 3, return true
#
# Challenge
# O(log(n) + log(m)) time complexity
"""
Algo: Binary Search
D.S.: none (search the matrix in place)
Solution: binary search for the candidate row, then binary search within that row
Time: O(log(n) + log(m))
Corner cases: empty matrix, target outside the range of every row
"""
class Solution:
"""
@param matrix: matrix, a list of lists of integers
@param target: An integer
@return: a boolean, indicate whether matrix contains target
"""
def searchMatrix(self, matrix, target):
# write your code here
if not matrix or not matrix[0] or target is None:
return False
m, n = len(matrix), len(matrix[0])
        # binary search for the row that may contain the target
l, r = 0, m - 1
row, col = -1, -1
while l + 1 < r:
mid = l + (r - l) // 2
if matrix[mid][0] <= target <= matrix[mid][n - 1]:
row = mid
break
elif target < matrix[mid][0]:
r = mid - 1
else:
l = mid + 1
if matrix[l][0] <= target <= matrix[l][n - 1]:
row = l
if matrix[r][0] <= target <= matrix[r][n - 1]:
row = r
if row == -1:
return False
        # binary search for the column within that row
l, r = 0, n - 1
while l + 1 < r:
mid = l + (r - l) // 2
if matrix[row][mid] == target:
return True
elif target < matrix[row][mid]:
r = mid - 1
else:
l = mid + 1
if matrix[row][l] == target or matrix[row][r] == target:
return True
return False
# Test Cases
if __name__ == "__main__":
solution = Solution()
| [
"[email protected]"
] | |
261532ca2c00510c5ce9c8dbe6e2c6d3433977c6 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/HH3C-MP-MIB.py | 7952f4768e11aeef8d12a312c4032a70c9ce1d05 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 8,080 | py | #
# PySNMP MIB module HH3C-MP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HH3C-MP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:15:35 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint")
hh3cRhw, = mibBuilder.importSymbols("HH3C-OID-MIB", "hh3cRhw")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
NotificationType, Gauge32, TimeTicks, Counter64, ObjectIdentity, IpAddress, Integer32, Bits, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Unsigned32, MibIdentifier, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Gauge32", "TimeTicks", "Counter64", "ObjectIdentity", "IpAddress", "Integer32", "Bits", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Unsigned32", "MibIdentifier", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
hh3cMultilinkPPP = ModuleIdentity((1, 3, 6, 1, 4, 1, 25506, 8, 33))
if mibBuilder.loadTexts: hh3cMultilinkPPP.setLastUpdated('200405180000Z')
if mibBuilder.loadTexts: hh3cMultilinkPPP.setOrganization('Hangzhou H3C Tech. Co., Ltd.')
hh3cMpObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1))
hh3cMpMultilinkTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1), )
if mibBuilder.loadTexts: hh3cMpMultilinkTable.setStatus('current')
hh3cMpMultilinkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hh3cMpMultilinkEntry.setStatus('current')
hh3cMpMultilinkDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpMultilinkDescr.setStatus('current')
hh3cMpBundleName = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpBundleName.setStatus('current')
hh3cMpBundledSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpBundledSlot.setStatus('current')
hh3cMpBundledMemberCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpBundledMemberCnt.setStatus('current')
hh3cMpLostFragments = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpLostFragments.setStatus('current')
hh3cMpReorderedPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpReorderedPkts.setStatus('current')
hh3cMpUnassignedPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpUnassignedPkts.setStatus('current')
hh3cMpInterleavedPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpInterleavedPkts.setStatus('current')
hh3cMpRcvdSequence = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpRcvdSequence.setStatus('current')
hh3cMpSentSequence = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpSentSequence.setStatus('current')
hh3cMpMemberlinkTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 2), )
if mibBuilder.loadTexts: hh3cMpMemberlinkTable.setStatus('current')
hh3cMpMemberlinkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HH3C-MP-MIB", "hh3cMpMemberlinkSeqNumber"))
if mibBuilder.loadTexts: hh3cMpMemberlinkEntry.setStatus('current')
hh3cMpMemberlinkSeqNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpMemberlinkSeqNumber.setStatus('current')
hh3cMpMemberlinkIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpMemberlinkIfIndex.setStatus('current')
hh3cMpMemberlinkDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpMemberlinkDescr.setStatus('current')
hh3cMpMemberlinkMpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 8, 33, 1, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cMpMemberlinkMpStatus.setStatus('current')
hh3cMpNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 33, 2))
hh3cMpConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 33, 3))
hh3cMpCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 33, 3, 1))
hh3cMpCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 25506, 8, 33, 3, 1, 1)).setObjects(("HH3C-MP-MIB", "hh3cMpMandatoryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hh3cMpCompliance = hh3cMpCompliance.setStatus('current')
hh3cMpGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 33, 3, 2))
hh3cMpMandatoryGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 25506, 8, 33, 3, 2, 1)).setObjects(("HH3C-MP-MIB", "hh3cMpBundledMemberCnt"), ("HH3C-MP-MIB", "hh3cMpMemberlinkSeqNumber"), ("HH3C-MP-MIB", "hh3cMpMemberlinkIfIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hh3cMpMandatoryGroup = hh3cMpMandatoryGroup.setStatus('current')
hh3cMpInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 25506, 8, 33, 3, 2, 2)).setObjects(("HH3C-MP-MIB", "hh3cMpMultilinkDescr"), ("HH3C-MP-MIB", "hh3cMpBundleName"), ("HH3C-MP-MIB", "hh3cMpBundledSlot"), ("HH3C-MP-MIB", "hh3cMpBundledMemberCnt"), ("HH3C-MP-MIB", "hh3cMpLostFragments"), ("HH3C-MP-MIB", "hh3cMpReorderedPkts"), ("HH3C-MP-MIB", "hh3cMpUnassignedPkts"), ("HH3C-MP-MIB", "hh3cMpInterleavedPkts"), ("HH3C-MP-MIB", "hh3cMpRcvdSequence"), ("HH3C-MP-MIB", "hh3cMpSentSequence"), ("HH3C-MP-MIB", "hh3cMpMemberlinkDescr"), ("HH3C-MP-MIB", "hh3cMpMemberlinkMpStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hh3cMpInfoGroup = hh3cMpInfoGroup.setStatus('current')
mibBuilder.exportSymbols("HH3C-MP-MIB", hh3cMpMemberlinkTable=hh3cMpMemberlinkTable, hh3cMpNotifications=hh3cMpNotifications, hh3cMpBundleName=hh3cMpBundleName, hh3cMpMultilinkEntry=hh3cMpMultilinkEntry, hh3cMpInterleavedPkts=hh3cMpInterleavedPkts, hh3cMpCompliances=hh3cMpCompliances, hh3cMpMultilinkTable=hh3cMpMultilinkTable, hh3cMpMemberlinkDescr=hh3cMpMemberlinkDescr, hh3cMultilinkPPP=hh3cMultilinkPPP, hh3cMpBundledMemberCnt=hh3cMpBundledMemberCnt, hh3cMpObjects=hh3cMpObjects, hh3cMpUnassignedPkts=hh3cMpUnassignedPkts, hh3cMpMemberlinkSeqNumber=hh3cMpMemberlinkSeqNumber, hh3cMpMemberlinkEntry=hh3cMpMemberlinkEntry, hh3cMpCompliance=hh3cMpCompliance, hh3cMpSentSequence=hh3cMpSentSequence, hh3cMpMultilinkDescr=hh3cMpMultilinkDescr, hh3cMpLostFragments=hh3cMpLostFragments, hh3cMpMemberlinkIfIndex=hh3cMpMemberlinkIfIndex, hh3cMpConformance=hh3cMpConformance, hh3cMpRcvdSequence=hh3cMpRcvdSequence, hh3cMpMemberlinkMpStatus=hh3cMpMemberlinkMpStatus, PYSNMP_MODULE_ID=hh3cMultilinkPPP, hh3cMpGroups=hh3cMpGroups, hh3cMpInfoGroup=hh3cMpInfoGroup, hh3cMpMandatoryGroup=hh3cMpMandatoryGroup, hh3cMpBundledSlot=hh3cMpBundledSlot, hh3cMpReorderedPkts=hh3cMpReorderedPkts)
| [
"[email protected]"
] | |
8fccf7816b9470fb84f1a4004f158f26b3e9010d | fa165cdb86defd6d6131ac763c38d6875c4ebec8 | /manage.py | e472a9b3db0e0dfb7893cce4f0d989c49fb87545 | [] | no_license | HongHanh120/generate-image-captcha | 3f93665393eae0df41c2bafdb2e02a1d9dd29da4 | 144aae5fed9e15c670d8eda9501e53d7dc252256 | refs/heads/main | 2023-05-16T05:41:52.215234 | 2021-06-01T19:18:17 | 2021-06-01T19:18:17 | 354,935,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'generate_captcha.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
d7ca2662615309dffb8694f7333f7b13866cb74a | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/kcrt/A.py | 8a78979fdd4d2249fd6b2449edec65b0d5a74bda | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 371 | py | # -*- coding: utf-8 -*-
T = int(input())
for t in range(T):
N = int(input())
if N == 0:
result = "INSOMNIA"
else:
n = 0
digits_seen = set()
while len(digits_seen) != 10:
n += N
for x in str(n):
digits_seen.add(x)
result = str(n)
print("Case #" + str(t + 1) + ": " + result)
| [
"[[email protected]]"
] | |
497971334d48da8fa1c01f649fa7eedf0c06d2a2 | b83ac23819fd7ba998563f2ad870405bdd07cc2b | /experiments/util/util.py | 5911d151f027ca6f96b5121f25011c81af5f76ec | [
"MIT"
] | permissive | Limmen/gym-idsgame | 699abd2894bce15108f1606f5fb71f612dd7ba03 | d10830fef55308d383c98b41b34688a7fceae357 | refs/heads/master | 2023-09-01T17:32:16.768138 | 2023-08-22T12:00:53 | 2023-08-22T12:00:53 | 247,794,752 | 49 | 12 | MIT | 2021-04-21T07:50:06 | 2020-03-16T19:00:27 | Python | UTF-8 | Python | false | false | 3,889 | py | """
Utility functions for experiments with the idsgame-env
"""
import io
import json
import jsonpickle
import logging
import time
import argparse
import os
from gym_idsgame.config.client_config import ClientConfig
def create_artefact_dirs(output_dir: str, random_seed : int) -> None:
"""
Creates artefact directories if they do not already exist
:param output_dir: the base directory
:param random_seed: the random seed of the experiment
:return: None
"""
if not os.path.exists(output_dir + "/results/logs/" + str(random_seed) + "/"):
os.makedirs(output_dir + "/results/logs/" + str(random_seed) + "/")
if not os.path.exists(output_dir + "/results/plots/" + str(random_seed) + "/"):
os.makedirs(output_dir + "/results/plots/" + str(random_seed) + "/")
if not os.path.exists(output_dir + "/results/data/" + str(random_seed) + "/"):
os.makedirs(output_dir + "/results/data/" + str(random_seed) + "/")
if not os.path.exists(output_dir + "/results/hyperparameters/" + str(random_seed) + "/"):
os.makedirs(output_dir + "/results/hyperparameters/" + str(random_seed) + "/")
if not os.path.exists(output_dir + "/results/gifs/" + str(random_seed) + "/"):
os.makedirs(output_dir + "/results/gifs/" + str(random_seed) + "/")
if not os.path.exists(output_dir + "/results/tensorboard/" + str(random_seed) + "/"):
os.makedirs(output_dir + "/results/tensorboard/" + str(random_seed) + "/")
def setup_logger(name: str, logdir: str, time_str = None):
"""
Configures the logger for writing log-data of experiments
:param name: name of the logger
:param logdir: directory to save log files
:param time_str: time string for file names
:return: None
"""
# create formatter
formatter = logging.Formatter('%(asctime)s,%(message)s')
# log to console
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
# log to file
if time_str is None:
time_str = str(time.time())
fh = logging.FileHandler(logdir + "/" + time_str + "_" + name + ".log", mode="w")
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
#logger.addHandler(ch)
return logger
def write_config_file(config: ClientConfig, path: str) -> None:
"""
Writes a config object to a config file
:param config: the config to write
:param path: the path to write the file
:return: None
"""
json_str = json.dumps(json.loads(jsonpickle.encode(config)), indent=4, sort_keys=True)
with io.open(path, 'w', encoding='utf-8') as f:
f.write(json_str)
def read_config(config_path) -> ClientConfig:
"""
Reads configuration of the experiment from a json file
:param config_path: the path to the configuration file
:return: the configuration
"""
with io.open(config_path, 'r', encoding='utf-8') as f:
json_str = f.read()
client_config: ClientConfig = jsonpickle.decode(json_str)
return client_config
def parse_args(default_config_path):
"""
Parses the commandline arguments with argparse
:param default_config_path: default path to config file
"""
parser = argparse.ArgumentParser(description='Parse flags to configure the json parsing')
parser.add_argument("-cp", "--configpath", help="Path to configuration file",
default=default_config_path, type=str)
parser.add_argument("-po", "--plotonly", help="Boolean parameter, if true, only plot",
action="store_true")
parser.add_argument("-nc", "--noconfig", help="Boolean parameter, if true always override config",
action="store_true")
args = parser.parse_args()
return args | [
"[email protected]"
] | |
c83e2817d9a860680a5a11fdeadfb2a9d45c9037 | 1793aac7856809ed8e121955056154de50a2ae8f | /c07_pycon_tw/p14_digit_stack.py | 51b37d4dba55ac1dce6921328221a67bb068261c | [] | no_license | ZpRoc/checkio | fe4af88f116f75f8197cd31d857ae5262615b6af | 126647f8971732bdf13d49092df178654dee889b | refs/heads/main | 2023-03-22T23:52:37.330312 | 2021-03-10T02:28:56 | 2021-03-10T02:28:56 | 335,871,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | # ---------------------------------------------------------------- #
# Digit Stack
# Take the one off the top of the pile!
# (Numbers)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
| [
"[email protected]"
] | |
5f477bed338ceb2d6e833ae8f6b79e32aa1680ee | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_sneers.py | a803280a36259a958b364c4e672bcfc2c97f2596 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.nouns._sneer import _SNEER
#calss header
class _SNEERS(_SNEER, ):
def __init__(self,):
_SNEER.__init__(self)
self.name = "SNEERS"
self.specie = 'nouns'
self.basic = "sneer"
self.jsondata = {}
| [
"[email protected]"
] | |
291ab02ed979348e94062f8dfa48aa3b87c29f40 | af5c8d742226965ef73cf761782f0825fb374b7c | /string_format/parcela.py | 21d6fe1122398071d26d21ef974fc71e743cdae5 | [] | no_license | Zahidsqldba07/PythonExamples-1 | 002e99d2581c05bfb8a1766caff12991f55e11bb | a9f5bbc58cc6941a73c537b3a22d812cc9081785 | refs/heads/master | 2023-03-16T08:40:43.781000 | 2019-01-11T04:33:03 | 2019-01-11T04:33:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | # coding: utf-8
# Airline Ticket
# (c) Héricles Emanuel, UFCG, Programming 1
milhas = float(raw_input())
aliquota = float(raw_input())
valor_passagem = (milhas * aliquota) + 51.00
print "Valor da passagem: R$ %.2f" % valor_passagem
# Payment options
# Cash up front (25% discount)
pag_vista = valor_passagem * 0.75
# In 6 installments (5% discount)
pag_6 = valor_passagem * 0.95
# In 10 installments (full price)
pag_10 = valor_passagem
# Installment amounts
# 6 installments
parcela_6 = pag_6 / 6
# 10 installments
parcela_10 = pag_10 / 10
print "\nPagamento:"
print "1. À vista. R$ %.2f" % pag_vista
print "2. Em 6 parcelas. Total: R$ %.2f" % pag_6
print " 6 x R$ %.2f" % parcela_6
print "3. Em 10 parcelas. Total: R$ %.2f" % pag_10
print " 10 x R$ %.2f" % parcela_10 | [
"[email protected]"
] | |
2facbb1ce7570c8227c25a19f561d45f47185148 | 8ef5a09d76a11c56963f18e6a08474a1a8bafe3c | /leet_code/1446. Consecutive Characters.py | 9379134d01e057544f21fef8f3b8eec1ee75c106 | [] | no_license | roiei/algo | 32c4677649c7666db148f6183fbfbf66c8b1969f | ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec | refs/heads/master | 2022-04-01T19:21:27.768675 | 2022-02-19T06:15:29 | 2022-02-19T06:15:29 | 169,021,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py |
import time
from util.util_list import *
from util.util_tree import *
import copy
import heapq
import bisect
import collections
class Solution:
def maxPower(self, s: str) -> int:
if not s:
return 0
cur = s[0]
mx = cnt = 1
for i in range(1, len(s)):
if s[i] == cur:
cnt += 1
mx = max(cnt, mx)
continue
cnt = 1
cur = s[i]
return mx
stime = time.time()
print(2 == Solution().maxPower(s = "leetcode"))
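# Additional check (hypothetical example): the longest run is the five 'e's, so expect 5
print(5 == Solution().maxPower(s = "abbcccddddeeeeedcba"))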
print('elapse time: {} sec'.format(time.time() - stime)) | [
"[email protected]"
] | |
2f1a3d7401b312b6b3dbbeef3cacb792e34eb756 | 206e54f4ad23386a08b634dbf1c5bc691ef76390 | /build/scripts-3.6/4scanner | 80627675ba7ac052fa3161208bd2848b3dfb8f4e | [
"MIT"
] | permissive | Python3pkg/4scanner | f604cd0e265ec96f39116270c5dedb67f4d9b7d1 | 7e647081fd6a4fb7baff1a5fd11e2cdc3d22bd20 | refs/heads/master | 2021-01-21T09:14:32.962898 | 2017-05-18T19:38:38 | 2017-05-18T19:38:38 | 91,650,218 | 0 | 0 | null | 2017-05-18T04:56:17 | 2017-05-18T04:56:17 | null | UTF-8 | Python | false | false | 1,866 | #!/usr/local/opt/python3/bin/python3.6
import argparse
import os
import scanner
import time
def main():
# Arguments parsing and validation
parser = argparse.ArgumentParser()
parser.add_argument("keywords_file",
help="file with the keywords to search for")
parser.add_argument("-o", "--output", help="Specify output folder")
parser.add_argument("-w", "--wait-time",
help="Time to wait between each scan in minutes. "
"Default is 5 minutes")
parser.add_argument("-q", "--quota",
help="Exit when specified size quota "
"is exceeded. Ex: 500MB, 30GB etc...")
args = parser.parse_args()
# Checking keywords file
if not os.path.isfile(args.keywords_file):
print("Keywords file does not exist...")
exit(1)
if args.output:
output = args.output
if not os.path.exists(output):
print("{0} Does not exist.".format(output))
exit(1)
else:
output = os.getcwd()
# Checking for quota
if args.quota:
if "gb" in args.quota.lower():
quota_mb = args.quota.lower().split('g')[0] * 1000
elif "mb" in args.quota.lower():
quota_mb = args.quota.lower().split('m')[0]
else:
print("Quota format invalid. Valid example: 20GB, 700MB etc...")
exit(1)
else:
quota_mb = False
# Checking for sleep time
if args.wait_time:
        wait_time = float(args.wait_time) * 60
else:
wait_time = 300
log_file = "downloaded-{0}.txt".format(time.strftime('%d%m%Y_%H%M'))
scanner.scan(args.keywords_file, output, log_file, quota_mb, wait_time)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| [
"[email protected]"
] | ||
c20ef58d20f34d8cb7e2c6df7c4607975c74d86e | 2dadf0fb64c52655704d9231b8039fc0343c5e88 | /py_wake/tests/test_blockage_models/test_selfsimilarity.py | f7695f796c9dfe4810417efb6ead3f6cf6b334b5 | [
"MIT"
] | permissive | luqidndx/PyWake | 45377245e8d35db3c7b33b27ef11b812db0a6ada | 3d046eb14c4597de49ac2fee3771b8e0e68820ad | refs/heads/master | 2022-07-02T01:40:28.575907 | 2020-05-13T16:51:42 | 2020-05-13T16:51:42 | 259,701,236 | 0 | 0 | MIT | 2020-04-28T17:06:59 | 2020-04-28T17:06:58 | null | UTF-8 | Python | false | false | 5,796 | py | import pytest
import matplotlib.pyplot as plt
import numpy as np
from py_wake.deficit_models import SelfSimilarityDeficit
from py_wake.deficit_models.no_wake import NoWakeDeficit
from py_wake.deficit_models.noj import NOJDeficit
from py_wake.examples.data import hornsrev1
from py_wake.examples.data.hornsrev1 import Hornsrev1Site
from py_wake.superposition_models import LinearSum
from py_wake.tests import npt
from py_wake.wind_farm_models.engineering_models import All2AllIterative
@pytest.fixture(scope='module')
def setup():
site = Hornsrev1Site()
windTurbines = hornsrev1.HornsrevV80()
ss = SelfSimilarityDeficit()
return site, windTurbines, ss
def test_selfsimilarity_reference_figures(setup):
ss = setup[2]
ws = 10
D = 80
R = D / 2
WS_ilk = np.array([[[ws]]])
D_src_il = np.array([[D]])
ct_ilk = np.array([[[.8]]])
x1, y1 = -np.arange(200), np.array([0])
deficit_centerline = ss.calc_deficit(WS_ilk=WS_ilk, D_src_il=D_src_il,
dw_ijlk=x1.reshape((1, len(x1), 1, 1)),
cw_ijlk=y1.reshape((1, len(y1), 1, 1)), ct_ilk=ct_ilk)[0, :, 0, 0]
x2, y2 = np.array([-2 * R]), np.arange(200)
deficit_radial = ss.calc_deficit(WS_ilk=WS_ilk, D_src_il=D_src_il,
dw_ijlk=x2.reshape((1, len(x2), 1, 1)),
cw_ijlk=y2.reshape((1, len(y2), 1, 1)), ct_ilk=ct_ilk)[0, :, 0, 0]
r12 = np.sqrt(ss.lambda_ * (ss.eta + (x2 / R) ** 2)) # Eq. (13) from [1]
if 0:
plt.title('Fig 11 from [1]')
plt.xlabel('x/R')
plt.ylabel('a')
plt.plot(x1 / R, deficit_centerline / ws)
print(list(np.round(deficit_centerline[::20], 6)))
plt.figure()
plt.title('Fig 10 from [1]')
print(list(np.round(deficit_radial[::20] / deficit_radial[0], 6)))
plt.xlabel('y/R12 (epsilon)')
plt.ylabel('f')
plt.plot((y2 / R) / r12, deficit_radial / deficit_radial[0])
plt.show()
fig11_ref = np.array([[-0.025, -1, -2, -3, -4, -5], [0.318, 0.096, 0.035, 0.017, 0.010, 0.0071]]).T
npt.assert_array_almost_equal(np.interp(-fig11_ref[:, 0], -x1 / R, deficit_centerline / ws), fig11_ref[:, 1], 1)
npt.assert_array_almost_equal(deficit_centerline[::20], [0, 1.806478, 0.95716, 0.548851, 0.345007,
0.233735, 0.1677, 0.125738, 0.097573, 0.077819])
fig10_ref = np.array([[0, 1, 2, 3], [1, .5, .15, .045]]).T
npt.assert_array_almost_equal(np.interp(fig10_ref[:, 0], (y2 / R) / r12, deficit_radial / deficit_radial[0]),
fig10_ref[:, 1], 1)
npt.assert_array_almost_equal(deficit_radial[::20] / deficit_radial[0],
[1.0, 0.933011, 0.772123, 0.589765, 0.430823, 0.307779,
0.217575, 0.153065, 0.107446, 0.075348])
def test_blockage_map(setup):
site, windTurbines, ss = setup
wm = All2AllIterative(site, windTurbines, wake_deficitModel=NoWakeDeficit(),
superpositionModel=LinearSum(), blockage_deficitModel=ss)
flow_map = wm(x=[0], y=[0], wd=[270], ws=[10]).flow_map()
X_j, Y_j = flow_map.XY
WS_eff = flow_map.WS_eff_xylk[:, :, 0, 0]
if 0:
plt.contourf(X_j, Y_j, WS_eff)
plt.plot(X_j[200, ::50], Y_j[200, ::50], '.-')
plt.plot(X_j[250, ::50], Y_j[250, ::50], '.-')
print(list(np.round(WS_eff[200, ::50], 6)))
print(list(np.round(WS_eff[250, ::50], 6)))
ss.windTurbines.plot([0], [0], wd=[270])
plt.show()
npt.assert_array_almost_equal(WS_eff[200, ::50], [9.940967, 9.911659, 9.855934,
9.736016, 9.44199, 10.0, 10.0, 10.0, 10.0, 10.0])
npt.assert_array_almost_equal(WS_eff[250, ::50], [9.937601, 9.90397, 9.834701,
9.659045, 9.049764, 10.0, 10.0, 10.0, 10.0, 10.0])
def test_wake_and_blockage(setup):
site, windTurbines, ss = setup
noj_ss = All2AllIterative(site, windTurbines, wake_deficitModel=NOJDeficit(),
blockage_deficitModel=ss, superpositionModel=LinearSum())
flow_map = noj_ss(x=[0], y=[0], wd=[270], ws=[10]).flow_map()
X_j, Y_j = flow_map.XY
WS_eff = flow_map.WS_eff_xylk[:, :, 0, 0]
npt.assert_array_almost_equal(WS_eff[200, ::50], [9.940967, 9.911659, 9.855934, 9.736016, 9.44199, 4.560631,
5.505472, 6.223921, 6.782925, 7.226399])
npt.assert_array_almost_equal(WS_eff[250, ::50], [9.937601, 9.90397, 9.834701, 9.659045, 9.049764, 4.560631,
5.505472, 6.223921, 6.782925, 7.226399])
if 0:
plt.contourf(X_j, Y_j, WS_eff)
plt.plot(X_j[200, ::50], Y_j[200, ::50], '.-')
plt.plot(X_j[250, ::50], Y_j[250, ::50], '.-')
print(list(np.round(WS_eff[200, ::50], 6)))
print(list(np.round(WS_eff[250, ::50], 6)))
ss.windTurbines.plot([0], [0], wd=[270])
plt.show()
def test_aep_two_turbines(setup):
site, windTurbines, ss = setup
nwm_ss = All2AllIterative(site, windTurbines, wake_deficitModel=NoWakeDeficit(),
blockage_deficitModel=ss, superpositionModel=LinearSum())
sim_res = nwm_ss(x=[0, 80 * 3], y=[0, 0])
aep_no_blockage = sim_res.aep_ilk(with_wake_loss=False).sum(2)
aep = sim_res.aep_ilk().sum(2)
# blockage reduce aep(wd=270) by .5%
npt.assert_almost_equal((aep_no_blockage[0, 270] - aep[0, 270]) / aep_no_blockage[0, 270] * 100, 0.4896853)
if 0:
plt.plot(sim_res.WS_eff_ilk[:, :, 7].T)
plt.show()
| [
"[email protected]"
] | |
fedd53070c0b4a1d64e61591d4c69662132cb765 | 7b55cfc4ffa7678e4c7b8f2312831ebbd549e54f | /proj1/tests/other-tests/oskis-angels_tests/regexperts_tests/test-proj1 | afb5b61b6d7e62ec1bf967ca1639a3a68c36a002 | [] | no_license | czchen1/cs164-projects | 0d330efef85421e611a436b165428ba0ddfb3512 | a04cafbcaafd32e518227dacf89a6d7837bf9f57 | refs/heads/master | 2020-03-27T04:03:31.727524 | 2018-08-23T21:43:46 | 2018-08-23T21:43:46 | 145,909,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,436 | #!/usr/bin/env python3
# Test compiler against suite of Python programs. Requires that pyunparse be
# on the path.
import os, sys, re
from subprocess import Popen, PIPE
from getopt import getopt, GetoptError
def Usage():
print('''
Usage: test-proj1 [ --errors ] [ --compiler=COMPILER ] [ --retain ] \
[ --runtime=RUNTIME.py ] DIR/BASE.py ...
Runs COMPILER (default ./apyc) on each DIR/BASE.py with the command
COMPILER --phase=1 -o BASE.ast PROGRAM-FILE
In the absence of the --errors option, unparses the result into
a Python program BASE-2.py. If there is a file BASE.pre, prefixes that
to BASE-2.py; otherwise, if there is a file RUNTIME.py specified,
prefixes that. Then runs python on BASE-2.py.
If there is a file DIR/BASE.in, uses that as the standard input.
Otherwise, uses the empty file for the standard input. Compares
the output to file DIR/BASE.std, if that is present, and otherwise
just checks that the python interpreter exits normally. Retains ASTs
if --retain.
With the --errors option, checks that the compiler exits with a
non-zero exit code and that stderr contains at least one error message
in the correct format. Does not unparse the resulting AST or execute
the Python interpreter.
Reports results and exits with 0 if there are no errors, and
with 1 otherwise.''', file=sys.stderr)
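# Illustrative invocations matching the usage text above (hypothetical paths,
# not part of the original harness):
#   ./test-proj1 --compiler=./apyc --runtime=runtime.py tests/prog1.py
#   ./test-proj1 --errors tests/bad-syntax.py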
def Run(command, *args, **keys):
if args:
command = command % args
proc = Popen (command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate(keys.get ('stdin', b''))  # default must be bytes: the pipes are binary
return proc.returncode, out.decode('utf-8'), err.decode('utf-8')
def Remove(file):
try:
os.unlink(file)
except OSError:
pass
def Contents(file):
try:
f = open(file)
r = f.read()
f.close()
return r
except:
return ""
def Compile(prog, ast):
global Stdout, Stderr
code, Stdout, Stderr = Run("%s --phase=1 -o %s %s", compiler, ast, prog)
return code == 0
def Unparse(ast, python, prefix=""):
code, out, err = Run("pyunparse --remove-extensions %s", ast)
if code != 0:
return False
outfile = open(python, "w")
outfile.write(prefix + out)
outfile.close()
return True
def Execute(prog, inp):
global Stdout, Stderr
code, Stdout, Stderr = Run("python %s", prog, stdin=inp.encode('utf-8'))
return code == 0
def HasError(errout):
return re.search (r'(?m)^[^:]+\.py:\d+:\s*\S', errout)
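# The regex accepts diagnostics of the form FILE.py:LINE: MESSAGE, e.g. the
# (hypothetical) line "prog.py:12: syntax error" matches, while "prog.py: error"
# lacks a line number and is rejected.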
try:
opts, tests = getopt (sys.argv[1:], 'h',
['help', 'retain', 'errors', 'runtime=', 'compiler='])
except GetoptError:
Usage()
sys.exit(1)
compiler = './apyc'
errors = False
retain = False
runtime = ''
for opt, val in opts:
if opt in ( '-h', '--help' ):
Usage()
sys.exit(0)
elif opt == '--errors':
errors = True
elif opt == '--compiler':
compiler = val
elif opt == '--retain':
retain = True
elif opt == '--runtime':
runtime = Contents(val)
N = 0
OK = 0
for f in tests:
N += 1
dir, file = os.path.split(f)
base, ext = os.path.splitext(file)
print(base + ".py:", end=" ")
ast = base + ".ast"
if errors:
if Compile(f, ast):
msg = "FAIL (wrong exit code)"
elif HasError (Stderr):
msg = "OK"
else:
msg = "FAIL (bad error message)"
else:
prog2 = base + "-2.py"
inp = os.path.join(dir, base + ".in")
std = Contents (os.path.join(dir, base + ".std"))
prefix = Contents(os.path.join(dir, base + ".pre")) or runtime
if not Compile(f, ast):
msg = "FAIL (wrong exit code)"
elif Stderr:
msg = "FAIL (error messages)"
elif not Unparse(ast, prog2, prefix):
msg = "FAIL (bad AST)"
elif not Execute(prog2, Contents(inp)):
msg = "FAIL (execution error)"
elif Stderr:
msg = "FAIL (error output on execution)"
elif std and std != Stdout:
msg = "FAIL (wrong output)"
else:
msg = "OK"
Remove(prog2)
if not retain:
Remove(ast)
if msg == "OK":
OK += 1
print(msg)
print()
print("Ran %d tests." % N)
if OK == N:
print("All passed.")
sys.exit(0)
else:
print("%d failed." % (N - OK))
sys.exit(1)
| [
"[email protected]"
] | ||
e7e0c38178d388d789d1caec87893f7825006c74 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/L/lk_morgan/molecular_spectroscopy_data_3.py | e22a7abf2fdbca5d126d00da9a5629182b65adc7 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,992 | py | ###########################################################################################
# We use a ScraperWiki library called pdftoxml to scrape PDFs.
# This is an example of scraping a simple PDF.
###########################################################################################
import scraperwiki
import urllib2
import lxml.etree
import lxml.html
import re
htmlurl=scraperwiki.scrape("http://spec.jpl.nasa.gov/ftp/pub/catalog/catdir.html")
html = lxml.html.fromstring(htmlurl)
text_arr=[]
for el in html.cssselect("div[align='left'] a"):
text=el.text_content()
text_arr.append(text)
cat_list=[]
for k in text_arr:
if k != 'pdf' and k !='Tex': cat_list.append(k)
species_list=[]
for l in cat_list:
start = l.find('c')
end = l.find('.cat', start)
species_list.append(l[start+1:end])
for i in species_list:
#Skip the 055002 and 102002 files for now as they don't follow the regular format, need to fix this at some point
if i !='055002' and i !='102002':
print i
sp_url="http://spec.jpl.nasa.gov/ftp/pub/catalog/doc/d"+i+".pdf"
url = sp_url
pdfdata = urllib2.urlopen(url).read()
xmldata = scraperwiki.pdftoxml(pdfdata,'-hidden')
root = lxml.etree.fromstring(xmldata)
pages = list(root)
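# pdftoxml returns pdftohtml-style XML: the root element holds one <page> child per
# PDF page, whose <text> children carry the extracted strings (hence pages = list(root))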
# this function has to work recursively because we might have "<b>Part1 <i>part 2</i></b>"
def gettext_with_bi_tags(el):
res = [ ]
if el.text:
res.append(el.text)
for lel in el:
res.append("<%s>" % lel.tag)
res.append(gettext_with_bi_tags(lel))
res.append("</%s>" % lel.tag)
if el.tail:
res.append(el.tail)
return "".join(res)
print gettext_with_bi_tags(pages[0])
row=[]
pagei=0
for page in list(pages):
pagei=pagei+1
eli=0
for el in list(page):
eli=eli+1
row.append(gettext_with_bi_tags(el))
try:
Species_Tagn=row.index('Species Tag:')
Species=row[Species_Tagn+1]
except:
Species='No Info'
Versionn=row.index('Version:') # must be located first: the Name lookup below refers back to it
try:
Namen = row.index('Name:')
if row[Namen+1] == row[Versionn-1]:Name=row[Namen+1]
else:Name=row[Namen+1]+row[Versionn-1]
except:
Name='No Info'
Daten=row.index('Date:')
Q300n=row.index('Q(300.0)=')
Q225n=row.index('Q(225.0)=')
Q150n=row.index('Q(150.0)=')
Q75n=row.index('Q(75.00)=')
Q37n=row.index('Q(37.50)=')
Q18n=row.index('Q(18.75)=')
Q9n=row.index('Q(9.375)=')
try:
mu_an=row.index('a')
except:
mu_an=row.index('0')
try:
mu_bn=row.index('b')
except:
mu_bn=row.index(u'\xb5 =')
try:
mu_cn=row.index('c')
except:
mu_cn=row.index('el')
maxJn=row.index('Max. J:')
An=row.index('A=')
Bn=row.index('B=')
Cn=row.index('C=')
State=row[Versionn+2:Daten]
statn=''
for j in State:
statn=statn+' '+j
if row[An+1] == u'\xb5' or row[An+1] == u'\xb5 =':A='no data'
else: A=row[An+1]
if row[Bn+1] == u'\xb5':B='no data'
else: B=row[Bn+1]
C_test=row[Cn+1]
# 'x != "0" or "1" or ...' is always True in Python, so these fields were never kept;
# test whether the field starts with a digit instead (also fixes mub reading from mu_cn)
if not C_test[:1].isdigit():C='no data'
else:C=C_test
if not row[mu_an+2][:1].isdigit():mua='no data'
else: mua=row[mu_an+2]
if not row[mu_bn+2][:1].isdigit():mub='no data'
else: mub=row[mu_bn+2]
if not row[mu_cn+2][:1].isdigit():muc='no data'
else: muc=row[mu_cn+2]
scraperwiki.sqlite.save(unique_keys=["Species Tag"],data={"Molecule":Name,"State":statn,"Species Tag":Species,"Max J":row[maxJn+1],"mu a":mua,"mu b":mub,"mu c":muc,"A":A,"B":B,"C":C,"Q300":row[Q300n+1],"Q225":row[Q225n+1],"Q150":row[Q150n+1],"Q75":row[Q75n+1],"Q37":row[Q37n+1],"Q18":row[Q18n+1],"Q9":row[Q9n+1]})
| [
"[email protected]"
] | |
061e7dda33dcb2c2fb0db6cad3aee933e290bf59 | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/coghq/LobbyManager.py | a7cc7848ebf5356ae4059739f695cbf473e47cec | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | from pandac.PandaModules import *
from toontown.toonbase import ToontownGlobals
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import TTLocalizer
class LobbyManager(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory("LobbyManager")
SetFactoryZoneMsg = "setFactoryZone"
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
def generate(self):
self.notify.debug("generate")
DistributedObject.DistributedObject.generate(self)
def disable(self):
self.notify.debug("disable")
self.ignoreAll()
DistributedObject.DistributedObject.disable(self)
def getSuitDoorOrigin(self):
return 1
def getBossLevel(self):
return 0
| [
"[email protected]"
] | |
237881d1efcacc8a677adc585e4d749afd015af4 | ebda2abceb39e2a85210f0f0b193d3221811d582 | /5ed sozdanie.py | 4960fa60c8c4903e3fae811136d6f612453db8a8 | [] | no_license | PashaKim/Python-dnd-backstory | 783e7604a185e594810a65430d1151db8388f8bb | d13876f3530b6dfca18a9638260a77d49990a401 | refs/heads/master | 2020-03-18T15:04:02.296576 | 2018-05-26T03:15:27 | 2018-05-26T03:15:27 | 134,885,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119,372 | py | # coding=utf-8
import numpy
import random
import sys
def dice(dicemin,dicemax,droll):
return numpy.random.randint(dicemin, dicemax+1, droll)
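# Note on the helper (numpy.random.randint excludes the upper bound, hence dicemax+1):
# dice(1, 6, 4) returns an array of four d6 results, e.g. array([3, 6, 1, 4])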
def story():
return dice(1, 20, 1)
# def Characteristics():
# Char = [dice(1, 6, 4) for x in range(0, 6)]
# Char = [sum(L[1:4], 0) for L in Char]
# Char.sort()
# return Char
def sixroll():
six = dice(1, 6, 4)
six = sorted(six, reverse=True) # order the four d6 highest first (the extra in-place sort was redundant)
return sum(six[:3]) # keep the best three
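# Worked example of the 4d6-drop-lowest rule above: rolls [5, 3, 6, 2]
# sort to [6, 5, 3, 2], keep [6, 5, 3], giving an ability score of 14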
STR = sixroll()
DEX = sixroll()
CON = sixroll()
INT = sixroll()
WSD = sixroll()
CHR = sixroll()
Char = [STR, DEX, CON, INT, WSD, CHR]
Char.sort()
Zoloto = dice(30, 50, 1)
Zoloto = Zoloto[0]
#print('Доступные классы: Варвар, Бард, Жрец, Друид, Воин, Монах, Паладин, Следопыт, Плут, Чародей, Волшебник')
#Prof = input('Выберите ваш класс персонажа: ')
# Prof = random.choice(('Варвар', 'Воин', 'Паладин',
# 'Бард', 'Следопыт', 'Плут',
# 'Друид', 'Монах', 'Жрец',
# 'Чародей', 'Волшебник'))
Prof ='Искатель приключений'
print(Char)
def proftochar():
STR = Char[5]
DEX = Char[3]
CON = Char[4]
INT = Char[0]
WSD = Char[2]
CHR = Char[1]
if Prof in ('Бард', 'бард', 'Скальд', 'скальд', 'Поэт', 'поэт'):
STR = Char[0]
DEX = Char[4]
CON = Char[3]
INT = Char[1]
WSD = Char[2]
CHR = Char[5]
elif Prof in ('Варвар', 'варвар', 'Берсерк', 'берсерк'):
STR = Char[5]
DEX = Char[3]
CON = Char[4]
INT = Char[0]
WSD = Char[2]
CHR = Char[1]
elif Prof in ('Жрец', 'жрец', 'Священик', 'cвященик'):
STR = Char[3]
DEX = Char[0]
CON = Char[4]
INT = Char[2]
WSD = Char[5]
CHR = Char[1]
elif Prof in ('Друид', 'друид'):
STR = Char[0]
DEX = Char[3]
CON = Char[4]
INT = Char[2]
WSD = Char[5]
CHR = Char[1]
elif Prof in ('воин', 'солдат', 'рыцарь', 'Воин'):
STR = Char[5]
DEX = Char[3]
CON = Char[4]
INT = Char[0]
WSD = Char[2]
CHR = Char[1]
elif Prof in ('Монах', 'монах'):
STR = Char[2]
DEX = Char[5]
CON = Char[3]
INT = Char[1]
WSD = Char[4]
CHR = Char[0]
elif Prof in ('Паладин', 'паладин', 'воин света', 'Воин света'):
STR = Char[5]
DEX = Char[0]
CON = Char[3]
INT = Char[1]
WSD = Char[2]
CHR = Char[4]
elif Prof in ('Следопыт', 'следопыт', 'Лучник', 'лучник'):
STR = Char[3]
DEX = Char[5]
CON = Char[2]
INT = Char[0]
WSD = Char[4]
CHR = Char[1]
elif Prof in ('Плут', 'плут', 'убийца', 'вор'):
STR = Char[1]
DEX = Char[5]
CON = Char[3]
INT = Char[0]
WSD = Char[2]
CHR = Char[4]
elif Prof in ('Чародей', 'чародей', 'маг', 'Маг'):
STR = Char[0]
DEX = Char[1]
CON = Char[4]
INT = Char[2]
WSD = Char[3]
CHR = Char[5]
elif Prof in ('колдун', 'Колдун'):
STR = Char[1]
DEX = Char[0]
CON = Char[4]
INT = Char[2]
WSD = Char[3]
CHR = Char[5]
elif Prof in ('Волшебник', 'волшебник'):
STR = Char[0]
DEX = Char[2]
CON = Char[4]
INT = Char[5]
WSD = Char[3]
CHR = Char[1]
return STR, DEX, CON, INT, WSD, CHR
STRm = int((STR - 10)//2)
DEXm = int((DEX - 10)//2)
CONm = int((CON - 10)//2)
INTm = int((INT - 10)//2)
WSDm = int((WSD - 10)//2)
CHRm = int((CHR - 10)//2)
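# 5e ability modifier: (score - 10) // 2, e.g. a score of 15 gives +2 and a score of 8 gives -1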
skill = {
'Акробатика (ЛоВ)': DEXm,
'Анализ (ИнТ)': INTm,
'Атлетика (СиЛ)': STRm,
'Внимательность (МуД)': WSDm,
'Выживание (МуД)': WSDm,
'Выступление (ХаР)': CHRm,
'Запугивание (ХаР)': CHRm,
'История (ИнТ)': INTm,
'Лечение (МуД)': WSDm,
'Ловкость Рук (ЛоВ)': DEXm,
'Магия (ИнТ)': INTm,
'Обман (ХаР)': CHRm,
'Природа (ИнТ)': INTm,
'Проницательность (МуД)': WSDm,
'Религия (ИнТ)': INTm,
'Скрытность (ЛоВ)': DEXm,
'Убеждение (ХаР)': CHRm,
'Уход за животными (МуД)': WSDm,
}
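# Illustrative check (not part of the original flow): a d20 skill test is the roll
# plus the table's modifier, e.g. dice(1, 20, 1)[0] + skill['Атлетика (СиЛ)']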
def profesion():
sredStr = skill['Атлетика (СиЛ)']
sredDex = (skill['Акробатика (ЛоВ)']+skill['Ловкость Рук (ЛоВ)']+skill['Скрытность (ЛоВ)'])//3
sredCon = CONm
sredInt = (skill['Анализ (ИнТ)']+skill['История (ИнТ)']+skill['Магия (ИнТ)']+skill['Природа (ИнТ)']+skill['Религия (ИнТ)'])//5
sredWsd = (skill['Внимательность (МуД)']+skill['Выживание (МуД)']+skill['Лечение (МуД)']+skill['Проницательность (МуД)']+skill['Уход за животными (МуД)'])//5
sredChr = (skill['Выступление (ХаР)']+skill['Запугивание (ХаР)']+skill['Обман (ХаР)']+skill['Убеждение (ХаР)'])//4
# print([sredStr, sredDex, sredInt, sredWsd, sredChr])
# the original chained 'and' conditions never compared most of the stats
# ('a and b > c and d' only compares b with c); rewritten with explicit max()
if sredStr > max(sredInt, sredWsd, sredChr) and sredDex > max(sredInt, sredWsd, sredChr):
Prof = 'Варвар'
elif sredStr > max(sredInt, sredWsd, sredDex) and sredChr > max(sredInt, sredWsd, sredDex):
Prof = 'Паладин'
elif sredChr > max(sredStr, sredInt, sredWsd):
Prof = 'Бард'
elif sredChr > max(sredDex, sredStr, sredInt, sredWsd):
Prof = 'Колдун'
elif sredChr > max(sredDex, sredStr):
Prof = 'Чародей'
elif sredInt > max(sredDex, sredStr):
Prof = 'Волшебник'
elif sredWsd > max(sredDex, sredInt, sredChr):
Prof = 'Жрец'
elif sredWsd > max(sredStr, sredChr):
Prof = 'Друид'
elif sredDex > max(sredStr, sredInt, sredChr):
Prof = 'Монах'
elif sredDex > sredChr:
Prof = 'Следопыт'
elif sredDex > max(sredStr, sredWsd):
Prof = 'Плут'
else:
Prof = 'Воин'
return Prof
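# Usage sketch (hypothetical; the script currently hardcodes Prof above):
# Prof = profesion() would instead derive the class from whichever skill group averages highest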
Story = [story() for d in range(35)] # 35 entries, so table indices up to Story[34] stay in range
def Table1(): #Таблица 1: Обстоятельства рождения
global Zoloto
if Story[1] <= 1:
print('Нищий')
skill['Ловкость Рук (ЛоВ)'] += 2
Zoloto -= 25
elif Story[1] == 2: #15: Преступление
print('Преступник. \n', 'Его занятие: '+Table15())
skill['Запугивание (ХаР)'] += 2
elif Story[1] == 3:
print('Палач.')
skill['Религия (ИнТ)'] += 2
elif Story[1] == 4:
print('Телохранитель.')
skill['Атлетика (СиЛ)'] += 2
elif Story[1] == 5: #29: Нанятый шпионить
print('Шпион.')
skill['Скрытность (ЛоВ)'] += 2
skill['Обман (ХаР)'] += 2
print('-Есть ли у вас братья и сёстры?')
Table2()
print('Вас сразу ввели в курс семейных дел:')
Table29()
elif Story[1] == 6:
print('Солдат.')
skill['Атлетика (СиЛ)'] += 2
elif Story[1] == 7:
print('Лекарь.')
skill['Лечение (МуД)'] += 2
elif Story[1] == 8:
print('Крестьянин.')
skill['Атлетика (СиЛ)'] += 2
elif Story[1] == 9:
print('Дипломат.')
skill['Убеждение (ХаР)'] += 2
elif Story[1] == 10:
print('Гробовщик.')
skill['Религия (ИнТ)'] += 2
elif Story[1] == 11:
print('Священник.')
skill['Религия (ИнТ)'] += 2
elif Story[1] == 12:
print('Юрист.')
skill['Убеждение (ХаР)'] += 2
elif Story[1] == 13:
print('Учёный')
skill['История (ИнТ)'] += 2
elif Story[1] == 14:
print('Ремесленник.')
skill['Внимательность (МуД)'] += 2
elif Story[1] == 15:
print('Торговец.')
skill['Убеждение (ХаР)'] += 2
elif Story[1] == 16:
print('Дворянин.')
skill['Проницательность (МуД)'] += 2
Zoloto += 25
elif Story[1] == 17: #5: Злодей
print(Table5())
elif Story[1] == 18:
print('Великий герой.')
elif Story[1] == 19:
print('Выращен зверьми. Вы могли бегать в стае волков,'
'пока не встретили цивилизованных существ, а может быть,'
'вас вырастили разумные магические звери, такие как ворги или единороги.')
skill['Природа (ИнТ)'] += 4
skill['Уход за животными (МуД)'] += 4
elif Story[1] == 20:
print('Вы приёмный ребёнок.')
print('Ваши опекуны:')
Story[1] = dice(1, 19, 1)
Table1()
def Table2(): #Таблица 2: Братья и сёстры
if Story[2] <= 2:
print('Нет ни братьев ни сестер.')
elif Story[2] == 3:
print('Однояйцевый близнец.')
elif Story[2] == 4:
print('Младшая сестра.')
elif Story[2] == 5:
print('Младший брат.')
elif Story[2] == 6:
print('Старшая сестра.')
elif Story[2] == 7:
print('Старший брат.')
elif Story[2] == 8:
print('Две сестры.')
elif Story[2] == 9:
print('Два брата.')
elif Story[2] == 10:
print('Старшая сестра и сводный брат.')
elif Story[2] == 11:
print('Две сводные сестры.')
elif Story[2] == 12:
print('Младший брат-бастард.')
elif Story[2] == 13:
print('Вы — бастард, у вас есть две сестры и два брата.')
elif Story[2] == 14:
print('У ваших родителей кроме вас ещё три ребёнка. Определите сами кто это.')
elif Story[2] == 15:
print('Два сводных брата или сестры, у каждого разные родители.')
elif Story[2] == 16:
print('У ваших родителей кроме вас ещё шесть детей (вы — младший).')
elif Story[2] == 17:
print('Однояйцевый близнец и младший брат.')
elif Story[2] == 18:
print('Разнояйцовый близнец.')
elif Story[2] == 19:
print('У ваших родителей кроме вас четыре ребёнка (вы — старший).')
elif Story[2] == 20:
print('Разлучён с семьёй в молодости.')
Story[2] = dice(1, 19, 1)
Table2()
def Table3(): #Таблица 3: Уход из дома
global Zoloto
print('-Уход из дома:')
Story[3] = story()
if Story[3] <= 4: # 4: Попечение
print('Вас отправили на попечение.')
Table4()
elif Story[3] == 5: # 5: Злодей, 9: Месть
print('Ваш дом был уничтожен злодеем:'+Table5m())
print('Вы решили отомстить:')
Table9()
elif Story[3] == 6: # 5: Злодей \Конец
print('Ваш дом был уничтожен злодеем:'+Table5m())
print(' Ваша жизнь искателя приключений начинается.')
# sys.exit("Конец")
elif Story[3] == 7: # Таблица 30: Цирк
print('Вы сбежали, чтобы устроиться в цирк.')
skill['Акробатика (ЛоВ)'] += 2
skill['Выступление (ХаР)'] += 2
Table30()
elif Story[3] == 8: # Таблица 19: В бегах
print(' Вы сбежали от жестокого члена семьи, который хотел использовать вас для чего-то жуткого.')
Table19()
elif Story[3] == 9: # Таблица 12: Персона
print('Вы сбежали с любимым человеком. ')
Table12()
elif Story[3] == 10: #Таблица 11: Поиски
if Story[2] <= 2:
print('Вы узнаёте о существовании единокровной сестры или брата. Вы решили отправиться на поиски родственника.')
else:
print('Вы отправились на поиски пропавшего или живущего далеко родного брата или сестры.')
Table11('person')
elif Story[3] == 11: # Таблица 31: Испытание стойкости Таблица 17: Неудача Конец
print('Большая часть вашей семьи умерла от чумы.')
Table31()
if Story[31] <= 8:
Table17()
else:
print('Справившись с болезнью, вы решили начать жизнь искателя приключений.')
# sys.exit("Конец")
elif Story[3] == 12: # Таблица 16: Рабство
print('Вас похитили и поработили.')
Table16()
elif Story[3] == 13: #Таблица 34: Похищенный на корабль
print('Вас захватили пираты.')
Table34()
elif Story[3] == 14: # Таблица 15: Преступление Таблица 26: Гильдия воров
print('Ваша семья разорилась. Вам пришлось покинуть дом, чтобы совершать '+Table15()+' ради выживания.')
Table26()
elif Story[3] == 15: # Таблица 20: Охрана каравана
print('Вы ушли из города с караваном, чтобы посмотреть мир.')
Table20()
elif Story[3] == 16: # Таблица 11: Поиски
print('Вы отправились на поиски фамильных ценностей или древнего мощного предмета.')
Table11('object')
elif Story[3] == 17: # Таблица 7: Проклятый
print('Вы были прокляты и покинули дом, чтобы не навлечь беду на семью.')
Table7()
elif Story[3] == 18: # Таблица 10: Башня волшебника
print('На вашу семью было наложено заклятье, и вы ищете волшебника, способного его снять. В обмен волшебник попросил оказать ему услугу.')
Table10()
elif Story[3] == 19: # Таблица 33: Дворянство
print('Ваши родители пообещали дворянину ваши услуги в обмен на прощение долгов.')
Table33()
elif Story[3] == 20: # Таблица 8: Прибытие
print('Вы ушли исследовать мир. Путь привел вас в..')
Table8()
def Table4(): #Таблица 4: Попечение
global Zoloto
print('-Попечение:')
Story[4] = story()
if Story[4] == 1: # Таблица 17: Неудача
print('Вы не добрались до точки назначения.')
Table17()
elif 2 <= Story[4] <= 3: # Таблица 6: Торговые дела
print('Вы стали помощником торговца.')
skill['Убеждение (ХаР)'] += 2
Zoloto += 10
Table6()
elif 4 <= Story[4] <= 5: # Таблица 10: Башня волшебника
print('Вы стали подмастерьем волшебника.')
skill['Магия (ИнТ)'] += 2
Table10()
elif 6 <= Story[4] <= 7: # Таблица 27: Отправленный в храм
print('Вы стали послушником в монастыре. ')
skill['Религия (ИнТ)'] += 2
Table27()
elif 8 <= Story[4] <= 9: # Таблица 22: Военная служба
print('Вы поступили на военную службу.')
skill['Атлетика (СиЛ)'] += 2
Table22()
elif 10 <= Story[4] <= 11: # Таблица 12: Персона
if skill['Внимательность (МуД)'] > max(skill['Акробатика (ЛоВ)'], skill['Выступление (ХаР)']): # the bare 'or' made this branch always true
print('Вы стали подмастерьем умелого ремесленника')
skill['Внимательность (МуД)'] += 2
else:
print('Вы стали подмастерьем умелого артиста.')
skill['Акробатика (ЛоВ)'] += 2
skill['Выступление (ХаР)'] += 2
Table12()
elif 12 <= Story[4] <= 13: # Таблица 12: Персона
print('Вы учились у актёра.')
skill['Обман (ХаР)'] += 2
skill['Выступление (ХаР)'] += 2
Table12()
elif 14 <= Story[4] <= 15: # Таблица 15: Преступление Таблица 26: Гильдия воров
print('Ваш наставник оказался преступником. Его ремесло: '+Table15())
skill['Ловкость Рук (ЛоВ)'] += 2
skill['Обман (ХаР)'] += 2
Table26()
elif 16 <= Story[4] <= 17: # Таблица 30: Цирк
print('Вместо того, чтобы попасть к наставнику, вы устроились в цирк.')
skill['Акробатика (ЛоВ)'] += 2
skill['Выступление (ХаР)'] += 2
Table30()
elif 18 <= Story[4] <= 19: #Таблица 33: Дворянство
print('Вы прислуживали в доме дворянина.')
skill['Убеждение (ХаР)'] += 2
Table33()
elif 20 <= Story[4]: # Таблица 5: Злодей Таблица 3: Уход из дома
print('Вашего наставника убил '+Table5m())
print('Вы вернулись домой.')
Table3()
def Table5(): #Таблица 5: Злодей
Story[5] = story()
if Story[5] == 1:
var = 'Полу-орк'
elif Story[5] == 2:
var = 'Единорог'
elif Story[5] == 3:
var = 'Вермедведь'
elif Story[5] == 4:
var = 'Дракон'
elif Story[5] == 5:
var = 'Волшебник'
elif Story[5] == 6:
var = 'Военачальник'
elif Story[5] == 7:
var = 'Вампир'
elif Story[5] == 8:
var = 'Коатль'
elif Story[5] == 9:
var = 'Эмпирей'
elif Story[5] == 10:
var = 'Кентавр'
elif Story[5] == 11:
var = 'Дриада'
elif Story[5] == 12:
var = 'Фейские существо'
elif Story[5] == 13:
var = 'Глубинный гном'
elif Story[5] == 14:
var = 'Джинн'
elif Story[5] == 15:
var = 'Каменный великан'
elif Story[5] == 16:
var = 'Глава гильдии воров'
elif Story[5] == 17:
var = 'Влиятельный торговец'
elif Story[5] == 18:
var = 'Дворянин'
elif Story[5] == 19:
var = 'Волшебник'
elif Story[5] == 20:
var = 'Ааракокра'
return var
def Table5m(): #Таблица 5: Монстры
Story[5] = story()
if Story[5] == 1:
var = 'Орк'
elif Story[5] == 2:
var = 'Гоблин'
elif Story[5] == 3:
var = 'Великан'
elif Story[5] == 4:
var = 'Жаболюд'
elif Story[5] == 5:
var = 'Циклоп'
elif Story[5] == 6:
var = 'Химера'
elif Story[5] == 7:
var = 'Вампир'
elif Story[5] == 8:
var = 'Лич-лорд'
elif Story[5] == 9:
var = 'Тролль'
elif Story[5] == 10:
var = 'Демон'
elif Story[5] == 11:
var = 'Аболет'
elif Story[5] == 12:
var = 'Фейские существо'
elif Story[5] == 13:
var = 'Живодёр разума'
elif Story[5] == 14:
var = 'Доппельгангер'
elif Story[5] == 15:
var = 'Бехолдер'
elif Story[5] == 16:
var = 'Медвежатник'
elif Story[5] == 17:
var = 'Камбион'
elif Story[5] == 18:
var = 'Баньши'
elif Story[5] == 19:
var = 'Волшебник'
elif Story[5] == 20:
var = 'Глава культа'
return var
def Table6(): #Таблица 6: Торговые дела
global Zoloto
print('-Торговые дела:')
Story[6] = story() + skill['Убеждение (ХаР)']
if Story[6] <= 4: # Таблица 21: Заключённый
print('Ваш наниматель или конкурент обвинил вас в краже товаров, и вас отправили в тюрьму.')
Table21()
elif 5 <= Story[6] <=8: # Таблица 26: Гильдия воров
print('Вы занялись контрабандой и связались с ворами.')
Table26()
elif 9 <= Story[6] <=11: # Таблица 20: Охрана каравана
print('Ваши навыки торговли оказались не на высоте, и вы решили устроиться вместо этого охранником каравана.')
Table20()
elif 12 <= Story[6] <=14: # Таблица 17: Неудача
print('Вы умелый торговец, но в одной из деловых поездок вы попали в засаду.')
skill['Убеждение (ХаР)'] += 2
Table17()
elif 15 <= Story[6] <= 18: # Таблица 28: Посредничество
print(' Вы устраивали замечательные демонстрации товаров, и к вам пришли другие торговцы. \n '
'Они хотят, чтобы вы разрешили их торговый спор.')
skill['Убеждение (ХаР)'] += 2
Table28()
elif 19 <= Story[6]:
print('Вы — знаток торговли. К несчастью, вам помешал '+Table5m())
Zoloto += 10
skill['Убеждение (ХаР)'] += 2
print('Вы решили отомстить.')
Table9()
def Table7(): #Таблица 7: Проклятый
global Zoloto
print('-Проклятый:')
Story[7] = story()
if Story[7] <= 4: # Таблица 11: Поиски
print('Еда и напитки не имеют вкуса, а музыка не приносит радости.\n '
'Вам нужна информация о том, как снять это проклятье.')
Table11('infa')
elif 5 <= Story[7] <= 7: # Таблица 11: Поиски
print('От ваших поцелуев других тошнит и продирает до костей мороз.\n'
' Только одна личность может снять это проклятье.')
Table11('person')
elif 8 <= Story[7] <= 10: # Таблица 10: Башня волшебника
print('Из ваших глаз постоянно текут чёрные слёзы. \n'
'Вы отправились к волшебнику, чтобы снять это проклятье, но тот поможет только в обмен на вашу услугу.')
Table10()
elif 11 <= Story[7] <= 13: # Таблица 27: Отправленный в храм
print('Одна ваша рука сделана из гибкого камня, и не чувствует ни прикосновений, ни тепла.'
' Вы отправились в храм, чтобы снять проклятье, но в замен вы должны оказать услугу жрецам.')
Table27()
elif 14 <= Story[7] <= 16: # Таблица 11: Поиски
print('У вас кошачий глаз, окружённый сморщенной чёрной кожей и светящийся зелёным. '
'Вам нужен некий предмет для снятия этого проклятья.')
Table11('object')
elif 17 <= Story[7] <= 20: # Конец
print('Одна ваша рука — кривая чешуйчатая лапа. \n'
'Вы не знаете, как снять это проклятье, но вы всё ещё ищете ответ. \n'
' Ваша жизнь искателя приключения начинается.')
# sys.exit("Конец")
def Table8(): # Таблица 8: Прибытие
global Zoloto
print('-Прибытие:')
Story[8] = story()
if Story[8] == 1: #Таблица 34: Похищенный на корабль
print('Портовый город. Вас насильно завербовали.')
Table34()
elif Story[8] == 2: # Таблица 23: Драка в таверне
print('Таверна на торговой дороге.')
Table23()
elif Story[8] == 3: # Таблица 16: Рабство
print('Лагерь работорговцев.')
Table16()
elif Story[8] == 4: # Таблица 25: Затерявшийся в глубинах
print('Горная крепость дварфов. Они попросили вас помочь устранить опасности в их туннелях.')
Table25()
elif Story[8] == 5: # Таблица 10: Башня волшебника
print('Башня волшебника.')
Table10()
elif Story[8] == 6: # Таблица 28: Посредничество
print('Охотничьи угодья дворянина, на которых вас и поймали. Вы начали оправдываться.')
Table28()
elif Story[8] == 7: # Таблица 9: Месть
print('Логово злодея: '+Table5m())
Table9()
elif Story[8] == 8: # Таблица 18: Друг в нужде
print('Дом друга. Ему нужна ваша помощь. ')
Table18()
elif Story[8] == 9: # Таблица 22: Военная служба
print('Военный лагерь.')
Table22()
elif Story[8] == 10: # Таблица 8: Прибытие
print('Странствующая община полуросликов, сплавляющихся на баржах по реке. Они научили вас охотиться.')
skill['Внимательность (МуД)'] += 2
print('Река привела вас...')
Table8()
elif Story[8] == 11: # Таблица 7: Проклятый
print('Дом с привидениями. Вы убежали, но были прокляты.')
Table7()
elif Story[8] == 12: # Таблица 31: Испытание стойкости Таблица 24: Выживание в глуши
print('Маленькая деревня в глуши.')
Table31()
print('Вы пережили кошмарную зиму, а потом отправились на поиски еды.')
Table24()
elif Story[8] == 13: # Таблица 31: Испытание стойкости Таблица 11: Поиски
print('Город на болоте. Среди населения разразилась эпидемия.')
Table31()
print('Вы отправились на поиски лекарства.')
Table11('object')
elif Story[8] == 14: # Таблица 28: Посредничество
print('Лесная община эльфов. Они были против вашего присутствия и потребовали, чтобы вы доказали своё право находиться здесь.')
Table28()
elif Story[8] == 15: # Таблица 33: Дворянство
print('Особняк дворянина.')
Table33()
elif Story[8] == 16: # Таблица 30: Цирк
print('Бродячий цирк на большой дороге. Вы на какое-то время присоединяетесь к ним.')
Table30()
elif Story[8] == 17: # Таблица 11: Поиски
print('Дом вашего друга. Сам он пропал, и вы подозреваете что-то дурное.')
Table11('person')
elif Story[8] == 18: # Таблица 6: Торговые дела
print('Торговый город, разделённый рекой. Вас вовлекли в какие-то торговые дела.')
Table6()
elif Story[8] == 19: # Таблица 19: В бегах
print('Тайное убежище бандитов. Вы украли часть их золота, но сбежали, когда они вас заметили.')
Zoloto +=10
Table19()
elif Story[8] == 20:
print('Священная роща дриад и трентов.')
if skill['Природа (ИнТ)'] + story() >= 12: # Таблица 24: Выживание в глуши
print('Феи прощают вас и позволяют идти дальше.')
Table24()
else: # Таблица 7: Проклятый
print('Они вас проклинают, и вы спасаетесь бегством.')
Table7()
def Table9(): #Таблица 9: Месть
global Zoloto
print('-Месть:')
Story[9] = story()
if Story[9] <= 2: # Таблица 17: Неудача
print('Из-за какого-то события вы потеряли след врага. ')
Table17()
elif 3 <= Story[9] <= 5: # Таблица 24: Выживание в глуши
print('Стремясь схватиться с врагом, вы затерялись в глуши.')
Table24()
elif 6 <= Story[9] <= 8:# Таблица 21: Заключённый
print('Вы победили врага, но были заключены под стражу его союзниками.')
Table21()
elif 9 <= Story[9] <= 11:# Таблица 31: Испытание стойкости Таблица 8: Прибытие
print('Вы с врагом попались в чью-то ловушку, и должны выжить, полагаясь только друг на друга.')
Table31()
print('Ваши пути разошлись.')
Table8()
elif 12 <= Story[9] <= 14: # Таблица 25: Затерявшийся в глубинах
print('Враг обманом заманил вас подземелья или пещеры и бросил там.')
Table25()
elif 15 <= Story[9] <= 17: # Таблица 28: Посредничество
print('Враг оказался слишком сильным, и вы должны проявить красноречие вместо грубой силы.')
Table28()
elif 18 <= Story[9] <= 20: # Конец
print('Враг победил вас, но вы сбежали и готовитесь отомстить. \n'
'Ваша жизнь искателя приключений начинается. ')
# 'В качестве альтернативы вы можете совершить ещё один бросок по этой таблице.')
# sys.exit("Конец")
def Table10(): # Таблица 10: Башня волшебника
global Zoloto
print('-Башня волшебника:')
Story[10] = story() + skill['Магия (ИнТ)']
if Story[10] <= 3: # Таблица 19: В бегах
print('Вы испортили ритуал, исполняемый волшебником, и впустили в этот мир демона.\n'
' Он схватился с волшебником, а вы в суматохе сбежали.')
Table19()
elif 4 <= Story[10] <= 5: # Таблица 24: Выживание в глуши
print('Вы случайно сломали хрустальный шар, и злой маг превратил вас в жабу и выкинул в болото. \n'
' Вы прожили там месяц, а потом вернулись в обычный облик. ')
Table24()
elif 6 <= Story[10] <= 7: # Таблица 8: Прибытие
print('Вы пытались исполнить заклинание и подожгли башню. Взбешённый маг со злости телепортировал вас.')
Table8()
elif 8 <= Story[10] <= 9: # Таблица 11: Поиски
print('Вы прервали ритуал в самый ответственный момент, отчего волшебника куда-то перенесло.\n'
'Вы отправились на поиски мага')
Table11('person')
elif 10 <= Story[10] <= 11: # Таблица 11: Поиски
print('Вы выпустили гомункула, который украл волшебную палочку мага и улетел. \n'
' Волшебник требует, чтобы вы вернули эту палочку или принесли другую, более мощную.')
Table11('object')
elif 12 <= Story[10] <= 13: # Таблица 31: Испытание стойкости
print('Вы сварили плохое зелье и заболели, когда выпили его.')
Table31()
Table10()
elif 14 <= Story[10] <= 16: # Конец
print('Вы спасли книгу заклинаний от пожара и были награждены. \n'
'Вы получаете зелье исцеления в стартовом наборе. \n'
' Ваша жизнь искателя приключений начинается.')
# sys.exit("Конец")
elif 17 <= Story[10] <= 18: # Таблица 22: Военная служба
print('Волшебник оценил ваши навыки и взял вас на войну между магами.')
skill['Магия (ИнТ)'] += 2
Table22()
elif 19 <= Story[10]: # Конец
print('Вы удивили волшебника своими способностями.\n'
' Польщённый вашими услугами волшебник отправляет вас в мир за новыми знаниями.\n'
' Вы получаете зелье исцеления в стартовом наборе. \n'
' Ваша жизнь искателя приключений начинается. \n')
skill['Магия (ИнТ)'] += 2
# sys.exit("Конец")
def Table11(t): # Таблица 11: Поиски
target = t
global Zoloto
print('-Поиски:')
Story[11] = story() + skill['Внимательность (МуД)']
if Story[11] <= 4: # Конец
print('Вы не нашли никаких признаков того, что искали. \n'
'Ваша жизнь искателя приключений начинается с ещё невыполненным заданием.')
# sys.exit("Конец")
elif 5 <= Story[11] <= 8: # Таблица 23: Драка в таверне
print('Пытаясь напоить и разговорить народ, вы сами напились и устроили драку.')
Table23()
elif 9 <= Story[11] <= 13: # Таблица 19: В бегах
print('В своих поисках вы наткнулись на деятельность '+Table5m()+', которого разозлили ваши изыскания.')
Table19()
elif 14 <= Story[11] <= 17:
print('Вы обнаружили, что вам нужен ещё один кусочек головоломки.')
skill['Внимательность (МуД)'] += 2
print('Вы ищете новую информацию.')
Table11(t)
elif 18 <= Story[11]:
print('Вы находите то, что искали.')
skill['Внимательность (МуД)'] += 2
if target == 'person': # Если это человек, совершите бросок по таблице 12. Таблица 12: Персона
Table12()
elif target == 'object': #Если это предмет, совершите бросок по таблице 13. Таблица 13: Предмет
Table13()
else: # Если это информация, совершите бросок по таблице 14. Таблица 14: Информация
Table14()
def Table12(): # Таблица 12: Персона
global Zoloto
print('-Персона:')
Story[12] = story()
if Story[12] <= 2: #Конец Таблица 5: Злодей
print('Это существо убито злым '+Table5m()+'. Ваша жизнь искателя приключений начинается.')
if INT <= 10: # Таблица 23: Драка в таверне
print('Вы попробовали залить печаль вином.')
Table23()
# else:
# sys.exit("Конец")
elif 3 <= Story[12] <= 4: # Таблица 26: Гильдия воров
print('Это существо шантажируют представители гильдии воров. Вы внедряетесь к ним, чтобы найти шантажиста.')
Table26()
elif 5 <= Story[12] <= 6: # Таблица 31: Испытание стойкости
print('Это существо умерло от болезни. Вы заразились.')
Table31()
print('Ваша жизнь искателя приключений начинается.')
if INT <= 10: # Таблица 23: Драка в таверне
print('Вы попробовали залить печаль вином.')
Table23()
# else:
# sys.exit("Конец")
elif 7 <= Story[12] <= 8: #Таблица 19: В бегах
print('Это существо оказалось доппельгангером. Вы сбежали до того, как узнали всю историю целиком.')
Table19()
elif 9 <= Story[12] <= 10:# Таблица 5: Злодей Таблица 9: Месть
print('Это существо убито злым '+Table5m())
Table9()
elif 11 <= Story[12] <= 12: # Таблица 5: Злодей Таблица 11: Поиски
print('Это существо похищено злодеем и удерживается ради выкупа. \n'
'Вы должны найти для '+Table5m()+' некий предмет, или вы никогда не увидите это существо.')
Table11('object')
elif 13 <= Story[12] <= 14: # Таблица 17: Неудача
print('Это существо всего лишь использовало вас, и на самом деле презирает вас. \n'
'Оно прогоняет вас прочь. В отчаянии вы не смотрели по сторонам и попали в беду.')
Table17()
elif 15 <= Story[12] <= 16: # Таблица 9: Месть
print('Это существо перешло на сторону врага. Вы захотели вернуть это существо обратно.')
Table9()
elif 17 <= Story[12] <= 18:
print('Это существо оказалось вашим родственником. \n'
'Ваша жизнь искателя приключений начинается одновременно с этим откровением.')
if INT > 15 or WSD > 15:
print('Ваше знание и опыт раскусили проходимца:')
print('Родство было предлогом для того, чтобы выведать у вас информацию или получить наследство. \n'
'Ваша жизнь искателя приключений начинается с осознания того, что вас одурачили и предали.')
# sys.exit("Конец")
elif 19 <= Story[12] <= 20: # Таблица 18: Друг в нужде
print('Этому существу нужна ваша помощь.')
Table18()
def Table13(): # Таблица 13: Предмет
global Zoloto
print('-Предмет:')
Story[13] = story()
if Story[13] <= 4: # Таблица 7: Проклятый
print('Этот предмет проклят.')
Table7()
elif 5 <= Story[13] <= 8: # Таблица 5: Злодей Таблица 9: Месть
print('Этот предмет украден злодеем. '+Table5m())
print('Вы попытались заполучить этот предмет.')
Table9()
elif 9 <= Story[13] <= 12: # Таблица 21: Заключённый
print('Этот предмет оказался у таинственной фигуры в плаще, совершившей преступление. \n'
'После того как вас видели вместе, вас обвинили и бросили в тюрьму.')
Table21()
elif 13 <= Story[13] <= 16: # Конец
print('Этот предмет уничтожен. Ваша жизнь искателя приключений начинается, когда вы пытаетесь чем-то заменить его.')
# sys.exit("Конец")
elif 17 <= Story[13] <= 20: # Конец
print('Вы получаете искомый предмет. Ваша жизнь искателя приключений начинается, и вы уже владеете им.')
# sys.exit("Конец")
def Table14(): # Таблица 14: Информация
global Zoloto
print('-Информация:')
Story[14] = story() + skill['Анализ (ИнТ)']
if Story[14] <= 4: # Конец
print('Вы получаете информацию, но за неё нужно заплатить.\n'
' Ваша жизнь искателя приключений начинается с этим знанием.')
Zoloto -= 20
# sys.exit("Конец")
elif 5 <= Story[14] <= 7: # Конец
print('Найденная вами информация ложна. \n'
'Вы не уверены, намеренно ли вас вводят в заблуждение, и вы не знаете, где находится правда. \n'
'Ваша жизнь искателя приключений начинается.')
# sys.exit("Конец")
elif 8 <= Story[14] <= 11: # Таблица 16: Рабство
print('Вы задавали слишком много вопросов, и кто-то заплатил, чтобы вы исчезли.')
Table16()
elif 12 <= Story[14] <= 15: # Таблица 11: Поиски
print('Информация вела к другой тайне. ')
Table11('infa')
elif 16 <= Story[14] <= 18: # Конец
print('Вы получаете нужную информацию. Ваша жизнь искателя приключений начинается с этим знанием.')
# sys.exit("Конец")
elif 19 <= Story[14]: # Конец
print('Вы стали опытным сыщиком. Вы получаете искомую информацию и находите карту сокровищ. \n'
'Вы получаете тренировку Знания Улиц и Внимательности и начинаете жизнь искателя приключений.')
skill['Внимательность (МуД)'] += 2
skill['Анализ (ИнТ)'] += 2
# sys.exit("Конец")
def Table15(): #Таблица 15: Преступление
Story[15] = story()
if Story[15] == 1:
var = 'Контрабанда'
elif Story[15] == 2:
var = 'Заказное убийство'
elif Story[15] == 3:
var = 'Квартирная кража'
elif Story[15] == 4:
var = 'Подстрекательство к мятежу'
elif Story[15] == 5:
var = 'Карманная кража'
elif Story[15] == 6:
var = 'Похищение'
elif Story[15] == 7:
var = 'Грабёж'
elif Story[15] == 8:
var = 'Пиратство'
elif Story[15] == 9:
var = 'Браконьерство'
elif Story[15] ==10:
var = 'Фальшивомонетничество'
elif Story[15] ==11:
var = 'Фальсификация товаров'
elif Story[15] ==12:
var = 'Мошенничество'
elif Story[15] ==13:
var = 'Торговля на чёрном рынке'
elif Story[15] ==14:
var = 'Работорговля'
elif Story[15] ==15:
var = 'Святотатство'
elif Story[15] == 16:
var = 'Осквернение могил'
elif Story[15] == 17:
var = 'Шантаж'
elif Story[15] == 18:
var = 'Шпионаж'
elif Story[15] == 19:
var = 'Жульничество в азартных играх'
elif Story[15] == 20:
var = 'Выдача себя за дворянина'
return var
def Table16(): # Таблица 16: Рабство
global Zoloto, CON
print('-Рабство:')
Story[16] = story()
if Story[16] <= 2: # Таблица 31: Испытание стойкости
print('Вы много лет пробыли рабом, но потом хозяин вас благодушно отпустил.')
CON += 2
Table31()
print('Ваша жизнь искателя приключений начинается.')
# sys.exit("Конец")
elif 3 <= Story[16] <= 4: # Таблица 9: Месть Таблица 5: Злодей
print('Вы сбежали из рабства и нашли, кто виноват в вашем похищении.\n '
'Если подозреваемых нет, это '+Table5m())
Table9()
elif 5 <= Story[16] <= 6: # Таблица 25: Затерявшийся в глубинах
print('Вы добывали под землёй драгоценные металлы.')
CON += 2
skill['Выживание (МуД)'] += 2
print('Вы попытались сбежать по пещерам.')
Table25()
elif 7 <= Story[16] <= 8: #Таблица 24: Выживание в глуши
print('Вы возглавили восстание рабов, которое было подавлено. \n'
'Вы несколько лет провели в цепях, презираемые и рабами и похитителями. \n'
'Ваши хозяева начинают считать вас помехой и оставляют умирать в глуши:')
Table24()
elif 9 <= Story[16] <= 10: #Таблица 22: Военная служба
print('Регулярные войска освободили вас и ваших братьев по несчастью. Вы присоединяетесь к ним.')
Table22()
elif 11 <= Story[16] <= 12: #Таблица 19: В бегах
print('Вы сбежали, когда на вас никто не обращал внимания, но хозяева хотят вас вернуть.')
Table19()
elif 13 <= Story[16] <= 14: #Таблица 17: Неудача
print('Вы возглавили восстание рабов, но когда вы сбежали со своими приятелями, удача отвернулась от вас.')
Table17()
elif 15 <= Story[16] <= 16: #Таблица 27: Отправленный в храм
print('Практически все владельцы рабов погибли в катастрофе, выглядящей как возмездие богов. \n'
'Вы хотите отблагодарить освободившую вас сущность.')
Table27()
elif 17 <= Story[16] <= 18: #Таблица 33: Дворянство
print('Дворянин выкупил вас и позволил отработать долг.')
Table33()
elif 19 <= Story[16] <= 20: #Таблица 18: Друг в нужде
print('Друг накопил денег, чтобы выкупить вас, но ему в ответ нужно оказать услугу.')
Table18()
def Table17(): # Таблица 17: Неудача
global Zoloto
print('-Неудача:')
Story[17] = story()
if Story[17] <= 2: #Таблица 22: Военная служба
print('Началась война, и вас вербуют на службу.')
Table22()
elif 3 <= Story[17] <= 4: # Таблица 9: Месть
print('Бандиты ограбили вас и оставили связанным. Вы освободились и хотите отомстить. ')
Zoloto -= Zoloto//2 # integer division keeps the gold count whole
Table9()
elif 5 <= Story[17] <= 6: # Таблица 21: Заключённый
print('Вас по ошибке заключили под стражу.')
Table21()
elif 7 <= Story[17] <= 8: # Таблица 12: Персона
print('Вы в кого-то влюбились и теперь не можете сосредоточиться.')
Table12()
elif 9 <= Story[17] <= 10: # Таблица 23: Драка в таверне
print('Вы набрели на таверну, и когда вы потеряли счёт кружкам с выпивкой, началась заварушка.')
Table23()
elif 11 <= Story[17] <= 12: # Таблица 10: Башня волшебника
print('Волшебник зачаровал вас и сделал своим слугой.')
Table10()
elif Story[17] == 13: # Таблица 16: Рабство
print(' Вас поймали работорговцы.')
Table16()
elif Story[17] == 14: # Таблица 34: Похищенный на корабль
print('Вас насильно завербовали.')
Table34()
elif 15 <= Story[17] <= 16: # Таблица 24: Выживание в глуши
print('Вы заблудились в глуши.')
Table24()
elif 17 <= Story[17] <= 18:
print('Фейские существо на несколько лет заманило вас в Страну Фей, хотя по вашим ощущениям прошло всего несколько дней.')
if CHR < 10: # Таблица 7: Проклятый
print('Обиженная фея наградила вас проклятьем за уход.')
Table7()
else: # Конец
print('Ваша жизнь искателя приключений начинается с того, что вы покидаете Страну Фей.')
# sys.exit("Конец")
elif 19 <= Story[17] <= 20: # Таблица 8: Прибытие
print('Яростный шторм надолго задержал вас в месте, в котором вы не намеревались останавливаться.')
Table8()
def Table18(): # Таблица 18: Друг в нужде
global Zoloto
print('-Друг в нужде:')
Story[18] = story()
if Story[18] <= 4: # Таблица 15: Преступление Таблица 26: Гильдия воров
print('Другу нужна ваша помощь в совершении '+Table15()+'.\n'
'У вас всё получилось, но гильдия воров потребовала, чтобы вы оказали для них услугу за несанкционированную деятельность.')
Table26()
elif 5 <= Story[18] <= 8: # Таблица 19: В бегах
print('Другу нужна помощь в спасении члена вашей семьи, которого хотят казнить. Вы преуспели, но теперь вас разыскивают власти.')
Table19()
elif 9 <= Story[18] <= 12: # Таблица 28: Посредничество
print('Вашему другу нужно, чтобы вы поговорили с кем-то, и он смог жениться.')
Table28()
elif 13 <= Story[18] <= 16: # Таблица 21: Заключённый
print('Друг только притворялся, что ему нужна помощь, а на самом деле хочет свалить на вас своё преступление.')
Table21()
elif 17 <= Story[18] <= 20: # Таблица 23: Драка в таверне
print('Другу не нужна ваша помощь. Он всё это подстроил, чтобы устроить вечеринку в вашу честь. \n'
'Когда вечеринка была в самом разгаре:')
Table23()
def Table19(): # Таблица 19: В бегах
global Zoloto
print('-В бегах:')
Story[19] = story() + skill['Внимательность (МуД)']
if Story[19] <= 4: # Таблица 16: Рабство
print('Преследователи поймали вас и продали в рабство.')
Table16()
elif 5 <= Story[19] <= 7: # Таблица 21: Заключённый
print('Ваши преследователи поймали вас и заключили под стражу.')
Table21()
elif 8 <= Story[19] <= 10: # Таблица 17: Неудача
print('Побег от врагов был прерван неким событием. ')
Table17()
elif 11 <= Story[19] <= 12: # Таблица 8: Прибытие
print('Убегая от врагов, никогда не знаешь, кому доверять, и вы постоянно пытались понять чужие мотивы.')
skill['Проницательность (МуД)'] += 2
Table8()
elif 13 <= Story[19] <= 15: # Конец
print('Враги преследуют вас по пятам. \n'
'Ваша жизнь искателя приключений начинается со знанием того, что преследователи могут показаться в любой момент.')
skill['Внимательность (МуД)'] += 2
elif 16 <= Story[19] <= 18: # Конец
print('Вы исчезаете без следа, и преследователи сдаются — на какое-то время.\n'
'Вы получаете тренировку Обмана. Ваша жизнь искателя приключений начинается.')
skill['Обман (ХаР)'] +=2
elif 19 <= Story[19]:
print('Вы выследили преследователей задолго до того как они нашли вас, и вы устроили так, что их заметили их же враги. \n'
'Они уже никого теперь не смогут преследовать. Вы начинаете жизнь искателя приключений, окружённый славой безжалостного хитреца.')
skill['Внимательность (МуД)'] += 2
skill['Обман (ХаР)'] += 2
def Table20(): # Таблица 20: Охрана каравана
global Zoloto
print('-Охрана каравана:')
Story[20] = story() + skill['Внимательность (МуД)']
if Story[20] <=3: # Таблица 27: Отправленный в храм
print('Караван остановился на ночь, и к вашему лагерю присоединились другие путники. \n'
'К несчастью, они оказались вампирами, и они напали на вас ночью. \n'
'Вы бежали по ночи, а за спиной гоготали вампиры, и вы спаслись лишь за счёт того, что добежали до храма.')
Table27()
elif 4 <= Story[20] <=6: #Таблица 25: Затерявшийся в глубинах
print('Внезапно появившийся булит напал на караван. \n'
' Когда вы побежали на помощь, вы упали в яму, прорытую чудовищем, и оказались в подземных туннелях. ')
Table25()
elif 7 <= Story[20] <= 8: # Таблица 24: Выживание в глуши
print('Дракон напал и поднял в воздух фургон, в котором вы укрылись вместе с собратьями по несчастью. \n'
'Вы выпрыгнули, когда дракон пролетал низко над деревьями. ')
Table24()
elif 9 <= Story[20] <= 11: # Таблица 8: Прибытие
print('Ваша работа прошла без особых событий — вам даже было скучно. '
'Когда караван остановился вы прилегли вздремнуть, а проснувшись, обнаружили, что караван ушёл без вас.')
Table8()
elif 12 <= Story[20] <= 14: #Конец
print('Вы хорошо охраняли караван в долгом переходе. Ваша жизнь искателя приключений начинается.')
Zoloto += 10
elif 15 <= Story[20] <= 17: # Таблица 20: Охрана каравана
zlodey = Table5m()
print('На караван напал '+zlodey+' с прислужниками. '
'Вы заметили их издалека и смогли достаточно подготовить защиту, чтобы отогнать их. \n',
zlodey+' поклялся вам отомстить.')
#'когда вам в следующий раз нужно будет совершить бросок по таблице 5, используйте этого злодея.'
skill['Внимательность (МуД)'] += 2
Table20()
elif 18 <= Story[20]: # Таблица 8: Прибытие
print('Вы издалека заметили неприятности на дороге, и сумели поймать в плен бандитов, засевших в засаде.'
'Глава каравана выдал вам дополнительную плату, а также возможность получить деньги за пойманных разбойников.')
skill['Внимательность (МуД)'] += 2
Zoloto += 20
Table8()
def Table21():#Таблица 21: Заключённый
global Zoloto
print('-Заключённый:')
# print('Вас заперли вместе с преступниками всех мастей. Кто-то был оговорён, но многие действительно виновны в ужасных преступлениях.')
Story[21] = story() + skill['Запугивание (ХаР)']
if Story[21] <= 3: #31: Испытание стойкости #Таблица 21: Заключённый
print('Вы год гнили в каземате.')
Table31()
Table21()
elif 4 <= Story[21] <= 5:#31: Испытание стойкости, 11: Поиски
print('Вы отсидели несколько лет.')
Table31()
print('После освобождения вы пытались воссоединиться с членами семьи или друзьями.')
Table11('person')
elif 6 <= Story[21] <= 8:#26: Гильдия воров
print('Вам пришлось вступить в тюремную банду, связанную с преступной группировкой на свободе. \n'
'Они устроили вам побег, а взамен вы должны оказать им услугу.')
Table26()
elif 9 <= Story[21] <= 11:#16: Рабство
print('Ваше буйное поведение принесло вам репутацию смутьяна, \n'
'и вместо содержания в тюрьме вас продали в рабство.')
Table16()
elif 12 <= Story[21] <= 14:#15: Преступление, 19: В бегах
print('Ваш сокамерник был осуждён за '+Table15(), '\n'
'Вы с сокамерником выкопали туннель и сбежали.')
Table19()
elif 15 <= Story[21] <= 16:#22: Военная служба
print('Вашу стойкость заметил начальник тюрьмы, и остаток срока вы провели среди охраны.')
skill['Запугивание (ХаР)'] +=2
Table22()
elif 17 <= Story[21] <= 18:#33: Дворянство
print('Вас должны были казнить. К счастью, дворянин, впечатлённый вашей речью перед лицом смерти, вмешался и выкупил вашу свободу.\n'
'За это вы должны ему служить.')
Table33()
elif 19 <= Story[21]:#Конец
print('Вы стали лидером заключённых. Благодаря связям с людьми на воле ваши подельники \n'
'устроили вам досрочное освобождение и собрали небольшую сумму денег.\n'
'Ваша жизнь искателя приключений начинается.')
Zoloto += 20
skill['Запугивание (ХаР)'] += 2
skill['Проницательность (МуД)'] += 2
# sys.exit("Конец")
def Table22():# Таблица 22: Военная служба
global Zoloto
print('-Военная служба:')
print('Теперь ты в армии!')
Story[22] = story() + skill['Атлетика (СиЛ)']
if Story[22] <= 2: # Таблица 19: В бегах
print('Хаос и резня на поле боя сломили вас, и вы дезертировали.')
Table19()
elif 3 <= Story[22] <= 4: # Таблица 20: Охрана каравана
print('Вы оказались плохим солдатом, и вас отправили охранять обоз.')
Table20()
elif 5 <= Story[22] <= 6: #Таблица 24: Выживание в глуши
print('Ваш взвод окружили враги и вы едва выжили, сбежав в пустоши на вражеской территории.')
Table24()
elif 7 <= Story[22] <= 8: # Таблица 25: Затерявшийся в глубинах
print('Ваше войско было разбито, а его остатки укрылись в разрушенном замке. '
'Там вы нашли туннели и подземелье, и вместо того чтобы ждать смерть от рук врага, вы с другими солдатами отправились искать другой выход.')
Table25()
elif 9 <= Story[22] <= 10: # Таблица 16: Рабство
print('Вы хорошо сражались, но не избежали пленения. Враги взяли вас под стражу.')
Table16()
elif 11 <= Story[22] <= 12: # Таблица 28: Посредничество
print('Ваш взвод наткнулся на вражеский. Переговоры зашли в тупик. \n'
'Вы попытались мирно урегулировать конфликт.')
Table28()
elif 13 <= Story[22] <= 14: # Конец Таблица 11: Поиски
print('Вы выжили, но какой ценой! Вы потеряли конечность и носите протез. \n'
'Ваша жизнь искателя приключений начинается с возвращения с войны.')
skill['Атлетика (СиЛ)'] += 2
variant = input('Занятся поисками волшебного протеза?[Да/Нет]: ')
if variant == 'Да':
print('Вы занялись поисками волшебного протеза.')
Table11('object')
else:
pass
elif 15 <= Story[22] <= 16: # Таблица 8: Прибытие
print('Вы прошли войну относительно невредимым, и были демобилизованы далеко от того места, где были рекрутированы.')
Table8()
elif 17 <= Story[22] <= 18: # Конец
print('Вы хорошо себя показали на поле боя и помогли многим своим соратникам. \n'
'Вы получаете тренировку Целительства за частое перевязывание чужих ран. Вы начинаете жизнь искателя приключений.')
skill['Лечение (МуД)'] += 2
elif 19 <= Story[22]:
print('Вы стали боевым героем и получили новое звание. \n'
'Вы начинаете жизнь искателя приключений со званием, предоставляющим военные привилегии и дополнительную ответственность.')
skill['Атлетика (СиЛ)'] += 2
Zoloto += 15
def Table23():# Таблица 23: Драка в таверне
global Zoloto
print('-Драка в таверне:')
print('В таверне начинается драка.')
Story[23] = story() + skill['Атлетика (СиЛ)']
if Story[23] <=4: # Таблица 21: Заключённый
print('Вы лежали без сознания на полу, когда прибыли стражи, и вас арестовали.')
Table21()
elif 5 <= Story[23] <= 8:# Таблица 19: В бегах
print('Во время драки таверна загорелась и сгорела дотла. \n'
'Вы вырвались наружу, но теперь придётся спасаться бегством.')
Table19()
elif 9 <= Story[23] <= 11: # Таблица 29: Нанятый шпионить
print('Вас нокаутировали, но хозяин заведения восхитился вашим буйством и не дал вас арестовать. \n'
'Вам всё равно придётся возмещать ущерб, проработав на хозяина несколько месяцев. \n'
'Во время работы вы услышали чужой заговор, а когда сообщили о нём, вам поручили внедриться в группу заговорщиков.')
Table29()
elif 12 <= Story[23] <= 15: # Таблица 8: Прибытие
print('Вы хорошо проявили себя и скрылись до того как придут власти и арестуют драчунов.')
skill['Атлетика (СиЛ)'] +=2
Table8()
elif 16 <= Story[23] <= 18: # Таблица 18: Друг в нужде
print('Вы передрались со всеми посетителями, а потом все пожали друг другу руки и начали заглушать боль песнями и выпивкой.\n '
'Вы получаете тренировку Атлетики и заводите несколько новых друзей.')
skill['Атлетика (СиЛ)'] += 2
Table18()
elif 19 <= Story[23]:
print('Вы спасли жизнь владельца заведения и отправили злодеев в бегство. В качестве награды вы становитесь совладельцем. \n '
'Ваша жизнь искателя приключений начинается.')
skill['Атлетика (СиЛ)'] += 2
Zoloto +=30
def Table24(): #Таблица 24: Выживание в глуши
global Zoloto
Story[24]= story() + skill['Природа (ИнТ)']
print('-Выживание в глуши:')
if Story[24] <= 5:# Таблица 31: Испытание стойкости Таблица 24: Выживание в глуши
print('Вы год прожили в глуши робинзоном, ни разу не соприкоснувшись с цивилизацией.')
skill['Природа (ИнТ)'] +=2
Table31()
Table24()
elif 6 <= Story[24] <= 7: #Таблица 25: Затерявшийся в глубинах
print('Вы укрылись в пещере на ночь, а когда с охоты вернулись совомедведи, вам пришлось отступить глубже в туннели.')
Table25()
elif 8 <= Story[24] <= 9: #Таблица 18: Друг в нужде
print('Вы встретили дварфа-старателя, ищущего новое место для рудника. \n'
'Вы с ним подружились, и он помог вам вернуться к цивилизации.\n'
'Взамен он попросил вас оказать небольшую услугу. ')
Table18()
elif 10 <= Story[24] <= 11: # Таблица 8: Прибытие
print('Вы нашли след, который должен вести в поселение.')
Table8()
elif 12 <= Story[24] <= 14: # Таблица 32: Пророческое послание Конец
print('Вы нашли волшебную поляну и встретились с фейским существом.\n'
' Оно коснулось вашего лба и перед тем как вы потеряли сознание, в вашем сознании появилась чужая мысль.')
Table32()
print('Ваша жизнь искателя приключений началась, когда вы проснулись далеко от чудесной поляны.')
elif 15 <= Story[24] <= 16: # Таблица 20: Охрана каравана
print('В глуши на вас вышел враждебно настроенный друид.\n'
' Впечатлившись вашей способностью выживать, он обучил вас новым навыкам и проводил до дороги, где вы примкнули к проходящему каравану.')
skill['Природа (ИнТ)'] +=2
Table20()
elif 17 <= Story[24] <= 18:
print('Вы приручили в странствиях дикую лошадь. \n'
' Вы начинаете жизнь искателя приключений с боевой лошадью и возвращением к цивилизации.')
skill['Природа (ИнТ)'] += 2
elif 19 <= Story[24]:
print('Вы стали отличным охотником и следопытом. \n'
'Вы вернулись из глуши сильным, и начали жизнь искателя приключений.')
skill['Скрытность (ЛоВ)'] += 2
skill['Природа (ИнТ)'] += 2
def Table25():# Таблица 25: Затерявшийся в глубинах
global Zoloto
print('-Затерявшийся в глубинах:')
Story[25] = story() + skill['Выживание (МуД)']
if Story[25] <=5:# Таблица 31: Испытание стойкости Таблица 25: Затерявшийся в глубинах
        print('Вы несколько дней бродили во тьме, питаясь насекомыми и летучими мышами.')
Table31()
Table25()
elif 6 <= Story[25] <= 7:# Таблица 16: Рабство
print('Вас взял в плен дуэргар, который привёл вас на рынок рабов в Подземье и продал там.')
Table16()
elif 8 <= Story[25] <= 9: # Таблица 18: Друг в нужде
print('Вы встретили дварфа-старателя, ищущего новое место для рудника. \n'
'Вы с ним подружились, и он помог вам вернуться к цивилизации.\n'
'Взамен он попросил вас оказать небольшую услугу.')
Table18()
elif 10 <= Story[25] <= 12: # Таблица 7: Проклятый
print('В туннелях вы нашли огромную статую с глазами из рубинов.\n'
'Вы выковыряли один, но вам пришлось бежать, когда за вами по туннелям бросилось племя гоблинов. \n'
'Увеличьте стартовый капитал на 50 зм. К несчастью, это рубин был проклят.')
Zoloto += 50
Table7()
elif 13 <= Story[25] <= 15: # Таблица 8: Прибытие
print('Вы бродили по туннелям, и столкнулись с жутким иллитидом. \n'
'Его щупальца метнулись к вам, и всё вокруг потемнело.\n'
'Вы очнулись в другом месте, не понимая как вы там оказались и почему вам позволили жить. \n'
'Странно, но теперь вы знаете Глубинную Речь. '
'Вы оказались в ...')
skill['Анализ (ИнТ)'] += 2
Table8()
elif 16 <= Story[25] <= 18: #Таблица 5: Злодей
print('Вы шли по туннелям, пока не обнаружили логово '+Table5m()+'. \n'
'Вы украли карту, ведущую к сокровищам, и выбрались на поверхность по туннелям врага.\n'
'Ваша жизнь искателя приключений начинается, и у вас есть похищенная карта.')
skill['Скрытность (ЛоВ)'] +=2
elif 19 <= Story[25]: # Таблица 8: Прибытие
        print('Вы неожиданно легко провели время под землёй, и стали адептом в тихом исследовании.\n'
              'Вы вылезли напротив...')
skill['Скрытность (ЛоВ)'] += 2
skill['Анализ (ИнТ)'] += 2
Table8()
def Table26(): #Таблица 26: Гильдия воров
'''
'''
global Zoloto
Story[26]= story() + skill['Ловкость Рук (ЛоВ)']
print('-Гильдия воров:')
if Story[26] <= 5: #Таблица 15: Преступление Таблица 21: Заключённый
        print('Гильдия использовала вас в качестве козла отпущения.\n'
              'Вас обвинили в '+Table15())
Table21()
elif 6 <= Story[26] <= 7:#Таблица 15: Преступление Таблица 19: В бегах
print('Гильдия отправила вас на задание:'+Table15()+', но вы попались. \n'
'Члены гильдии спасли вас от властей, но теперь вас разыскивают.')
Table19()
elif 8 <= Story[26] <= 9: #Таблица 34: Похищенный на корабль
        print('Вы часто работали в районе доков, и вас похитили на корабль.')
Table34()
elif 10 <= Story[26] <= 11: #Таблица 24: Выживание в глуши
print('Вы выполняли для гильдии мелкие поручения, но привлекли внимание властей. \n'
'У них недостаточно доказательств для ареста, так что вас просто изгнали из города.')
Table24()
elif 12 <= Story[26] <= 14: #Таблица 26: Гильдия воров
print('Вы много сделали для гильдии, но лидеры всё равно не уверены в вас.')
Zoloto += 10
skill['Ловкость Рук (ЛоВ)'] += 2
Table26()
# 'Если снова выпадет этот результат, вы заслужите достаточно доверия, чтобы стать полноправным членом гильдии. ' 'Вы получаете тренировку Воровства, и ваша жизнь искателя приключений начинается.'
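    # The original tabletop rule quoted above grants full guild membership when
    # the 12-14 result repeats; this port simply rerolls Table26 instead.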
elif 15 <= Story[26] <= 16:# Таблица 9: Месть
print('Ваши навыки в гильдии очень ценят, и вскоре вам начали поручать самую ценную работу. \n'
'Соперник, завидующий вашему продвижению, начал вам строить козни. ')
skill['Ловкость Рук (ЛоВ)'] += 2
skill['Скрытность (ЛоВ)'] += 2
Table9()
elif 17 <= Story[26] <= 18:#Таблица 29: Нанятый шпионить
print('Вы провернули много хитрых грабежей, и получили под вымышленным именем титул великого вора.\n'
'В конечном счёте власти вас поймали, но в тюрьму вас не посадили. \n'
'Вам простят преступления, если вы будете на них шпионить.')
skill['Ловкость Рук (ЛоВ)'] += 4
skill['Обман (ХаР)'] += 2
Table29()
elif 19 <= Story[26]:# Конец
print('Вы стали опытным преступником, и когда глава гильдии был убит: '+Table5()+', вас выбрали новым главой. \n'
'Ваша жизнь искателя приключений начинается с того, что вы являетесь лидером гильдии воров и несёте за неё полную ответственность.')
skill['Ловкость Рук (ЛоВ)'] += 6
skill['Скрытность (ЛоВ)'] += 4
skill['Анализ (ИнТ)'] += 2
Zoloto += 50
def Table27(): #Таблица 27: Отправленный в храм
global Zoloto
Story[27] = story() + skill['Религия (ИнТ)']
print('-Отправленный в храм:')
if Story[27] <= 4: # Таблица 7: Проклятый
print('Ваше поведение оскорбило богов, и теперь вы прокляты.')
Table7()
elif 5 <= Story[27] <= 7: # Таблица 19: В бегах
print('Вы сорвали религиозный обряд и случайно подожгли храм. \n'
' Жрецы напали на вас до того как вы смогли объяснить, что это была ошибка, и вам пришлось бежать.\n'
' Теперь они вас разыскивают. ')
Table19()
elif 8 <= Story[27] <= 10: # Таблица 28: Посредничество
print('Вы задали неуместный вопрос о догме храма, и подняли разногласия, из-за которых жрецы поссорились. \n'
'Вы попытались предотвратить раскол церкви.')
Table28()
elif 11 <= Story[27] <= 13: # Таблица 11: Поиски
print('Вам было таинственное видение, которое никто из жрецов не смог объяснить. \n'
'Они отправили вас на поиски странствующего оракула своего ордена, чтобы вы могли узнать больше.')
Table11('person')
elif 14 <= Story[27] <= 16: #Таблица 4: Попечение
print('Ваше обучение в храме и жажда познания впечатлили жрецов. \n'
'Они научили вас всему, чему могли, и отправили к наставнику, чтобы вы могли и дальше получать знания.')
skill['Религия (ИнТ)'] += 2
Table4()
elif 17 <= Story[27] <= 18:#Таблица 8: Прибытие
print('Ваша преданность храму впечатлила служивших там жрецов. Они отправили вас в мир разносить веру. ')
skill['Религия (ИнТ)'] += 2
skill['Лечение (МуД)'] += 2
Table8()
elif 19 <= Story[27]: #Таблица 32: Пророческое послание
print('Вы получили в храме божественное видение.')
skill['Религия (ИнТ)'] += 2
skill['Проницательность (МуД)'] += 2
Table32()
print('Ваша жизнь искателя приключений начинается.')
def Table28(): #Таблица 28: Посредничество
global Zoloto
Story[28]= story() + skill['Убеждение (ХаР)']
print('-Посредничество:')
if Story[28] <= 4: # Таблица 22: Военная служба
print('Переговоры провалились, ситуация вышла из-под контроля и разразилась новая война.\n '
'Вас заставили сражаться на одной из сторон.')
Table22()
elif 5 <= Story[28] <= 6:# Таблица 19: В бегах
print('Ваша попытка провести переговоры грубо провалилась, и вам пришлось ретироваться от разозлённого собеседника.')
Table19()
elif 7 <= Story[28] <= 8:# Таблица 11: Поиски
print('Те, с кем вы общались, согласятся на ваши условия только в обмен на священный для них предмет.')
Table11('object')
elif 9 <= Story[28] <= 11:#Таблица 11: Поиски
print('Ваших навыков оказалось недостаточно, и другая сторона потребовала, чтобы вы привели кого-то другого, и только тогда они смогут принять решение.')
Table11('person')
elif 12 <= Story[28] <= 14:#Таблица 8: Прибытие
print('Вы едва-едва достигли успеха, но разозлённая другая сторона потребовала, чтобы вы уехали, и только так они согласятся.')
Table8()
elif 15 <= Story[28] <= 17:#Таблица 33: Дворянство
print('Вы привели убедительный аргумент, и завоевали уважение тех, с кем вы разговаривали. \n'
'Эта мудрость привлекла внимание дворянина.')
skill['Убеждение (ХаР)'] +=2
Table33()
elif 18 <= Story[28]:
print('Ваше красноречие впечатлило тех, с кем вы говорили, и они согласились на ваши условия. \n'
'Вас объявили почётным гостем и одарили знаками внимания. \n'
'Ваша жизнь искателя приключений начинается. ')
skill['Убеждение (ХаР)'] += 2
skill['Проницательность (МуД)'] += 2
Zoloto += 10
def Table29(): #Таблица 29: Нанятый шпионить
global Zoloto
Story[29]= story() + skill['Обман (ХаР)']
print('-Нанятый шпионить:')
if Story[29] <= 4:#21: Заключённый
print('Вы практически сразу выдали себя,\n'
'и те, за кем вы шпионили, бросили вас за решётку.')
Table21()
    elif Story[29] == 5:#19: В бегах
print('Другу нужна помощь в спасении члена вашей семьи, которого хотят казнить.\n'
'Вы преуспели, но теперь вас разыскивают власти.')
Table19()
elif 6 <= Story[29] <= 7:#18: Друг в нужде
print('Вам пришлось шпионить за другом.\n'
'Вы предали нанимателей и попытались помочь своему приятелю.')
Table18()
elif 8<= Story[29] <= 9:#9: Месть
print('Выполняя задание, вы вышли на вражеского шпиона, и всё осложнилось.\n'
'Ваш соперник сбежал с нужной вам информацией, и вы начали его преследовать.')
Table9()
elif 10<= Story[29] <= 13:#11: Поиски
print('Ваша слежка раскрыла заговор, но у вас ещё не все части головоломки.\n'
'Вы отправились за информацией, которая откроет правду.')
Table11('infa')
elif 14<= Story[29] <= 16:#---Конец---
print('Во время слежки вы провели серьёзные изыскания и сблизились с учёными по теме своих поисков.\n'
'Однако найденную вами информацию использовали неожиданным образом, и теперь эти учёные в беде.\n'
'Вы получаете тренировку Истории. Ваша жизнь искателя приключений начинается,\n'
'но те, кто помогал вам, находятся в опасности от событий, произошедших в вашем прошлом.')
# sys.exit("Конец")
elif 17<= Story[29] <= 18:#5: Злодей,19: В бегах
print('С теми, за кем вы следите, сотрудничает опасный '+Table5m()+'. \n'
'Благодаря великолепному обману, вы подобрались к нему и похитили нужную информацию.\n'
'После того как вы ушли, он понял, что вы сделали.')
skill['Обман (ХаР)'] += 2
Table19()
elif 19<= Story[29]:#---Конец---
print('В ходе миссии вы столкнулись с вражеским шпионом, и начали политическую игру на грани фола, включая взаимные подколки и подлог. \n'
'Вы разошлись друзьями, и оба оставили свою цель, ничего не узнав. Ваша жизнь искателя приключений начинается.')
skill['Обман (ХаР)'] += 2
skill['Скрытность (ЛоВ)'] += 5
def Table30(): #Таблица 30: Цирк
    global Zoloto, CONm
# Story[30] = story()
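    # NOTE: Story[30] is expected to be set by the caller before Table30() runs
    # (e.g. the conjurer branch below reassigns Story[30] = 5 and recurses);
    # the commented-out roll above is kept for reference.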
print('-Цирк:')
if Story[30] == 1:# Таблица 5: Злодей Таблица 9: Месть
print('До того как началась ваша жизнь в цирке, '+Table5m()+' разрушил его. \n'
'Осталось только несколько актёров вместе с вами. '
'Вы и другие актёры попытались отомстить.')
Table9()
elif Story[30] == 2:
print('Блошиный цирк. Совершите проверку Внимательности (Сл 12). ')
if skill['Внимательность (МуД)'] + story() > 12:
skill['Внимательность (МуД)'] +=2
print('Ваша жизнь искателя приключений начинается после успешного сезона выступлений.')
Zoloto +=10
else:
print('Ваша жизнь искателя приключений начинается с того, что на вас завелись блохи.')
elif Story[30] == 3:
print('Мим.')
if skill['Обман (ХаР)'] + story() > 12:
skill['Обман (ХаР)'] += 2
print('У вас отлично выходит и вы можете провести с цирком ещё один сезон;')
Table30()
else: #Таблица 17: Неудача
print('Вас выгоняют из цирка, и вы попадаете в засаду на дороге;')
Table17()
elif Story[30] == 4:
print('Чудаковатый актёр второго плана.')
if skill['Запугивание (ХаР)'] + story() > 12:
skill['Запугивание (ХаР)'] += 2
print('После выступления один из зрителей протянул вам кусок пергамента и сразу исчез в толпе:')
Table32()
print('Ваша жизнь искателя приключений начинается.')
else: #Таблица 8: Прибытие
print('Вас выгоняют из цирка;')
Table8()
elif Story[30] == 5:
print('Клоун.')
if skill['Убеждение (ХаР)'] + story() > 12: #Таблица 33: Дворянство
skill['Убеждение (ХаР)'] += 2
print('Вы привлекаете внимание дворянина, которому нужен шут для ребёнка; ')
Table33()
else:
print('Ваша жизнь искателя приключений начинается с позорного изгнания из цирка.')
elif Story[30] == 6:
print('Силач.')
if skill['Атлетика (СиЛ)'] + story() > 12: # Таблица 34: Похищенный на корабль
skill['Атлетика (СиЛ)'] += 2
print('Ваше сильное тело привлекло внимание вербовщиков на флот;')
Table34()
else: #Таблица 22: Военная служба
print('Вам отказываются платить; \n'
'В поисках денег вы отправляетесь на службу в армию:')
Zoloto -= 10
Table22()
elif Story[30] == 7:
print('Всадник-акробат.')
if skill['Акробатика (ЛоВ)'] + story() > 12:
skill['Акробатика (ЛоВ)'] += 2
print('Ваша ловкость впечатлила зрителей. Ваша жизнь искателя приключений начинается в успешной труппе')
elif skill['Уход за животными (МуД)'] + story() > 12:
skill['Уход за животными (МуД)'] += 2
            print('Вы смогли натренировать лошадей. Ваша жизнь искателя приключений начинается в известной труппе')
else:#Таблица 31: Испытание стойкости
print('Вы получаете травму;')
Table31()
print('Ваша жизнь искателя приключений начинается после того как цирк уходит без вас.')
elif Story[30] == 8:
        print('Актёр, демонстрирующий освобождение от оков.')
if skill['Выступление (ХаР)'] + story() > 12:
skill['Выступление (ХаР)'] += 2
print('Ваша жизнь искателя приключений начинается после успешного сезона выступлений.')
else: #Таблица 26: Гильдия воров
print('Вашим наставником становится представитель гильдии воров;')
Table26()
elif Story[30] == 9:
print('Дрессировщик.')
if skill['Уход за животными (МуД)'] + story() > 12:
skill['Уход за животными (МуД)'] += 2
print('Ваша жизнь искателя приключений начинается после успешного сезона выступлений.')
else:#Таблица 24: Выживание в глуши
print('В случае провала ваше животное сбегает, и вы отправляетесь его искать;')
Table24()
elif Story[30] == 10:
print('Танцор.')
if skill['Акробатика (ЛоВ)'] + story() > 12:#Таблица 33: Дворянство
skill['Акробатика (ЛоВ)'] += 2
print('Вы попадаетесь на глаза дворянину, который переманивает вас к себе; ')
Table33()
else:#Таблица 23: Драка в таверне
print('Вас отправляют танцевать в таверну;')
Table23()
elif Story[30] == 11:
print('Фокусник.')
if skill['Ловкость Рук (ЛоВ)'] + story() > 12:# Таблица 10: Башня волшебника
skill['Ловкость Рук (ЛоВ)'] += 2
print('Некий волшебник проявляет желание обучить вас настоящей магии;')
Table10()
else:
print('Ваше выступление вызвало у публики хохот;')
Story[30] = 5
Table30()
elif Story[30] == 12:
print('Гибкий акробат.')
if skill['Акробатика (ЛоВ)'] + story() > 12:# Таблица 29: Нанятый шпионить
skill['Скрытность (ЛоВ)'] += 2
skill['Акробатика (ЛоВ)'] += 2
print('Вы привлекаете внимание лидера шпионов, которому нужно, чтобы кто-то с вашими навыками проник во вражеский лагерь;')
Table29()
else:#Таблица 16: Рабство
print('Жадный хозяин цирка продаёт вас в рабство;')
Table16()
elif Story[30] == 13:
print('Гимнаст.')
if skill['Атлетика (СиЛ)'] + story() > 12:#Таблица 20: Охрана каравана
skill['Атлетика (СиЛ)'] += 2
print('Вы с товарищами отправились в соседний город:')
Table20()
else: # Таблица 3: Уход из дома
print('Вы с позором возвращаетесь домой;')
Table3()
elif Story[30] == 14:
print('Жонглёр.')
if skill['Ловкость Рук (ЛоВ)'] + story() > 12:
skill['Ловкость Рук (ЛоВ)'] += 2
print('Ваша ловкость впечатлила зрителей. Ваша жизнь искателя приключений начинается в успешной труппе')
else: # #Таблица 8: Прибытие
print('Вас выгоняют из цирка;')
Table8()
elif Story[30] == 15:
print('Шпагоглотатель.')
if skill['Акробатика (ЛоВ)'] + story() > 12:#Таблица 17: Неудача
skill['Акробатика (ЛоВ)'] += 2
print('Вы выступаете в пути;')
Table17()
else:
print('Ваша жизнь искателя приключений начинается, но вы не можете говорить до первого продолжительного отдыха')
elif Story[30] == 16:
print('Акробат.')
if skill['Акробатика (ЛоВ)'] + story() > 12:# Таблица 9: Месть
skill['Акробатика (ЛоВ)'] += 4
print('Вы встречаете дворянина, который проявляет к вам интерес.\n'
'Этот дворянин хочет отомстить '+Table5m()+', и вас втягивают в это противостояние. ')
Table9()
else:
            print('Вы получаете травму;')
Table31()
if Story[31] < 9:
print('Ваша жизнь искателя приключений начинается после того, как сочувствующие товарищи по цирку уходят без вас.')
else: # Таблица 29: Нанятый шпионить
print('Вы привлекаете внимание лидера шпионов, которому нужно, чтобы кто-то с вашими навыками проник во вражеский лагерь;')
Table29()
elif Story[30] == 17:
print('Метатель кинжалов.')
if DEXm + story() > 15:
skill['Внимательность (МуД)'] += 2
print('Ваша жизнь искателя приключений начинается успешным артистом.')
else: #Таблица 21: Заключённый
print('Вы случайно кого-то убиваете или раните, и вас заключают под стражу;')
Table21()
elif Story[30] == 18:
print('Глотатель пламени.')
if CONm + story() > 12:#Таблица 23: Драка в таверне
CONm += 2
print('Вы начинаете выступать в таверне;')
Table23()
elif INTm + story() > 12:#Таблица 10: Башня волшебника
skill['Магия (ИнТ)'] +=2
print('Некий волшебник проявляет желание обучить вас призывать огонь;')
Table10()
else:
print('Ваша жизнь искателя приключений начинается, но вы не можете говорить до первого продолжительного отдыха.')
elif Story[30] == 19:
print('Предсказатель.')
if skill['Обман (ХаР)'] + story() > 12:#Таблица 32: Пророческое послание
skill['Обман (ХаР)'] += 2
print('Вы получаете пугающее видение;')
Table32()
else:#Таблица 7: Проклятый
print('Вы обижаете настоящего прорицателя и становитесь проклятым;')
Table7()
elif Story[30] == 20:
print('Управляющий.')
if skill['Убеждение (ХаР)'] + story() > 12:# Таблица 6: Торговые дела
print('Торговец нанимает вас продавцом для своего магазина;')
Zoloto += 10
Table6()
else:
print('Цирк разоряется после нескольких неудачных выступлений. \n'
'Ваша жизнь искателя приключений начинается с того, что многие актёры обвиняют вас в потере источника средств к существованию,\n'
' а кто-то может даже желать отомстить')
def Table31(): #Таблица 31: Испытание стойкости
global Zoloto, CON, CONm
Story[31] = story() + CONm
print('-Испытание стойкости:')
if Story[31] <= 4:
print('Вы чуть было не умерли. Вы теперь хромаете, вас терзает жуткий кашель, \n'
' у вас искривлённые руки, подслеповатый глаз или другое ярко выраженное недомогание.')
elif 5 <= Story[31] <= 8:
print('Вы едва выжили. Всё это оставило на вас физические и душевные шрамы, но вы как-то их компенсировали.')
elif 9 <= Story[31] <= 12:
print('Вы справились с неприятностями, не теряя боевого духа.')
elif 13 <= Story[31] <= 16:
print('Вы справились «на ура».')
CON += 2
elif 17 <= Story[31]:
print('Это было просто. Вы стали сильнее чем были.')
skill['Атлетика (СиЛ)'] +=2
CON += 2
def Table32(): #Таблица 32: Пророческое послание
global Zoloto
Story[32] = story()
print('-Пророческое послание:')
if Story[32] == 1:
print('Вы — предвестник конца света.')
elif Story[32] == 2:
print('Вы найдёте великое сокровище, которое нужно будет вернуть его владельцу, иначе вы никогда не достигнете желаемого.')
elif Story[32] == 3:
print('Вам суждено стать создателем королей.')
elif Story[32] == 4:
print('Тёмные силы считают вас принадлежащим им.')
elif Story[32] == 5:
print('Некий бог считает вас особенным, и внимательно наблюдает за вами.')
elif Story[32] == 6:
print('При рождении вас разлучили с близнецом.')
elif Story[32] == 7:
print('У вас не родная семья. На самом деле вы — наследник престола.')
elif Story[32] == 8:
print('Вы не рождались, вас создали искусственно.')
elif Story[32] == 9:
print('В вашем теле обитают две души.')
elif Story[32] == 10:
print('Ваш близкий друг заключён в предмете, принадлежащем злодею. Совершите бросок по таблице 5, чтобы определить, кто это был.')
elif Story[32] == 11:
print('Когда-то другом вашей семьи был великий злодей. Совершите бросок по таблице 5, чтобы определить, кто это был.')
elif Story[32] == 12:
print('Ваш родственник носит притворную личину.')
elif Story[32] == 13:
print('Ваших истинных друзей можно сосчитать по пальцам одной руки.')
elif Story[32] == 14:
print('Один из ваших друзей скоро вас предаст.')
elif Story[32] == 15:
print('Враг ближе, чем вы думаете.')
elif Story[32] == 16:
print('Не доверяй картам, когда заблудишься.')
elif Story[32] == 17:
print('Совет, которого вы скоро будете искать, заведёт вас не туда.')
elif Story[32] == 18:
print('Не путешествуйте по воде в течение месяца.')
elif Story[32] == 19:
print('Укрывайтесь от бурь, если только они не придут с севера.')
elif Story[32] == 20:
print('Не доверяйте своим инстинктам ночью.')
def Table33(): #Таблица 33: Дворянство
global Zoloto
Story[33] = story() + skill['Проницательность (МуД)']
print('-Дворянство:')
if Story[33] <= 3:# Таблица 16: Рабство
print('Ваше «служение» в доме дворянина на самом деле было рабством.')
Table16()
elif 4 <= Story[33] <= 5: #Таблица 21: Заключённый
print('Дворянин поручил вам задачу, которая оказалась нелегальной. \n'
'Вас арестовали, и дворянин позволил вам гнить в тюрьме.')
Table21()
elif 6 <= Story[33] <= 7: # Таблица 24: Выживание в глуши
print('Один из родственников дворянина слишком заинтересовался вами, и тот в гневе выгнал вас.')
Table24()
elif 8 <= Story[33] <= 9: #Таблица 8: Прибытие
print('Вы приняли участие в маскараде и веселили людей развязным поведением, но когда все сняли маски, \n'
'вы забылись и продолжили кривляться, чем оскорбили гостей и смутили хозяина дома.\n'
'Вас выставили из дома дворянина.')
Table8()
elif 10 <= Story[33] <= 11: #Таблица 12: Персона
print('Вы очень сблизились с дочерью или сыном дворянина, и решили сбежать вдвоём. \n'
'Ночью вы вместе убежали от разозлённого отца.')
Table12()
elif 12 <= Story[33] <= 14: #Таблица 6: Торговые дела
print('Дворянин так проникся к вам, что доверил вести дела своего дома.')
skill['Проницательность (МуД)'] += 2
Table6()
elif 15 <= Story[33] <= 17: #Конец
print('Вы так понравились дворянину, что он пообещал породниться с вами. \n'
'Ваша жизнь искателя приключений начинается до того, как вы женитесь.')
skill['Проницательность (МуД)'] += 2
Zoloto += 50
elif 18 <= Story[33]: #Таблица 18: Друг в нужде
print('Дворянин становится вашим хорошим другом.')
skill['Проницательность (МуД)'] += 2
skill['Убеждение (ХаР)'] += 2
Table18()
def Table34(): #Таблица 34: Похищенный на корабль
global Zoloto, CONm
Story_pirate = story()
print('-Похищенный на корабль:')
if Story_pirate <= 5: #Таблица 31: Испытание стойкости Таблица 34: Похищенный на корабль
print('Вы целый год прожили жизнью пирата.')
CONm += 1
Table31()
Table34()
elif 6 <= Story_pirate <= 7:#Таблица 27: Отправленный в храм
print('Корабль несколько недель простоял в зоне штиля. \n'
'Когда ветер всё же подул, он вынес вас на берег к месту у большого храма. \n'
'В знак благодарности богам вы какое-то время служили в этом храме.')
Table27()
elif 8 <= Story_pirate <= 9:
print('Вы возглавили мятеж против капитана. ')
if skill['Атлетика (СиЛ)'] + story() >= 12: # Конец
print('Вы начинаете жизнь искателя приключений в качестве капитана корабля с небольшим экипажем.')
skill['Атлетика (СиЛ)'] += 2
else: # Таблица 21: Заключённый
print('Вас сажают в тюрьму, когда корабль приходит в порт;')
Table21()
elif Story_pirate == 10: #Таблица 22: Военная служба
        print('Вы славно потрудились в сражениях на море.\n'
              'Вы продолжили службу на суше.')
skill['Атлетика (СиЛ)'] += 2
Zoloto += 10
Table22()
elif Story_pirate == 11: # Таблица 24: Выживание в глуши
print('Вы сбежали во время остановки для охоты на таинственном побережье.')
Table24()
elif Story_pirate == 12: #Таблица 31: Испытание стойкости Таблица 33: Дворянство
print('Корабль потопило морское чудище.')
Table31()
print('Вас спас проходящий мимо корабль дворянина.')
Table33()
elif Story_pirate == 13: # Конец
print(Table5()+' потопил ваш корабль и взял вас в плен. \n'
'Один из сторонников злодея сжалился над вами и позволил сбежать.\n'
'Ваша жизнь искателя приключений начинается, и у вас теперь есть враг и потенциальный друг в стане врага.')
elif Story_pirate == 14: # Таблица 7: Проклятый
print('Таинственная буря сбила корабль с курса, и вы оказались на необычном острове.\n'
'Исследуя его, вы подхватили проклятье, и экипаж бросил вас в ближайшем порту. ')
Table7()
elif Story_pirate == 15:
print('Вы дежурили в «вороньем гнезде». Совершите проверку Внимательности (Сл 12). ')
if skill['Внимательность (МуД)'] + story() >= 12:
print('Ваша жизнь искателя приключений начинается после успешного рейса.')
skill['Внимательность (МуД)'] += 2
else:
print('Корабль тонет. Вас считают приносящим неудачу, и высаживают на берег; \n'
'Ваша жизнь искателя приключений начинается.')
elif Story_pirate == 16: #Таблица 31: Испытание стойкости
print('Вы разозлили капитана, и вас заставили пройтись по рее. ')
Zoloto -= 20
Table31()
print('Ваша жизнь искателя приключений начинается с того, что вас выносит на берег.')
elif Story_pirate == 17: #Таблица 19: В бегах
print('Вы украли на корабле карту сокровищ и скрылись на берегу.'
' Теперь вы прячетесь от моряков, желающих вернуть карту.')
Table19()
elif Story_pirate == 18: #Таблица 16: Рабство
print('Пираты напали на корабль, захватили вас в плен, и продали в рабство.')
Table16()
elif Story_pirate == 19: #Таблица 31: Испытание стойкости Таблица 8: Прибытие
print('Вас высадили на острове за оскорбление капитана.')
Table31()
print('Вас спасли.')
Table8()
elif Story_pirate == 20: #Таблица 6: Торговые дела
print('Вы подружились с капитаном и убедили его заняться мирной деятельностью.\n'
'Вы начали представлять торговые интересы капитана на суше.')
skill['Убеждение (ХаР)'] += 3
Table6()
print('--------------------------------------------------------------------------')
print('-Обстоятельства рождения вашего персонажа:\nКто Вас вырастил?')
Table1()
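# Result 5 on Table 1 is assumed to mean "raised by two parties": the script
# rerolls (story() - 2 keeps the value inside the table's range) and prints
# the second guardian as well.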
if Story[1] != 5:
if Story[1] != 19:
Story[1] = story() - 2
if Story[1] == 5:
Story[1] = story() - 2
print('и')
Table1()
print('-Есть ли у вас братья и сёстры?')
Table2()
Table3()
print('--------------')
Prof = profesion()
STR, DEX, CON, INT, WSD, CHR = proftochar()
print('Характеристики вашего '+Prof+':\n'
'Сила:', STR, '\n'
'Ловкость:', DEX, '\n'
"Телосложение:", CON, '\n'
"Интеллект:", INT, '\n'
"Мудрость:", WSD, '\n'
"Харизма:", CHR)
print('--------------')
print(skill)
print('Золото: ', Zoloto)
input('')
| [
"[email protected]"
] | |
bf4dfb85175642bde5e44e837f015463e1b8fed7 | 036a41c913b3a4e7ae265e22a672dd89302d3200 | /1201-1300/1237/1237_Python_1.py | 7e07569009a051950622f77e1c5934eb19a13fe8 | [] | no_license | ChangxingJiang/LeetCode | e76f96ebda68d7ade53575354479cfc33ad4f627 | a2209206cdd7229dd33e416f611e71a984a8dd9e | refs/heads/master | 2023-04-13T15:23:35.174390 | 2021-04-24T05:54:14 | 2021-04-24T05:54:14 | 272,088,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | from typing import List
class CustomFunction:
def f(self, x, y):
return x + y
class Solution:
def findSolution(self, customfunction: 'CustomFunction', z: int) -> List[List[int]]:
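        # f is monotonically increasing in both arguments, so sweep with two
        # pointers: x grows from 1, y shrinks from 1000. The mid-point probes
        # let a pointer jump ahead whenever a whole prefix/suffix can be ruled
        # out, instead of stepping one cell at a time.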
idx1 = 1
idx2 = 1000
ans = []
while idx1 <= 1000 and idx2 >= 1:
if customfunction.f(idx1, idx2) < z:
mid = (idx1 + idx2) // 2
if customfunction.f(mid, idx2) < z:
idx1 = max(mid, idx1 + 1)
else:
idx1 += 1
elif customfunction.f(idx1, idx2) > z:
mid = (idx1 + idx2) // 2
if customfunction.f(idx1, mid) > z:
idx2 = min(mid, idx2 - 1)
else:
idx2 -= 1
else:
ans.append([idx1, idx2])
idx1 += 1
idx2 -= 1
return ans
if __name__ == "__main__":
print(Solution().findSolution(CustomFunction(), 5)) # [[1,4],[2,3],[3,2],[4,1]]
| [
"[email protected]"
] | |
b35153d84b3d5de30ced8c149b72bb65a1168607 | 1ba5c90292dbed982a23167fd083dd2cf29f6d8d | /bin/boliau-filter | 5a2dda5ecd69ac3fffc88b2999546c11e061382c | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | hychen/boliau | 8d99a331056ef18001cae5503084624c1e43618c | 618e4557c7b4a3d97c9926b1d7e691291472ff7c | refs/heads/master | 2021-01-01T17:37:25.012545 | 2013-08-16T09:58:58 | 2013-08-16T09:58:58 | 7,454,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | #!/usr/bin/env python
# -*- coding: utf-8 -*
#
# File: boliau-filter
#
# Copyright (C) 2012 Hsin-Yi Chen (hychen)
# Author(s): Hsin-Yi Chen (hychen) <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
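# Thin CLI entry point: argument parsing and the actual filtering logic are
# assumed to live in boliau.cmd.do_filter().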
from boliau import cmd
cmd.do_filter()
| [
"[email protected]"
] | ||
99012b699de3c397114f096d138336ae93ba00d4 | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/cdn/azure-mgmt-cdn/azure/mgmt/cdn/aio/operations/_custom_domains_operations.py | 5eb84d3dc8f8ef6e445a6fe922db3b4cd6ecf94c | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 31,052 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CustomDomainsOperations:
"""CustomDomainsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.cdn.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_endpoint(
self,
resource_group_name: str,
profile_name: str,
endpoint_name: str,
**kwargs
) -> AsyncIterable["models.CustomDomainListResult"]:
"""Lists all of the existing custom domains within an endpoint.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param endpoint_name: Name of the endpoint under the profile which is unique globally.
:type endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CustomDomainListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cdn.models.CustomDomainListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CustomDomainListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_endpoint.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CustomDomainListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_endpoint.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains'} # type: ignore
async def get(
self,
resource_group_name: str,
profile_name: str,
endpoint_name: str,
custom_domain_name: str,
**kwargs
) -> "models.CustomDomain":
"""Gets an existing custom domain within an endpoint.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param endpoint_name: Name of the endpoint under the profile which is unique globally.
:type endpoint_name: str
:param custom_domain_name: Name of the custom domain within an endpoint.
:type custom_domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomDomain, or the result of cls(response)
:rtype: ~azure.mgmt.cdn.models.CustomDomain
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CustomDomain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-15"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
'customDomainName': self._serialize.url("custom_domain_name", custom_domain_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CustomDomain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
profile_name: str,
endpoint_name: str,
custom_domain_name: str,
host_name: Optional[str] = None,
**kwargs
) -> "models.CustomDomain":
cls = kwargs.pop('cls', None) # type: ClsType["models.CustomDomain"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_custom_domain_properties = models.CustomDomainParameters(host_name=host_name)
api_version = "2020-04-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
'customDomainName': self._serialize.url("custom_domain_name", custom_domain_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_custom_domain_properties, 'CustomDomainParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('CustomDomain', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('CustomDomain', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('CustomDomain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
profile_name: str,
endpoint_name: str,
custom_domain_name: str,
host_name: Optional[str] = None,
**kwargs
) -> AsyncLROPoller["models.CustomDomain"]:
"""Creates a new custom domain within an endpoint.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param endpoint_name: Name of the endpoint under the profile which is unique globally.
:type endpoint_name: str
:param custom_domain_name: Name of the custom domain within an endpoint.
:type custom_domain_name: str
:param host_name: The host name of the custom domain. Must be a domain name.
:type host_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CustomDomain or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cdn.models.CustomDomain]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.CustomDomain"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
profile_name=profile_name,
endpoint_name=endpoint_name,
custom_domain_name=custom_domain_name,
host_name=host_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('CustomDomain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
profile_name: str,
endpoint_name: str,
custom_domain_name: str,
**kwargs
) -> Optional["models.CustomDomain"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.CustomDomain"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-15"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
'customDomainName': self._serialize.url("custom_domain_name", custom_domain_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize('CustomDomain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
profile_name: str,
endpoint_name: str,
custom_domain_name: str,
**kwargs
) -> AsyncLROPoller["models.CustomDomain"]:
"""Deletes an existing custom domain within an endpoint.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param endpoint_name: Name of the endpoint under the profile which is unique globally.
:type endpoint_name: str
:param custom_domain_name: Name of the custom domain within an endpoint.
:type custom_domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.CustomDomain"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
profile_name=profile_name,
endpoint_name=endpoint_name,
custom_domain_name=custom_domain_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('CustomDomain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}'} # type: ignore
async def disable_custom_https(
self,
resource_group_name: str,
profile_name: str,
endpoint_name: str,
custom_domain_name: str,
**kwargs
) -> Optional["models.CustomDomain"]:
"""Disable https delivery of the custom domain.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param endpoint_name: Name of the endpoint under the profile which is unique globally.
:type endpoint_name: str
:param custom_domain_name: Name of the custom domain within an endpoint.
:type custom_domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomDomain, or the result of cls(response)
:rtype: ~azure.mgmt.cdn.models.CustomDomain or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.CustomDomain"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-15"
accept = "application/json"
# Construct URL
url = self.disable_custom_https.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
'customDomainName': self._serialize.url("custom_domain_name", custom_domain_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize('CustomDomain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
disable_custom_https.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}/disableCustomHttps'} # type: ignore
async def enable_custom_https(
self,
resource_group_name: str,
profile_name: str,
endpoint_name: str,
custom_domain_name: str,
custom_domain_https_parameters: Optional["models.CustomDomainHttpsParameters"] = None,
**kwargs
) -> Optional["models.CustomDomain"]:
"""Enable https delivery of the custom domain.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param profile_name: Name of the CDN profile which is unique within the resource group.
:type profile_name: str
:param endpoint_name: Name of the endpoint under the profile which is unique globally.
:type endpoint_name: str
:param custom_domain_name: Name of the custom domain within an endpoint.
:type custom_domain_name: str
:param custom_domain_https_parameters: The configuration specifying how to enable HTTPS for the
custom domain - using CDN managed certificate or user's own certificate. If not specified,
enabling ssl uses CDN managed certificate by default.
:type custom_domain_https_parameters: ~azure.mgmt.cdn.models.CustomDomainHttpsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomDomain, or the result of cls(response)
:rtype: ~azure.mgmt.cdn.models.CustomDomain or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.CustomDomain"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.enable_custom_https.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'profileName': self._serialize.url("profile_name", profile_name, 'str'),
'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
'customDomainName': self._serialize.url("custom_domain_name", custom_domain_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if custom_domain_https_parameters is not None:
body_content = self._serialize.body(custom_domain_https_parameters, 'CustomDomainHttpsParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize('CustomDomain', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
enable_custom_https.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}/enableCustomHttps'} # type: ignore
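
# Example usage (illustrative sketch only; assumes an authenticated async
# ``CdnManagementClient`` whose ``custom_domains`` attribute is this class):
#
#     poller = await client.custom_domains.begin_create(
#         resource_group_name="my-rg",
#         profile_name="my-profile",
#         endpoint_name="my-endpoint",
#         custom_domain_name="www-contoso-com",
#         host_name="www.contoso.com",
#     )
#     custom_domain = await poller.result()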
| [
"[email protected]"
] | |
338ab5824090a54458b92bcebc0868ff7bbea5b4 | 61eec9771de17885af1f817ddf7df8eaa1fdd168 | /build/robmovil_msgs/catkin_generated/generate_cached_setup.py | 116ed7667ac27a834209aaf5ce7318f3a3bf28e8 | [] | no_license | jrr1984/TP_FINAL_ROBOTICA | c1ce681b5ea4ca4fea7bfcde00fb289dc81842df | 46988eccec3d8e11f56a6e6e43315d446ded12e7 | refs/heads/master | 2020-03-28T21:26:39.339951 | 2018-09-17T16:42:14 | 2018-09-17T16:42:14 | 149,156,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/opt/ros/kinetic;/home/jrr/catkin_ws/devel".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/jrr/catkin_ws/devel/.private/robmovil_msgs/env.sh')
output_filename = '/home/jrr/catkin_ws/build/robmovil_msgs/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
fc075357702383e855d1b916cb2ef66b92f99214 | 7fdac5209f86de756b9a8123a0911b70738eceeb | /pySDC/tests/test_tutorials/test_step_1.py | 50d02c63344d3cb3b66cf680ff3d9c731d6e2d36 | [
"BSD-2-Clause"
] | permissive | Parallel-in-Time/pySDC | edc66e399f6066effc5aaa376883e88e06b5332b | 1a51834bedffd4472e344bed28f4d766614b1537 | refs/heads/master | 2023-08-30T23:17:56.017934 | 2023-08-30T05:42:00 | 2023-08-30T05:42:00 | 26,165,004 | 30 | 31 | BSD-2-Clause | 2023-09-14T06:40:13 | 2014-11-04T10:56:53 | Jupyter Notebook | UTF-8 | Python | false | false | 524 | py | import pytest
@pytest.mark.base
def test_A():
from pySDC.tutorial.step_1.A_spatial_problem_setup import main as main_A
main_A()
@pytest.mark.base
def test_B():
from pySDC.tutorial.step_1.B_spatial_accuracy_check import main as main_B
main_B()
@pytest.mark.base
def test_C():
from pySDC.tutorial.step_1.C_collocation_problem_setup import main as main_C
main_C()
@pytest.mark.base
def test_D():
from pySDC.tutorial.step_1.D_collocation_accuracy_check import main as main_D
main_D()
| [
"[email protected]"
] | |
501f74a0807b79882ffefe833d5d2afa2a9ccab0 | b1f1dc12ee3aec4235b4263424f8d4eba4f996a6 | /courses/courses/courses/settings.py | b487361dd3fee8795f6c3e82b6547138dce3142e | [] | no_license | daveydog24/courses_django | c25f6c21912392acd9459656a12373f32b1babdf | 9f900771bd1e14fc8baca23f036f4649f85ac7d4 | refs/heads/master | 2020-03-17T13:06:46.041356 | 2018-05-16T06:44:21 | 2018-05-16T06:44:21 | 133,617,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,121 | py | """
Django settings for courses project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ued$a+r6e07h9yt+1(w1q%y(1i)tmw*)qacyh$0k-4=4xgucct'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.courses_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'courses.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'courses.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
43019e739fd97a9d55793a78ce5e2ae210eff614 | 8876406eaef11bff8566b54746ca2a7fae3525db | /setup.py | 4c3c25f64e6ea9d2e13bd8210b73d610353940d2 | [
"BSD-3-Clause"
] | permissive | remibergsma/cs | 19e0ba4b9ea5c97329e743577d1c8cb10aa593d2 | de004f6aa08ee8fc0dc342b334a2731b8c5be964 | refs/heads/master | 2020-12-28T20:30:43.209507 | 2015-06-03T08:40:13 | 2015-06-03T08:40:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | # coding: utf-8
from setuptools import setup
with open('README.rst', 'r') as f:
long_description = f.read()
setup(
name='cs',
version='0.6.8',
url='https://github.com/exoscale/cs',
license='BSD',
author=u'Bruno Renié',
description=('A simple yet powerful CloudStack API client for '
'Python and the command-line.'),
long_description=long_description,
py_modules=('cs',),
zip_safe=False,
include_package_data=True,
platforms='any',
classifiers=(
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
),
install_requires=(
'requests',
),
extras_require={
'highlight': ['pygments'],
},
test_suite='tests',
entry_points={
'console_scripts': [
'cs = cs:main',
],
},
)
| [
"[email protected]"
] | |
8059df45728dd522b8eaf9bfac6c0b962d0f7839 | 0667af1539008f9c6c0dcde2d3f50e8bbccf97f3 | /source/rttov_test/profile-datasets-py/standard54lev_nogas/006.py | 29fa4a49fa333f38babeedbd3040338a5c6cfdcf | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bucricket/projectMAScorrection | bc6b90f07c34bf3e922225b2c7bd680955f901ed | 89489026c8e247ec7c364e537798e766331fe569 | refs/heads/master | 2021-01-22T03:54:21.557485 | 2019-03-10T01:47:32 | 2019-03-10T01:47:32 | 81,468,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,904 | py | """
Profile ../profile-datasets-py/standard54lev_nogas/006.py
file automatically created by the prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/standard54lev_nogas/006.py"
self["Q"] = numpy.array([ 1.42610400e+00, 2.25740200e+00, 3.03034000e+00,
3.69967200e+00, 4.31610400e+00, 4.78132800e+00,
5.07478900e+00, 5.19176800e+00, 5.24924200e+00,
5.19879300e+00, 5.07691300e+00, 4.97253200e+00,
4.91453000e+00, 4.85289800e+00, 4.77784900e+00,
4.68692800e+00, 4.57725700e+00, 4.47492000e+00,
4.32859300e+00, 4.16293300e+00, 4.00311700e+00,
3.89910100e+00, 3.84313200e+00, 3.83660900e+00,
3.90954900e+00, 4.65101500e+00, 5.63378900e+00,
9.11755200e+00, 1.54746500e+01, 2.64265500e+01,
4.48840100e+01, 7.45942600e+01, 1.42818600e+02,
2.77400700e+02, 4.26100500e+02, 5.67428700e+02,
8.00060400e+02, 1.05809300e+03, 1.34364000e+03,
1.75293000e+03, 2.17224400e+03, 2.70441300e+03,
3.21086700e+03, 3.87373300e+03, 4.48421200e+03,
5.05073700e+03, 5.56837500e+03, 6.03632700e+03,
6.53543000e+03, 6.97678600e+03, 7.36122200e+03,
7.68945200e+03, 7.96208200e+03, 8.17961100e+03])
self["P"] = numpy.array([ 5.00000000e-03, 1.31000000e-02, 3.04000000e-02,
6.44000000e-02, 1.26300000e-01, 2.32400000e-01,
4.05200000e-01, 6.74900000e-01, 1.08010000e+00,
1.66910000e+00, 2.50110000e+00, 3.64620000e+00,
5.18640000e+00, 7.21500000e+00, 9.83680000e+00,
1.31672000e+01, 1.73308000e+01, 2.24601000e+01,
2.86937000e+01, 3.61735000e+01, 4.50430000e+01,
5.54433000e+01, 6.75109000e+01, 8.13744000e+01,
9.71505000e+01, 1.14941500e+02, 1.34831800e+02,
1.56884600e+02, 1.81139400e+02, 2.07609200e+02,
2.36278400e+02, 2.67101200e+02, 3.00000000e+02,
3.34864800e+02, 3.71552900e+02, 4.09889300e+02,
4.49667700e+02, 4.90651600e+02, 5.32576900e+02,
5.75153800e+02, 6.18070600e+02, 6.60996500e+02,
7.03586300e+02, 7.45484100e+02, 7.86327800e+02,
8.25754600e+02, 8.63404700e+02, 8.98927500e+02,
9.31985300e+02, 9.62258700e+02, 9.89451000e+02,
1.01329200e+03, 1.03354400e+03, 1.05000000e+03])
self["T"] = numpy.array([ 190.1948, 201.2227, 211.8073, 223.5081, 236.1925, 248.2361,
259.807 , 268.0701, 270.6029, 261.7915, 253.267 , 245.5053,
238.5085, 232.1688, 228.139 , 225.8658, 224.038 , 222.399 ,
220.8289, 219.3256, 217.9126, 216.7 , 216.7 , 216.7 ,
216.7 , 216.7 , 216.7 , 216.7 , 216.7 , 216.7432,
218.4824, 223.6361, 228.5799, 233.4172, 238.0864, 242.5655,
246.9064, 251.0443, 254.9895, 258.7664, 262.3204, 265.715 ,
268.8759, 271.8706, 274.6321, 277.2104, 279.572 , 281.7077,
283.6703, 285.4074, 286.9218, 288.2157, 289.291 , 290.1495])
self["CTP"] = 500.0
self["CFRACTION"] = 1.0
self["IDG"] = 4
self["ISH"] = 4
self["ELEVATION"] = 0.2
self["S2M"]["T"] = 288.2
self["S2M"]["Q"] = 7488.49927299
self["S2M"]["O"] = 0.0164756909121
self["S2M"]["P"] = 1100.0
self["S2M"]["U"] = -6.0
self["S2M"]["V"] = 0.0
self["S2M"]["WFETC"] = 200000.0
self["SKIN"]["SURFTYPE"] = 0
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 286.2
self["SKIN"]["SALINITY"] = 37.0
self["SKIN"]["FOAM_FRACTION"] = 0.9
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 60.0
self["AZANGLE"] = 45.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = -45.0
self["GAS_UNITS"] = 2
self["BE"] = 0.7
self["COSBK"] = 1.0
self["DATE"] = numpy.array([1976, 7, 1])
self["TIME"] = numpy.array([ 9, 45, 0])
| [
"[email protected]"
] | |
14fb6d53004b22663463f0d74039ca56e3a85c52 | 8bd6b0784de9a1e6a39d0f5f23f2d8fb50c73d49 | /MethodRefine-Rand/logistics/MethodRefine/logistics_benchmark-low/testing/testing_39.py | 11af675b7493eb76367a40c8454e27bc462ca562 | [] | no_license | sysulic/MethodRefine | a483d74e65337dff4bc2539ce3caa3bf83748b48 | adbb22d4663041d853d3132f75032b7561bf605c | refs/heads/master | 2020-09-14T10:45:55.948174 | 2020-05-01T09:13:59 | 2020-05-01T09:13:59 | 223,104,986 | 3 | 2 | null | 2020-04-27T11:01:36 | 2019-11-21T06:33:16 | Python | UTF-8 | Python | false | false | 1,654 | py | #!/usr/bin/env python
# coding=utf-8
import sys
sys.path.insert(0, './')
from logistic import *
import new_tihtn_planner
state0 = new_tihtn_planner.State('state0')
allow = False
state0.loc = {'truck1':('city1','loc1'),'truck2':('city2','loc1'),'truck3':('city3','loc1'),'truck4':('city4','loc1'),'truck5':('city5','loc2'),'plane1':('city4','loc1'),'pkg1':('city1','loc1'),'pkg2':('city2','loc2'),'pkg3':('city1','loc1'),'pkg4':('city2','loc2'),}
state0.load = {'truck1':False,'truck2':False,'truck3':False,'truck4':False,'truck5':False,'plane1':False,}
state0.plane_nums = 1
new_tihtn_planner.declare_types({'location':[('city1','loc1'),('city1','loc2'),('city2','loc1'),('city2','loc2'),('city3','loc1'),('city3','loc2'),('city4','loc1'),('city4','loc2'),('city5','loc1'),('city5','loc2'),],'truck':['truck1','truck2','truck3','truck4','truck5',],'plane':['plane1',],'pkg':['pkg1','pkg2','pkg3','pkg4',]})
new_tihtn_planner.declare_funs({load_plane:['pkg', 'location', 'plane'],load_truck:['pkg', 'location', 'truck'],by_plane:['plane', 'location'],drive_truck:['truck', 'location'], unload_truck:['pkg', 'location', 'truck'],unload_plane:['pkg', 'location', 'plane']})
new_tihtn_planner.instance()
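# execute() runs the TIHTN planner on the initial state with four delivery
# tasks; the [[0, 1], [1, 2], [2, 3]] argument appears to encode ordering
# constraints between consecutive tasks.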
def execute(completable):
return new_tihtn_planner.pyhop(completable, allow, state0,[('delievery','pkg1',('city4','loc2')),('delievery','pkg2',('city4','loc2')),('delievery','pkg3',('city4','loc1')),('delievery','pkg4',('city4','loc2')),],[[0, 1],[1, 2],[2, 3],], 9)
def add_methods(fun_obj_list):
for fun in fun_obj_list:
new_tihtn_planner.add_method(fun.func_name.split('__')[0], fun)
def reverse_methods():
new_tihtn_planner.reverse_methods() | [
"[email protected]"
] | |
a196daf132349155bf416fdc4b57e570c83a009f | 7f5ca987c51ffd49b3cba3ffa3e719fc2b8606aa | /classtest1.py | 1160fea16133893b6bfb38a6943c5b03ddd0e5dc | [] | no_license | Yao-Phoenix/TrainCode | 1134b995fddb3d3556fdce05c3219a875b08f6d3 | 2227a70ad5389207af7a84114a5413be3ec2ada2 | refs/heads/master | 2020-09-13T03:56:02.235742 | 2019-12-17T03:48:00 | 2019-12-17T03:48:00 | 222,648,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | class UserData:
def __init__(self,id,name):
self.id = id
self._name = name
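# NewUser exposes the "private" _name attribute through a property; the
# setter validates assignments and rejects names of three characters or fewer.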
class NewUser(UserData):
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if len(value) <= 3:
print("ERROR")
return
self._name = value
if __name__ == '__main__':
user1 = NewUser(101, 'Jack')
user1.name = 'Lou'
user1.name = 'Jackie'
user2 = NewUser(102, 'Louplus')
print(user1.name)
print(user2.name)
| [
"[email protected]"
] | |
e72c98c5a54cc288211b129dfa01220fda615b8c | 2e935ca936976d2d2bd4e785e2f3f29c63771542 | /ExPy11205.py | fba7158c4dd56aa10949bc9293c983dc7ae0a0a6 | [] | no_license | zoro6908/PY_acamedy | 4a370e866fef19f6d2e7697eb809352b6ac703f5 | 460d26639f7bd8cf2486950dc70feae6a2959ca0 | refs/heads/master | 2023-04-26T18:10:44.691326 | 2021-05-25T00:11:02 | 2021-05-25T00:11:02 | 298,425,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | # 파이썬 기초부터 활용까지 (2020.09)
# [12과] 엑셀 작업
# 정렬방식 / 폰트지정 / 선긋기 : styles 객체의 Alignment, Font, Border, Side 클래스 이용
import openpyxl
# from openpyxl.styles import Alignment, Font, Border, Side
from openpyxl.styles import *
wb = openpyxl.Workbook()
ws = wb.active
ws['B3'] = 'Hello'
ws['B3'].font = Font(name = 'HY헤드라인M',
bold = True,
size = 20,
italic=True,
underline='single')
ws['B3'].alignment = Alignment(horizontal='center',
vertical='center')
th = Side(border_style='thin')
db = Side(border_style='double')
ws['B3'].border = Border(top=th, bottom=th, left=db, right=db)
wb.save('ExPy11205.xlsx')
| [
"[email protected]"
] | |
cfe2b636fcb88772b97e8c84b97d910528fa961f | 1fe8d4133981e53e88abf633046060b56fae883e | /venv/lib/python3.8/site-packages/keras/engine/training_v1.py | ed875cfba01676d954377d3288f63abbfad0c678 | [] | no_license | Akira331/flask-cifar10 | 6c49db8485038731ce67d23f0972b9574746c7a7 | 283e7a2867c77d4b6aba7aea9013bf241d35d76c | refs/heads/master | 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | version https://git-lfs.github.com/spec/v1
oid sha256:1eee4b42243c8bc161538de91452850d0486f84aae8e4c4cc9987a3c09eb1557
size 137398
| [
"[email protected]"
] | |
67f3dc9b0610a75280a1d0333c50e0c9d0068017 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_24308.py | 40aee1514e2f6af905f3da41368b0b9dc8eddebd | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | # How to solve permission denied error in cron?
chmod u+x shellScript.sh
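# With the script made executable, a cron entry (hypothetical path and
# schedule) can then invoke it directly, e.g.:
#   0 * * * * /home/user/shellScript.sh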
| [
"[email protected]"
] | |
16d42264f14e0df27c1fd16e5ca5faa78c9249da | f504fc2714ed09c3bc1e84dfe67c04af585b8700 | /Player.py | 69028cc93fd718f9f797d04ae68fa470de7516b0 | [] | no_license | MasumTech/OOP-Concept-in-Python | 04c88a28c2c8bf7ed0231c0da223f216177a07f9 | 7a0a9e214003c759a84e2e28bf8b4702b2e3ced8 | refs/heads/master | 2020-12-15T16:06:36.909927 | 2020-01-20T19:04:19 | 2020-01-20T19:04:19 | 235,169,330 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | class Player:
minAge = 18
maxAge = 50
def __init__(self,name,age):
self.name = name
self.age = age
| [
"[email protected]"
] | |
faf85093b79775e745c7246975277e8edab9fc7e | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/services/types/income_range_view_service.py | d0425e8f88ca57d8de6d14f5c2900c3cc4c07201 | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v4.services',
marshal='google.ads.googleads.v4',
manifest={
'GetIncomeRangeViewRequest',
},
)
class GetIncomeRangeViewRequest(proto.Message):
r"""Request message for
[IncomeRangeViewService.GetIncomeRangeView][google.ads.googleads.v4.services.IncomeRangeViewService.GetIncomeRangeView].
Attributes:
resource_name (str):
Required. The resource name of the income
range view to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
70617f3d7d699cb8fa1781509963bc430072ca0c | b3217e2bb6e72fbcb15df99b5c6c10ea4731a5b7 | /anheng/2020NewYear/pwn/unctf_EasyShellcode/pwn1.py | 331c88293dc6c22ceb2553d7c395ca0eef05c56f | [] | no_license | CrackerCat/ctf-6 | 5704de09eda187e111c7719c71e0a81c5d5c39e3 | aa7846548451572fe54a380dc8d367a0132ad2ec | refs/heads/master | 2023-01-28T06:18:01.764650 | 2020-12-07T12:05:20 | 2020-12-07T12:05:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,641 | py | #!/usr/bin/python
#coding=utf-8
#__author__:TaQini
from pwn import *
local_file = './pwn1'
local_libc = '/lib/x86_64-linux-gnu/libc.so.6'
remote_libc = local_libc # '../libc.so.6'
is_local = False
is_remote = False
if len(sys.argv) == 1:
is_local = True
p = process(local_file)
libc = ELF(local_libc)
elif len(sys.argv) > 1:
is_remote = True
if len(sys.argv) == 3:
host = sys.argv[1]
port = sys.argv[2]
else:
host, port = sys.argv[1].split(':')
p = remote(host, port)
libc = ELF(remote_libc)
elf = ELF(local_file)
context.log_level = 'debug'
context.arch = elf.arch
se = lambda data :p.send(data)
sa = lambda delim,data :p.sendafter(delim, data)
sl = lambda data :p.sendline(data)
sla = lambda delim,data :p.sendlineafter(delim, data)
sea = lambda delim,data :p.sendafter(delim, data)
rc = lambda numb=4096 :p.recv(numb)
ru = lambda delims, drop=True :p.recvuntil(delims, drop)
uu32 = lambda data :u32(data.ljust(4, '\0'))
uu64 = lambda data :u64(data.ljust(8, '\0'))
info_addr = lambda tag, addr :p.info(tag + ': {:#x}'.format(addr))
def debug(cmd=''):
if is_local: gdb.attach(p,cmd)
ru('What do you want to say?\n')
# generated by alpha3
shellcode = 'Ph0666TY1131Xh333311k13XjiV11Hc1ZXYf1TqIHf9kDqW02DqX0D1Hu3M2G0Z2o4H0u0P160Z0g7O0Z0C100y5O3G020B2n060N4q0n2t0B0001010H3S2y0Y0O0n0z01340d2F4y8P115l1n0J0h0a070t'
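# Alphanumeric shellcode produced by the alpha3 encoder; presumably an
# execve("/bin/sh") payload kept printable so it survives input filtering.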
debug('b *0x400ca3')
sl(shellcode)
# debug()
# info_addr('tag',addr)
# log.warning('--------------')
p.interactive()
| [
"[email protected]"
] | |
48a9047e0d1aabf60095420fd3a14dd3c84a6d55 | efdd433fb2e358bb9ec4cb293db2211f5516a9e4 | /Documents/BACKEND/week6/6.1-django/activities/kittyadopt/manage.py | 0dd361a397935e4b5f3e57c33be49970bee34522 | [] | no_license | errinmarie/FeedBack | 3e5c2e581ee207f486567bd838ac864f2c959f18 | c7cf1aefea226b10b4387ded4a0693d786558d14 | refs/heads/master | 2021-10-01T20:08:53.846515 | 2018-11-28T02:49:41 | 2018-11-28T02:49:41 | 159,429,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kittyadopt.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
2523fb189b949d3b82eaec95c265a2e1f0967c70 | 48faee5b845e43e6c102cb027f43c8b886ecaa5e | /accounts/admin.py | 70ffd71bf72808920c9ff285ffcdb97d3c0f9675 | [] | no_license | hornLK/LonedayAdmin | 66c0a8b978967a0144a216f621c872a6d2197229 | 36ba3fe763788423801ad5ab14462624114da804 | refs/heads/master | 2022-12-26T06:57:47.675915 | 2018-05-15T13:08:34 | 2018-05-15T13:08:34 | 131,375,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,184 | py | from django.contrib import admin
from django import forms
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from accounts.models import UserInfo,Permission,Role
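# Standard custom-user admin wiring: UserCreationForm double-checks the two
# password fields on create; UserChangeForm shows the stored hash read-only.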
class UserCreationForm(forms.ModelForm):
password1 = forms.CharField(label='Password',widget=forms.PasswordInput)
password2 = forms.CharField(label='Password confirmation',widget=forms.PasswordInput)
class Meta:
model = UserInfo
fields = ('email','nickname')
    def clean_password2(self):
password1 = self.cleaned_data.get("password1",None)
password2 = self.cleaned_data.get("password2",None)
if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Passwords do not match")
return password2
def save(self,commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
password = ReadOnlyPasswordHashField()
class Meta:
model = UserInfo
fields = ('email','nickname','role','is_active','is_superuser')
def clean_password(self):
return self.initial["password"]
class UserAdmin(BaseUserAdmin):
form = UserChangeForm
add_form = UserCreationForm
list_display = ('email','nickname','role','is_active','is_superuser')
list_filter = ('is_superuser','is_active','role')
fieldsets = (
        ('Basic',{'fields':('email','nickname')}),
        ('Permissions',{'fields':('role','is_superuser')}),
        ('Status',{'fields':('is_active',)})
)
add_fieldsets = (
(None,{
'classes':('wide',),
'fields':('email','nickname','password1','password2','role','is_active','is_superuser')
}
),
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ()
# Register your models here.
admin.site.register(UserInfo,UserAdmin)
admin.site.register(Role)
admin.site.register(Permission)
admin.site.unregister(Group)
| [
"[email protected]"
] | |
dd095298a9afc68335157cb824950644c08ba41d | aee144770c8f4ec5987777aebe5b064e558fc474 | /doc/integrations/pytorch/parlai/tasks/squad2/build.py | 094ee646331483740208e88da52a11b78a65a498 | [
"CC-BY-SA-3.0",
"MIT",
"Apache-2.0",
"AGPL-3.0-only"
] | permissive | adgang/cortx | 1d8e6314643baae0e6ee93d4136013840ead9f3b | a73e1476833fa3b281124d2cb9231ee0ca89278d | refs/heads/main | 2023-04-22T04:54:43.836690 | 2021-05-11T00:39:34 | 2021-05-11T00:39:34 | 361,394,462 | 1 | 0 | Apache-2.0 | 2021-04-25T10:12:59 | 2021-04-25T10:12:59 | null | UTF-8 | Python | false | false | 1,480 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import os
from parlai.core.build_data import DownloadableFile
RESOURCES = [
DownloadableFile(
'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json',
'train-v2.0.json',
'68dcfbb971bd3e96d5b46c7177b16c1a4e7d4bdef19fb204502738552dede002',
zipped=False,
),
DownloadableFile(
'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json',
'dev-v2.0.json',
'80a5225e94905956a6446d296ca1093975c4d3b3260f1d6c8f68bc2ab77182d8',
zipped=False,
),
]
def build(opt):
dpath = os.path.join(opt['datapath'], 'SQuAD2')
version = None
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
| [
"[email protected]"
] | |
5c38ea20e9d5d8f3e31e5d4bd17e1440db4f3d73 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow/contrib/keras/api/keras/metrics/__init__.py | 6e02c1674ee83334cbe7062ab4c5e87eda1d47c4 | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:0a12f3b7372d5432f0d79f402b4769a59e1d06dc405da5210bbf689cb36fa0e2
size 2230
| [
"github@cuba12345"
] | github@cuba12345 |
20fe5b0263f72b03e4ae1cdf890921f70b6e373a | 50a12a72e68d90fbd358e42b667242ad7c9373b1 | /shenfun/legendre/matrices.py | d2c18434839b61b1d0b412a8c8a9cfe43e93be08 | [
"BSD-2-Clause"
] | permissive | AMSSYu/shenfun | 1a3be63ec9ef8c5c97fff354838c2e88e65f71d3 | c4c657e217ab7bcfb7d6e213569d16e108c7f0b2 | refs/heads/master | 2023-05-19T11:13:36.979962 | 2021-06-08T14:20:46 | 2021-06-08T14:20:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,109 | py | r"""
This module contains specific inner product matrices for the different bases in
the Legendre family.
A naming convention is used for the first three capital letters for all matrices.
The first letter refers to type of matrix.
- Mass matrices start with `B`
- One derivative start with `C`
- Stiffness - One derivative for test and trial - start with `A`
- Biharmonic - Two derivatives for test and trial - start with `S`
The next two letters refer to the test and trialfunctions, respectively
- Dirichlet: `D`
- Neumann: `N`
- Legendre: `L`
- Biharmonic: `B`
As such, there are 4 mass matrices, BSDSDmat, BSNSNmat, BLLmat and BSBSBmat,
corresponding to the four bases above.
A matrix may consist of different types of test and trialfunctions as long as
they are all in the Legendre family. A mass matrix using Dirichlet test and
Neumann trial is named BDNmat.
All matrices in this module may be looked up using the 'mat' dictionary,
which takes test and trialfunctions along with the number of derivatives
to be applied to each. As such the mass matrix BSDSDmat may be looked up
as
>>> import numpy as np
>>> from shenfun.legendre.matrices import mat
>>> from shenfun.legendre.bases import ShenDirichlet as SD
>>> B = mat[((SD, 0), (SD, 0))]
and an instance of the matrix can be created as
>>> B0 = SD(10)
>>> BM = B((B0, 0), (B0, 0))
>>> d = {-2: np.array([-0.4, -0.28571429, -0.22222222, -0.18181818, -0.15384615, -0.13333333]),
... 0: np.array([2.4, 0.95238095, 0.62222222, 0.46753247, 0.37606838, 0.31515152, 0.27149321, 0.23859649]),
... 2: np.array([-0.4, -0.28571429, -0.22222222, -0.18181818, -0.15384615, -0.13333333])}
>>> [np.all(abs(BM[k]-v) < 1e-7) for k, v in d.items()]
[True, True, True]
However, this way of creating matrices is not the recommended usage. It is far
more elegant to use the TrialFunction/TestFunction interface, and to
generate the matrix as an inner product:
>>> from shenfun import TrialFunction, TestFunction, inner
>>> u = TrialFunction(B0)
>>> v = TestFunction(B0)
>>> BM = inner(u, v)
>>> [np.all(abs(BM[k]-v) < 1e-7) for k, v in d.items()]
[True, True, True]
To see that this is in fact the BSDSDmat:
>>> print(BM.__class__)
<class 'shenfun.legendre.matrices.BSDSDmat'>
"""
from __future__ import division
#__all__ = ['mat']
import functools
import numpy as np
import sympy as sp
from shenfun.matrixbase import SpectralMatrix
from shenfun.la import TDMA as neumann_TDMA
from shenfun.optimization import cython
from .la import TDMA
from . import bases
# Short names for instances of bases
L = bases.Orthogonal
SD = bases.ShenDirichlet
SB = bases.ShenBiharmonic
SN = bases.ShenNeumann
UD = bases.UpperDirichlet
DN = bases.DirichletNeumann
BF = bases.BeamFixedFree
BCD = bases.BCDirichlet
BCB = bases.BCBiharmonic
x = sp.symbols('x', real=True)
xp = sp.symbols('x', real=True, positive=True)
#pylint: disable=unused-variable, redefined-builtin, bad-continuation
class BLLmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (L_j, L_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N
and :math:`L_k` is the Legendre basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], L)
assert isinstance(trial[0], L)
N = test[0].N
k = np.arange(N, dtype=float)
d = {0: 2./(2.*k+1)}
if test[0].quad == 'GL':
d[0][-1] = 2./(N-1)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
def solve(self, b, u=None, axis=0):
s = self.trialfunction[0].slice()
if u is None:
u = b
else:
assert u.shape == b.shape
sl = [np.newaxis]*u.ndim
sl[axis] = s
sl = tuple(sl)
ss = self.trialfunction[0].sl[s]
d = (1./self.scale)/self[0]
u[ss] = b[ss]*d[sl]
return u
class BSDSDmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
N = test[0].N
k = np.arange(N-2, dtype=float)
d = {-2: -2./(2*k[2:] + 1),
0: 2./(2.*k+1) + 2./(2*k+5)}
if test[0].quad == 'GL':
d[0][-1] = 2./(2*(N-3)+1) + 2./(N-1)
if test[0].is_scaled():
d[0] /= (4*k+6)
d[-2] /= (np.sqrt(4*k[2:]+6)*np.sqrt(4*k[:-2]+6))
d[2] = d[-2]
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
self.solve = TDMA(self)
class BSNSNmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Neumann basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SN)
assert isinstance(trial[0], SN)
N = test[0].N
k = np.arange(N-2, dtype=float)
alpha = k*(k+1)/(k+2)/(k+3)
d0 = 2./(2*k+1)
d = {0: d0 + alpha**2*2./(2*(k+2)+1),
2: -d0[2:]*alpha[:-2]}
if test[0].quad == 'GL':
d[0][-1] = d0[-1] + alpha[-1]**2*2./(N-1)
d[-2] = d[2]
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
#self.solve = neumann_TDMA(self)
class BSBSBmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-4 \text{ and } k = 0, 1, ..., N-4
and :math:`\psi_k` is the Shen Legendre Biharmonic basis function.
"""
def __init__(self, test, trial, measure=1):
from shenfun.la import PDMA
assert isinstance(test[0], SB)
assert isinstance(trial[0], SB)
N = test[0].N
k = np.arange(N, dtype=float)
gk = (2*k+3)/(2*k+7)
hk = -(1+gk)
ek = 2./(2*k+1)
if test[0].quad == 'GL':
ek[-1] = 2./(N-1)
d = {0: ek[:-4] + hk[:-4]**2*ek[2:-2] + gk[:-4]**2*ek[4:],
2: hk[:-6]*ek[2:-4] + gk[:-6]*hk[2:-4]*ek[4:-2],
4: gk[:-8]*ek[4:-4]}
d[-2] = d[2]
d[-4] = d[4]
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
self.solve = PDMA(self)
class BBFBFmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-4 \text{ and } k = 0, 1, ..., N-4
and :math:`\psi_k` is the BeamFixedFree Biharmonic basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], BF)
assert isinstance(trial[0], BF)
N = test[0].N
k = np.arange(N-4, dtype=float)
f1 = lambda k: 4*(2*k+3)/((k+3)**2)
f2 = lambda k: -(2*(k-1)*(k+1)*(k+6)*(2*k+5)/((k+3)**2*(k+4)*(2*k+7)))
f3 = lambda k: -4*(k+1)**2*(2*k+3)/((k+3)**2*(k+4)**2)
f4 = lambda k: (((k+1)/(k+3))*((k+2)/(k+4)))**2*(2*k+3)/(2*k+7)
d = {0: 2/(2*k+1)+f1(k)**2*2/(2*k+3)+f2(k)**2*2/(2*k+5)+f3(k)**2*2/(2*k+7)+f4(k)**2*2/(2*k+9),
1: (f1(k)*2/(2*k+3)+f1(k+1)*f2(k)*2/(2*k+5)+f2(k+1)*f3(k)*2/(2*k+7)+f3(k+1)*f4(k)*2/(2*k+9))[:-1],
2: (f2(k)*2/(2*k+5)+f1(k+2)*f3(k)*2/(2*k+7)+f2(k+2)*f4(k)*2/(2*k+9))[:-2],
3: (f3(k)*2/(2*k+7)+f1(k+3)*f4(k)*2/(2*k+9))[:-3],
4: (f4(k)*2/(2*k+9))[:-4]
}
d[-1] = d[1].copy()
d[-2] = d[2].copy()
d[-3] = d[3].copy()
d[-4] = d[4].copy()
if test[0].quad == 'GL':
k = N-5
d[0][-1] = 2/(2*k+1)+f1(k)**2*2/(2*k+3)+f2(k)**2*2/(2*k+5)+f3(k)**2*2/(2*k+7)+f4(k)**2*2/(N-1)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class BSDLmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (L_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], L)
N = test[0].N
k = np.arange(N, dtype=float)
sc = np.ones(N)
if test[0].is_scaled():
sc = 1. / np.sqrt(4*k+6)
d = {2: -2./(2*k[2:] + 1)*sc[:-2],
0: 2./(2.*k[:-2]+1)*sc[:-2]}
if test[0].quad == 'GL':
d[2][-1] = -2./(N-1)*sc[N-3]
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class BLSDmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, L_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N
and :math:`\psi_j` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], L)
assert isinstance(trial[0], SD)
N = test[0].N
k = np.arange(N, dtype=float)
sc = np.ones(N)
if trial[0].is_scaled():
sc = 1. / np.sqrt(4*k+6)
d = {-2: -2./(2*k[2:] + 1)*sc[:-2],
0: 2./(2.*k[:-2]+1)*sc[:-2]}
if test[0].quad == 'GL':
d[-2][-1] = -2./(N-1)*sc[-3]
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class BDNDNmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is a mixed Legendre Dirichlet/Neumann basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], DN)
assert isinstance(trial[0], DN)
N = test[0].N
k = np.arange(N-2, dtype=float)
km = k[:-1]
kp = k[:-2]
d = {0: 2/(2*k+1) + 2*((2*k+3)/(k+2))/(k+2)**3 + 2*((k+1)/(k+2))**4/(2*k+5),
1: (2/(km+2)**2 - 2*((km+1)/(km+2))**2/(km+3)**2),
2: -2*((kp+1)/(kp+2))**2/(2*kp+5)
}
d[-1] = d[1].copy()
d[-2] = d[2].copy()
if test[0].quad == 'GL':
k = N-3
d[0][-1] = 2/(2*k+1) + 2*((2*k+3)/(k+2))/(k+2)**3 + 2*((k+1)/(k+2))**4/(N-1)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class ASDSDmat(SpectralMatrix):
r"""Stiffness matrix for inner product
.. math::
A_{kj} = (\psi'_j, \psi'_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
N = test[0].N
k = np.arange(N-2, dtype=float)
if not test[0].is_scaled():
d = {0: 4*k+6}
else:
d = {0: 1}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
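    # The stiffness matrix is diagonal (4k+6, or the identity when the basis
    # is scaled), so solve() reduces to a pointwise division followed by
    # setting the boundary degrees of freedom.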
def solve(self, b, u=None, axis=0):
N = self.shape[0] + 2
assert N == b.shape[axis]
s = self.trialfunction[0].slice()
if u is None:
u = b
else:
assert u.shape == b.shape
if not self.trialfunction[0].is_scaled():
# Move axis to first
if axis > 0:
u = np.moveaxis(u, axis, 0)
if u is not b:
b = np.moveaxis(b, axis, 0)
bs = b[s]
us = u[s]
d = 1./self[0]
sl = [np.newaxis]*bs.ndim
sl[0] = slice(None)
us[:] = bs*d[tuple(sl)]
u /= self.scale
self.testfunction[0].bc.set_boundary_dofs(u, True)
if axis > 0:
u = np.moveaxis(u, 0, axis)
if u is not b:
b = np.moveaxis(b, axis, 0)
else:
ss = [slice(None)]*b.ndim
ss[axis] = s
ss = tuple(ss)
u[ss] = b[ss]
u /= (self.scale*self[0])
self.testfunction[0].bc.set_boundary_dofs(u, True)
return u
class ASNSNmat(SpectralMatrix):
r"""Stiffness matrix for inner product
.. math::
A_{kj} = (\psi'_j, \psi'_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Neumann basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SN)
assert isinstance(trial[0], SN)
N = test[0].N
k = np.arange(N-2, dtype=float)
alpha = k*(k+1)/(k+2)/(k+3)
d0 = 2./(2*k+1)
d = {0: d0*alpha*(k+0.5)*((k+2)*(k+3)-k*(k+1))}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
def solve(self, b, u=None, axis=0):
N = self.shape[0] + 2
assert N == b.shape[axis]
s = self.trialfunction[0].slice()
if u is None:
u = b
else:
assert u.shape == b.shape
# Move axis to first
if axis > 0:
u = np.moveaxis(u, axis, 0)
if u is not b:
b = np.moveaxis(b, axis, 0)
bs = b[s]
us = u[s]
d = np.ones(self.shape[0])
d[1:] = 1./self[0][1:]
sl = [np.newaxis]*bs.ndim
sl[0] = slice(None)
us[:] = bs*d[tuple(sl)]
u /= self.scale
self.testfunction[0].bc.set_boundary_dofs(u, True)
if self.testfunction[0].use_fixed_gauge:
u[0] = self.testfunction[0].mean/(2/self.testfunction[0].domain_factor())
if axis > 0:
u = np.moveaxis(u, 0, axis)
if u is not b:
b = np.moveaxis(b, axis, 0)
return u
class ASBSBmat(SpectralMatrix):
r"""Stiffness matrix for inner product
.. math::
A_{kj} = (\psi'_j, \psi'_k)_w
where
.. math::
j = 0, 1, ..., N-4 \text{ and } k = 0, 1, ..., N-4
and :math:`\psi_k` is the Shen Legendre Biharmonic basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SB)
assert isinstance(trial[0], SB)
N = test[0].N
k = np.arange(N-4, dtype=float)
gk = (2*k+3)/(2*k+7)
d = {0: 2*(2*k+3)*(1+gk),
2: -2*(2*k[:-2]+3)}
d[-2] = d[2]
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class ADNDNmat(SpectralMatrix):
r"""Stiffness matrix for inner product
.. math::
A_{kj} = (\psi'_j, \psi'_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the mixed Legendre Dirichlet/Neumann basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], DN)
assert isinstance(trial[0], DN)
N = test[0].N
k = np.arange(N-2, dtype=float)
d = {0: ((k+1)/(k+2))**2*((k+2)*(k+3)- k*(k+1))}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
def solve(self, b, u=None, axis=0):
N = self.shape[0] + 2
assert N == b.shape[axis]
s = self.trialfunction[0].slice()
if u is None:
u = b
else:
assert u.shape == b.shape
# Move axis to first
if axis > 0:
u = np.moveaxis(u, axis, 0)
if u is not b:
b = np.moveaxis(b, axis, 0)
bs = b[s]
us = u[s]
d = 1./self[0]
sl = [np.newaxis]*bs.ndim
sl[0] = slice(None)
us[:] = bs*d[tuple(sl)]
u /= self.scale
self.testfunction[0].bc.set_boundary_dofs(u, True)
if axis > 0:
u = np.moveaxis(u, 0, axis)
if u is not b:
b = np.moveaxis(b, axis, 0)
return u
class SBFBFmat(SpectralMatrix):
r"""Biharmonic matrix for inner product
.. math::
S_{kj} = (\psi''_j, \psi''_k)_w
where
.. math::
j = 0, 1, ..., N-4 \text{ and } k = 0, 1, ..., N-4
and :math:`\psi_k` is the BeamFixedFree basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], BF)
assert isinstance(trial[0], BF)
N = test[0].N
k = np.arange(N-4, dtype=float)
f4 = (((k+1)/(k+3))*((k+2)/(k+4)))**2*(2*k+3)/(2*k+7)
d = {0: f4*(k+2.5)*((k+4)*(k+5)-(k+2)*(k+3))*((k+2)*(k+3)-k*(k+1))}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
def solve(self, b, u=None, axis=0):
N = self.shape[0] + 4
assert N == b.shape[axis]
s = self.trialfunction[0].slice()
if u is None:
u = b
else:
assert u.shape == b.shape
# Move axis to first
if axis > 0:
u = np.moveaxis(u, axis, 0)
if u is not b:
b = np.moveaxis(b, axis, 0)
bs = b[s]
us = u[s]
d = 1./self[0]
sl = [np.newaxis]*bs.ndim
sl[0] = slice(None)
us[:] = bs*d[tuple(sl)]
u /= self.scale
self.testfunction[0].bc.set_boundary_dofs(u, True)
if axis > 0:
u = np.moveaxis(u, 0, axis)
if u is not b:
b = np.moveaxis(b, axis, 0)
return u
class GLLmat(SpectralMatrix):
r"""Stiffness matrix for inner product
.. math::
B_{kj} = (L_j'', L_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N
and :math:`L_k` is the Legendre basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], L)
assert isinstance(trial[0], L)
N = test[0].N
k = np.arange(N, dtype=float)
self._keyscale = 1
def _getkey(i):
j = abs(i)
return self._keyscale*((k[:-j]+0.5)*(k[j:]*(k[j:]+1) - k[:-j]*(k[:-j]+1))*2./(2*k[:-j]+1))
if trial[1]:
d = dict.fromkeys(np.arange(2, N, 2), _getkey)
else:
d = dict.fromkeys(-np.arange(2, N, 2), _getkey)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
self._matvec_methods += ['cython']
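    # matvec dispatch: Cython fast paths for 1D/2D/3D arrays when the trial
    # function carries the derivatives; otherwise fall back to the generic
    # SpectralMatrix implementation.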
def matvec(self, v, c, format='cython', axis=0):
c.fill(0)
trial = self.trialfunction[1]
if format == 'cython' and v.ndim == 3 and trial:
cython.Matvec.GLL_matvec3D_ptr(v, c, axis)
self.scale_array(c, self.scale*self._keyscale)
elif format == 'cython' and v.ndim == 2 and trial:
cython.Matvec.GLL_matvec2D_ptr(v, c, axis)
self.scale_array(c, self.scale*self._keyscale)
elif format == 'cython' and v.ndim == 1 and trial:
cython.Matvec.GLL_matvec(v, c)
self.scale_array(c, self.scale*self._keyscale)
else:
c = super(GLLmat, self).matvec(v, c, format=format, axis=axis)
return c
class SSBSBmat(SpectralMatrix):
r"""Stiffness matrix for inner product
.. math::
A_{kj} = (\psi''_j, \psi''_k)_w
where
.. math::
j = 0, 1, ..., N-4 \text{ and } k = 0, 1, ..., N-4
and :math:`\psi_k` is the Shen Legendre Biharmonic basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SB)
assert isinstance(trial[0], SB)
N = test[0].N
k = np.arange(N-4, dtype=float)
d = {0: 2*(2*k+3)**2*(2*k+5)}
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class CLLmat(SpectralMatrix):
r"""Matrix for inner product
.. math::
C_{kj} = (\psi'_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N
and :math:`\psi_k` is the orthogonal Legendre basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], L)
assert isinstance(trial[0], L)
N = test[0].N
self._keyscale = 1
def _getkey(i):
return 2*self._keyscale
d = dict.fromkeys(np.arange(1, N, 2), _getkey)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
self._matvec_methods += ['cython', 'self']
def matvec(self, v, c, format='self', axis=0):
c.fill(0)
if format == 'self':
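            # Every odd upper diagonal of C equals 2, so each output entry is
            # twice the cumulative sum of the opposite-parity input entries
            # above it; the reversed cumulative sums compute this in O(N).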
if axis > 0:
c = np.moveaxis(c, axis, 0)
v = np.moveaxis(v, axis, 0)
ve = v[-2:0:-2].cumsum(axis=0)
vo = v[-1:0:-2].cumsum(axis=0)
c[-3::-2] = ve*2
c[-2::-2] = vo*2
if axis > 0:
c = np.moveaxis(c, 0, axis)
v = np.moveaxis(v, 0, axis)
self.scale_array(c, self.scale*self._keyscale)
elif format == 'cython' and v.ndim == 3:
cython.Matvec.CLL_matvec3D_ptr(v, c, axis)
self.scale_array(c, self.scale*self._keyscale)
elif format == 'cython' and v.ndim == 2:
cython.Matvec.CLL_matvec2D_ptr(v, c, axis)
self.scale_array(c, self.scale*self._keyscale)
elif format == 'cython' and v.ndim == 1:
cython.Matvec.CLL_matvec(v, c)
self.scale_array(c, self.scale*self._keyscale)
else:
c = super(CLLmat, self).matvec(v, c, format=format, axis=axis)
return c
class CLLmatT(SpectralMatrix):
r"""Matrix for inner product
.. math::
        C_{kj} = (\psi_j, \psi'_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N
and :math:`\psi_k` is the orthogonal Legendre basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], L)
assert isinstance(trial[0], L)
N = test[0].N
self._keyscale = 1
def _getkey(i):
return 2*self._keyscale
d = dict.fromkeys(-np.arange(1, N, 2), _getkey)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class CLSDmat(SpectralMatrix):
r"""Matrix for inner product
.. math::
C_{kj} = (\psi'_j, L_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], L)
assert isinstance(trial[0], SD)
N = test[0].N
d = {-1: -2}
if trial[0].is_scaled():
k = np.arange(N-2, dtype=float)
d[-1] = -2. / np.sqrt(4*k+6)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class CSDLmat(SpectralMatrix):
r"""Matrix for inner product
.. math::
C_{kj} = (L_j, \psi'_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], L)
N = test[0].N
d = {1: -2}
if test[0].is_scaled():
k = np.arange(N-2, dtype=float)
d[1] = -2. / np.sqrt(4*k+6)
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class CSDSDmat(SpectralMatrix):
r"""Matrix for inner product
.. math::
C_{kj} = (\psi'_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
N = test[0].N
d = {-1: -2, 1: 2}
if trial[0].is_scaled():
k = np.arange(N-2, dtype=float)
d[-1] = -2/np.sqrt(4*k[:-1]+6)
d[1] = 2/np.sqrt(4*k[:-1]+6)
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class CSDSDTmat(SpectralMatrix):
r"""Matrix for inner product
.. math::
C_{kj} = (\psi_j, \psi'_k)_w
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
N = test[0].N
d = {-1: 2, 1: -2}
if trial[0].is_scaled():
k = np.arange(N-2, dtype=float)
d[-1] = 2/np.sqrt(4*k[:-1]+6)
d[1] = -2/np.sqrt(4*k[:-1]+6)
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class ASDSDrp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j'(x) \psi_k'(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-2)
d = {0: 4*k+6, 1: 2*k[:-1]+4, -1: 2*k[:-1]+4}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class ASDSD2rp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k''(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-2)
d = {0: -(4*k+6), 1: -(2*k[:-1]+6), -1: -(2*k[:-1]+2)}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class ASDSD2Trp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j''(x) \psi_k(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-2)
d = {0: -(4*k+6), -1: -(2*k[:-1]+6), 1: -(2*k[:-1]+2)}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class AUDUDrp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j'(x) \psi_k'(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Upper Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], UD)
assert isinstance(trial[0], UD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-1)
d = {0: 2*k+2}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class AUDUDrp1smat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j'(x) \psi_k'(x) (1+x)**2 dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Upper Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], UD)
assert isinstance(trial[0], UD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-1)
#d = {0: 4*k**2*(k+1)/(2*k+1)+4*(k+1)**2*(k+2)/(2*k+3)-4*k*(k+1),
# 1: 2*(k[:-1]+1)*(k[:-1]+2)-4*(k[:-1]+1)**2*(k[:-1]+2)/(2*k[:-1]+3)}
d = {0: 2*(k+1)**2*(1/(2*k+1)+1/(2*k+3)),
1: 2*k[1:]*(k[1:]+1)/(2*k[1:]+1)}
d[-1] = d[1].copy()
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class GUDUDrp1smat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k''(x) (1+x)**2 dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Upper Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], UD)
assert isinstance(trial[0], UD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-1)
d = {0: -2*(k+1)*((k-1)/(2*k+1) + (k+3)/(2*k+3)),
1: -2*(k[1:]+1)*(k[1:]+2)/(2*k[1:]+1),
-1: -2*k[:-1]*(k[:-1]+1)/(2*k[:-1]+3)}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class BUDUDrp1smat(SpectralMatrix):
r"""Matrix for inner product
.. math::
B_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k(x) (1+x)**2 dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Upper Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], UD)
assert isinstance(trial[0], UD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-1)
#a00 = 2/(2*k+1)
#a11 = 2/(2*k+3)
#a22 = 2/(2*k+5)
#c00 = ((k+1)**2/(2*k+1)/(2*k+3) + k**2/(2*k+1)/(2*k-1))*a00
#c11 = ((k+2)**2/(2*k+3)/(2*k+5) + (k+1)**2/(2*k+3)/(2*k+1))*a11
#c02 = (k+2)*(k+1)/(2*k+5)/(2*k+3)*a00
#c13 = ((k+3)*(k+2)/(2*k+7)/(2*k+5))*a11
#b01 = (k+1)/(2*k+3)*a00
#b12 = (k+2)/(2*k+5)*a11
#d = {0: a00+c00-4*b01+a11+c11,
# 1: (2*b01-c02-a11-c11+2*b12)[:-1],
# -1: (2*b01-c02-a11-c11+2*b12)[:-1],
# 2: (c02-2*b12+c13)[:-2],
# -2: (c02-2*b12+c13)[:-2],
# 3: -c13[:-3].copy(),
# -3: -c13[:-3].copy()}
d = {0: (k/(2*k+1))**2*(2/(2*k-1) + 2/(2*k+3)) + ((k+2)/(2*k+3))**2 * (2/(2*k+1)+2/(2*k+5)),
1: 2*k[1:]*(k[1:]+1)/(2*k[1:]+1)**2*(1/(2*k[1:]-1)+1/(2*k[1:]+3)) - 2*(k[1:]+2)*(k[1:]-1)/(2*k[1:]+3)/(2*k[1:]+1)/(2*k[1:]-1),
2: -2*k[2:]*(k[2:]-2)/(2*k[2:]+1)/(2*k[2:]-1)/(2*k[2:]-3)-2*k[2:]*(k[2:]+2)/(2*k[2:]+3)/(2*k[2:]+1)/(2*k[2:]-1),
3: -2*k[3:]*(k[3:]-1)/(2*k[3:]+1)/(2*k[3:]-1)/(2*k[3:]-3)}
d[-1] = d[1].copy()
d[-2] = d[2].copy()
d[-3] = d[3].copy()
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class CUDUDrp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k'(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Upper Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], UD)
assert isinstance(trial[0], UD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-1)
d = {0: -2*(k+1)/(2*k+1)+2*(k+1)/(2*k+3),
1: 2*(k[1:]+1)/(2*k[1:]+1),
-1: -2*(k[:-1]+1)/(2*k[:-1]+3)}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class BUDUDmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \psi_k)_w
where
.. math::
j = 0, 1, ..., N-1 \text{ and } k = 0, 1, ..., N-1
and :math:`\psi_k` is the Legendre UpperDirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], UD)
assert isinstance(trial[0], UD)
N = test[0].N
k = np.arange(N-1, dtype=float)
d = {-1: -2./(2*k[1:] + 1),
0: 2./(2.*k+1) + 2./(2*k+3)}
if test[0].quad == 'GL':
d[0][-1] = 2./(2*(N-2)+1) + 2./(N-1)
d[1] = d[-1]
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class BUDUDrp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
B_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Upper Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], UD)
assert isinstance(trial[0], UD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-1)
d = {0: 4*(k+1)/(2*k+1)/(2*k+3),
1: 4/(2*k[:-1]+1)/(2*k[:-1]+3)/(2*k[:-1]+5),
2: -2*(k[:-2]+2)/(2*k[:-2]+3)/(2*k[:-2]+5)}
d[-1] = d[1]
d[-2] = d[2]
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class BSDSD1orp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k(x) 1/(1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-2)
d = {0: 2*(2*k+3)/(k+1)/(k+2), 1: -2/(k[:-1]+2), -1: -2/(k[:-1]+2)}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class BSDSDrp1mat(SpectralMatrix):
r"""Matrix for inner product
.. math::
A_{kj} = \int_{-1}^{1} \psi_j(x) \psi_k(x) (1+x) dx
where
.. math::
j = 0, 1, ..., N-2 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_k` is the Shen Legendre Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
assert test[0].quad == 'LG'
k = np.arange(test[0].N-2)
d = {0: 2/(2*k+1)+2/(2*k+5),
1: 2/(2*k[:-1]+1)/(2*k[:-1]+5) + 2*(k[:-1]+3)/(2*k[:-1]+5)/(2*k[:-1]+7),
2: -2/(2*k[:-2]+5),
3: -2*(k[:-3]+3)/(2*k[:-3]+5)/(2*k[:-3]+7)}
d[-1] = d[1]
d[-2] = d[2]
d[-3] = d[3]
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class BSDBCDmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \phi_k)_w
where
.. math::
j = 0, 1 \text{ and } k = 0, 1, ..., N-2
and :math:`\psi_j` is the Dirichlet boundary basis and
:math:`\phi_k` is the Shen Dirichlet basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], BCD)
N = test[0].N
k = np.arange(N-2, dtype=float)
if not test[0].is_scaled():
d = {0: np.array([1, 1./3.]),
1: np.array([1.0]),
-1: np.array([-1./3., 0])}
else:
d = {0: np.array([1./np.sqrt(6.), 1./3./np.sqrt(10.)]),
1: np.array([1./np.sqrt(6.)]),
-1: np.array([-1./3./np.sqrt(10.), 0])}
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class BSBBCBmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\psi_j, \phi_k)_w
where
.. math::
j = 0, 1, 2, 3 \text{ and } k = 0, 1, ..., N-4
and :math:`\psi_j` is the Biharmonic boundary basis and
:math:`\phi_k` is the Shen Biharmonic basis function.
"""
def __init__(self, test, trial, measure=1):
assert isinstance(test[0], SB)
assert isinstance(trial[0], BCB)
N = test[0].N
k = np.arange(N-4, dtype=float)
d = {0: np.array([1, 4/9, -1/15, 1/35]),
1: np.array([1, -1/9, 1/15]),
2: np.array([3/7, -1/9]),
3: np.array([-3/7]),
-1: np.array([-4/9, 0, 1/35, 0]),
-2: np.array([0, -1/35, 0, 0]),
-3: np.array([1/35, 0, 0, 0])}
SpectralMatrix.__init__(self, d, test, trial, measure=measure)
class _Legmatrix(SpectralMatrix):
def __init__(self, test, trial, measure=1):
SpectralMatrix.__init__(self, {}, test, trial, measure=measure)
class _LegMatDict(dict):
"""Dictionary of inner product matrices
Matrices that are missing keys are generated from Vandermonde type
computations.
"""
def __missing__(self, key):
measure = 1 if len(key) == 2 else key[3]
c = functools.partial(_Legmatrix, measure=measure)
self[key] = c
return c
def __getitem__(self, key):
matrix = dict.__getitem__(self, key)
return matrix
mat = _LegMatDict({
((L, 0), (L, 0)): BLLmat,
((L, 0), (L, 1)): CLLmat,
((L, 1), (L, 0)): CLLmatT,
((L, 0), (SD, 1)): CLSDmat,
((SD, 1), (L, 0)): CSDLmat,
((SD, 0), (SD, 1)): CSDSDmat,
((SD, 1), (SD, 0)): functools.partial(CSDSDmat, scale=-1.),
((SD, 0), (SD, 0)): BSDSDmat,
((SB, 0), (SB, 0)): BSBSBmat,
((SN, 0), (SN, 0)): BSNSNmat,
((SD, 0), (L, 0)): BSDLmat,
((L, 0), (SD, 0)): BLSDmat,
((SD, 1), (SD, 1)): ASDSDmat,
((SD, 2), (SD, 0)): functools.partial(ASDSDmat, scale=-1.),
((SD, 0), (SD, 2)): functools.partial(ASDSDmat, scale=-1.),
((SN, 1), (SN, 1)): ASNSNmat,
((SN, 2), (SN, 0)): functools.partial(ASNSNmat, scale=-1.),
((SN, 0), (SN, 2)): functools.partial(ASNSNmat, scale=-1.),
((L, 2), (L, 0)): GLLmat,
((L, 0), (L, 2)): GLLmat,
((SB, 2), (SB, 2)): SSBSBmat,
((SB, 1), (SB, 1)): ASBSBmat,
((SB, 0), (SB, 2)): functools.partial(ASBSBmat, scale=-1.),
((SB, 2), (SB, 0)): functools.partial(ASBSBmat, scale=-1.),
((SB, 0), (SB, 4)): SSBSBmat,
((SB, 4), (SB, 0)): SSBSBmat,
((SD, 1), (SD, 1), (-1, 1), 1+x): functools.partial(ASDSDrp1mat, measure=1+x),
((SD, 0), (SD, 2), (-1, 1), 1+x): functools.partial(ASDSD2rp1mat, measure=1+x),
((SD, 2), (SD, 0), (-1, 1), 1+x): functools.partial(ASDSD2Trp1mat, measure=1+x),
((SD, 0), (SD, 2), (0, 1), xp): functools.partial(ASDSD2rp1mat, scale=0.5, measure=xp),
((SD, 2), (SD, 0), (0, 1), xp): functools.partial(ASDSD2Trp1mat, scale=0.5, measure=xp),
((SD, 1), (SD, 1), (0, 1), xp): functools.partial(ASDSDrp1mat, scale=0.5, measure=xp),
((SD, 0), (SD, 0), (-1, 1), 1+x): functools.partial(BSDSDrp1mat, measure=1+x),
((SD, 0), (SD, 0), (0, 1), xp): functools.partial(BSDSDrp1mat, scale=0.5, measure=xp),
((SD, 0), (SD, 0), (-1, 1), 1/(1+x)): functools.partial(BSDSD1orp1mat, measure=1/(1+x)),
((SD, 0), (SD, 0), (0, 1), 1/xp): functools.partial(BSDSD1orp1mat, scale=2, measure=1/xp),
((UD, 1), (UD, 1), (-1, 1), 1+x): functools.partial(AUDUDrp1mat, measure=(1+x)),
((UD, 1), (UD, 1), (0, 1), xp): functools.partial(AUDUDrp1mat, scale=0.5, measure=xp),
((UD, 0), (UD, 0), (-1, 1), 1+x): functools.partial(BUDUDrp1mat, measure=(1+x)),
((UD, 0), (UD, 0), (0, 1), xp): functools.partial(BUDUDrp1mat, scale=0.5, measure=xp),
((UD, 1), (UD, 1), (-1, 1), (1+x)**2): functools.partial(AUDUDrp1smat, measure=(1+x)**2),
((UD, 1), (UD, 1), (0, 1), xp**2): functools.partial(AUDUDrp1smat, scale=0.25, measure=xp**2),
((UD, 0), (UD, 2), (-1, 1), (1+x)**2): functools.partial(GUDUDrp1smat, measure=(1+x)**2),
((UD, 0), (UD, 2), (0, 1), xp**2): functools.partial(GUDUDrp1smat, scale=0.25, measure=xp**2),
((UD, 0), (UD, 1), (-1, 1), (1+x)): functools.partial(CUDUDrp1mat, measure=(1+x)),
((UD, 0), (UD, 1), (0, 1), xp): functools.partial(CUDUDrp1mat, scale=0.5, measure=xp),
((UD, 0), (UD, 0), (-1, 1), (1+x)**2): functools.partial(BUDUDrp1smat, measure=(1+x)**2),
((UD, 0), (UD, 0), (0, 1), xp**2): functools.partial(BUDUDrp1smat, scale=0.25, measure=xp**2),
((UD, 0), (UD, 0)): BUDUDmat,
((SD, 0), (BCD, 0)): BSDBCDmat,
((SB, 0), (BCB, 0)): BSBBCBmat,
((DN, 0), (DN, 0)): BDNDNmat,
((DN, 1), (DN, 1)): ADNDNmat,
((DN, 2), (DN, 0)): functools.partial(ADNDNmat, scale=-1.),
((DN, 0), (DN, 2)): functools.partial(ADNDNmat, scale=-1.),
((BF, 4), (BF, 0)): SBFBFmat,
((BF, 0), (BF, 4)): SBFBFmat,
((BF, 2), (BF, 2)): SBFBFmat,
((BF, 0), (BF, 0)): BBFBFmat
})
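# Usage sketch (illustrative): a lookup such as ``mat[((SD, 0), (SD, 0))]``
# returns the specialized class registered above (here ``BSDSDmat``), while a
# key with no entry falls back to ``_Legmatrix``, a dense matrix computed from
# Vandermonde type products, through ``_LegMatDict.__missing__``. Keys of
# length four additionally carry the integration domain and the measure as
# items 2 and 3, e.g. ``((SD, 0), (SD, 0), (-1, 1), 1+x)``.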
| [
"[email protected]"
] | |
67ccb1475ec05c04027a13cfadd67b4b28e9004e | cb8d1db4af4401b019775132fe92f3eae4cb92df | /_unittests/ut_cli/test_cli_file_helper.py | b24c031f8bd1ec2a412e0fabd6edd470918e6747 | [
"MIT"
] | permissive | Pandinosaurus/pyquickhelper | 041d6c6d2832845bd89027ffaab333239efc3959 | 860ec5b9a53bae4fc616076c0b52dbe2a1153d30 | refs/heads/master | 2023-07-24T12:27:25.700277 | 2023-07-17T07:02:34 | 2023-07-17T07:02:34 | 169,426,142 | 0 | 0 | MIT | 2023-07-17T07:53:04 | 2019-02-06T15:17:53 | Jupyter Notebook | UTF-8 | Python | false | false | 1,549 | py | """
@brief test tree node (time=7s)
"""
import os
import unittest
from pyquickhelper.loghelper import fLOG, BufferedPrint
from pyquickhelper.__main__ import main
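# The test below exercises the command line "ls" three times: a plain listing,
# a listing with a negative name filter (-n "pycache", -fu 1), and a listing
# with a regex rename preview (-s pattern, -su replacement).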
class TestCliFileHelper(unittest.TestCase):
def test_cli_file_helper(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
this = os.path.abspath(os.path.dirname(__file__))
st = BufferedPrint()
main(args=["ls", "-f", this, "-p",
".*[.]py", "-r", "f"], fLOG=st.fprint)
res = str(st)
self.assertIn(".py", res)
this = os.path.abspath(os.path.dirname(__file__))
st = BufferedPrint()
main(args=["ls", "-f", this, "-p", ".*[.]py", "-r",
"f", '-n', 'pycache', '-fu', '1'],
fLOG=st.fprint)
res = str(st)
self.assertIn(".py", res)
self.assertNotIn("pycache", res)
this = os.path.abspath(os.path.dirname(__file__))
st = BufferedPrint()
main(args=["ls", "-f", this, "-p", ".*[.]py", "-r",
"f", '-n', 'pycache', '-fu', '1', '-s', "test_(.*)",
'-su', 'unit_\\1'],
fLOG=st.fprint)
res = str(st)
self.assertIn(".py", res)
self.assertNotIn("pycache", res)
self.assertNotIn("test_parser", res)
self.assertIn("unit_parser", res)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
51142e7b8d3e124bfee304c104a86f3a4b396682 | f8ffa8ff257266df3de9d20d95b291e393f88434 | /Python from scratch/Warsztaty/Warsztaty01/zad01.py | 33e1c3878f88c9c49cec628e9e7d3339b1b0c28c | [] | no_license | janiszewskibartlomiej/Python_Code_Me_Gda | c0583c068ef08b6130398ddf93c3a3d1a843b487 | 7568de2a9acf80bab1429bb55bafd89daad9b729 | refs/heads/master | 2020-03-30T05:06:26.757033 | 2020-03-02T08:53:28 | 2020-03-02T08:53:28 | 150,781,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | ekwipunek = {'pieniądze':158.40,
'sprzęt':['kompas', 'latarka', 'śpiwór'],
'prowiant':['jabłko', 'woda', 'batonik', 'batonik']}
print('Equipment list: ', ekwipunek)
print('The scout bought a sleeping mat for 29.99 zł')
ekwipunek['pieniądze'] = ekwipunek['pieniądze'] - 29.99
ekwipunek['sprzęt'].append('karimata')
print(ekwipunek)
print('The scout ate a candy bar')
ekwipunek['prowiant'].remove('batonik')
print(ekwipunek['prowiant'])
print('The scout has 7 items in the backpack: ', ekwipunek['sprzęt'] + ekwipunek['prowiant']) | [
"[email protected]"
] | |
1c6026e1dbb38669cf355d1ec7f80ae280b72c1f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_wagering.py | 3ad88745da9973ac13e723d2d10defa24d8be66f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py |
from xai.brain.wordbase.nouns._wager import _WAGER
#calss header
class _WAGERING(_WAGER, ):
def __init__(self,):
_WAGER.__init__(self)
self.name = "WAGERING"
self.specie = 'nouns'
self.basic = "wager"
self.jsondata = {}
| [
"[email protected]"
] | |
db604c59ed67ae9680c1fe9108853daaf57af74b | 3d5958a79c02fe885324956bfead037999c73c7a | /trimesh/io/stl.py | 32912d8f687bcee9809cc8e06c6380fe138bfba6 | [
"MIT"
] | permissive | Mambix/trimesh | def56d14994076cfcc24e3c3d67d5e27bea5bb49 | 99d01909a1f4cf56d777a8339a6c2443cf37d6b8 | refs/heads/master | 2020-05-27T21:20:11.800221 | 2017-03-02T00:57:53 | 2017-03-02T00:57:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,643 | py | import numpy as np
# the exception raised if an STL file object doesn't match its header
class HeaderError(Exception):
pass
# define a numpy datatype for the data section of a binary STL file
_stl_dtype = np.dtype([('normals', np.float32, (3)),
('vertices', np.float32, (3, 3)),
('attributes', np.uint16)])
# define a numpy datatype for the header of a binary STL file
_stl_dtype_header = np.dtype([('header', np.void, 80),
('face_count', np.int32)])
def load_stl(file_obj, file_type=None):
'''
Load an STL file from a file object.
Arguments
----------
file_obj: open file- like object
file_type: not used
Returns
----------
loaded: kwargs for a Trimesh constructor with keys:
vertices: (n,3) float, vertices
faces: (m,3) int, indexes of vertices
face_normals: (m,3) float, normal vector of each face
'''
# save start of file obj
file_pos = file_obj.tell()
try:
# check the file for a header which matches the file length
# if that is true, it is almost certainly a binary STL file
# if the header doesn't match the file length a HeaderError will be
# raised
return load_stl_binary(file_obj)
except HeaderError:
# move the file back to where it was initially
file_obj.seek(file_pos)
# try to load the file as an ASCII STL
# if the header doesn't match the file length a HeaderError will be
# raised
return load_stl_ascii(file_obj)
def load_stl_binary(file_obj):
'''
Load a binary STL file from a file object.
Arguments
----------
file_obj: open file- like object
Returns
----------
loaded: kwargs for a Trimesh constructor with keys:
vertices: (n,3) float, vertices
faces: (m,3) int, indexes of vertices
face_normals: (m,3) float, normal vector of each face
'''
header = np.fromstring(file_obj.read(84), dtype=_stl_dtype_header)
# now we check the length from the header versus the length of the file
# data_start should always be position 84, but hard coding that felt ugly
data_start = file_obj.tell()
# this seeks to the end of the file
# position 0, relative to the end of the file 'whence=2'
file_obj.seek(0, 2)
# we save the location of the end of the file and seek back to where we
# started from
data_end = file_obj.tell()
file_obj.seek(data_start)
# the binary format has a rigidly defined structure, and if the length
# of the file doesn't match the header, the loaded version is almost
# certainly going to be garbage.
data_ok = (
data_end - data_start) == (header['face_count'] * _stl_dtype.itemsize)
# this check is to see if this really is a binary STL file.
# if we don't do this and try to load a file that isn't structured properly
# we will be producing garbage or crashing hard
# so it's much better to raise an exception here.
if not data_ok:
raise HeaderError('Binary STL has incorrect length in header!')
# all of our vertices will be loaded in order due to the STL format,
# so faces are just sequential indices reshaped.
faces = np.arange(header['face_count'] * 3).reshape((-1, 3))
blob = np.fromstring(file_obj.read(), dtype=_stl_dtype)
result = {'vertices': blob['vertices'].reshape((-1, 3)),
'face_normals': blob['normals'].reshape((-1, 3)),
'faces': faces}
return result
def load_stl_ascii(file_obj):
'''
Load an ASCII STL file from a file object.
Arguments
----------
file_obj: open file- like object
Returns
----------
loaded: kwargs for a Trimesh constructor with keys:
vertices: (n,3) float, vertices
faces: (m,3) int, indexes of vertices
face_normals: (m,3) float, normal vector of each face
'''
header = file_obj.readline()
text = file_obj.read()
if hasattr(text, 'decode'):
text = text.decode('utf-8')
text = text.lower().split('endsolid')[0]
blob = np.array(text.split())
# there are 21 'words' in each face
face_len = 21
face_count = len(blob) / face_len
if (len(blob) % face_len) != 0:
raise HeaderError('Incorrect number of values in STL file!')
face_count = int(face_count)
# this offset is to be added to a fixed set of indices that is tiled
offset = face_len * np.arange(face_count).reshape((-1, 1))
normal_index = np.tile([2, 3, 4], (face_count, 1)) + offset
vertex_index = np.tile(
[8, 9, 10, 12, 13, 14, 16, 17, 18], (face_count, 1)) + offset
# faces are groups of three sequential vertices, as vertices are not
# references
faces = np.arange(face_count * 3).reshape((-1, 3))
face_normals = blob[normal_index].astype(np.float64)
vertices = blob[vertex_index.reshape((-1, 3))].astype(np.float64)
return {'vertices': vertices,
'faces': faces,
'face_normals': face_normals}
def export_stl(mesh):
'''
Convert a Trimesh object into a binary STL file.
Arguments
---------
mesh: Trimesh object
Returns
---------
export: bytes, representing mesh in binary STL form
'''
header = np.zeros(1, dtype=_stl_dtype_header)
header['face_count'] = len(mesh.faces)
packed = np.zeros(len(mesh.faces), dtype=_stl_dtype)
packed['normals'] = mesh.face_normals
packed['vertices'] = mesh.triangles
export = header.tostring()
export += packed.tostring()
return export
def export_stl_ascii(mesh):
'''
Convert a Trimesh object into an ASCII STL file.
Arguments
---------
mesh: Trimesh object
Returns
---------
export: str, mesh represented as an ASCII STL file
'''
# move all the data thats going into the STL file into one array
blob = np.zeros((len(mesh.faces), 4, 3))
blob[:, 0, :] = mesh.face_normals
blob[:, 1:, :] = mesh.triangles
# create a lengthy format string for the data section of the file
format_string = 'facet normal {} {} {}\nouter loop\n'
format_string += 'vertex {} {} {}\n' * 3
format_string += 'endloop\nendfacet\n'
format_string *= len(mesh.faces)
# concatenate the header, data, and footer
export = 'solid \n'
export += format_string.format(*blob.reshape(-1))
export += 'endsolid'
return export
_stl_loaders = {'stl': load_stl,
'stl_ascii': load_stl}
| [
"[email protected]"
] | |
480d9a66e2a6c4e56d92084f3b27f5626c89d12a | f9b5c37a098ed940d943415aadda130c13271754 | /dailyfresh/f_goods/migrations/0003_auto_20180619_2120.py | 47da203ab64800d2950a865ce0879526e9bf7ef2 | [] | no_license | duanHongPy/django | cd00fe819576741a7c1860ea420275129d759d67 | 3fee019b3d8d50218410366d62eac29700f9e69a | refs/heads/master | 2020-03-19T23:28:01.420061 | 2018-06-21T02:53:19 | 2018-06-21T02:53:19 | 137,007,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-19 13:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('f_goods', '0002_auto_20180619_1910'),
]
operations = [
migrations.RemoveField(
model_name='goodinfo',
name='gsales',
),
migrations.AlterField(
model_name='goodinfo',
name='gclick',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
96e8c5c0de2d305c63d6d23a0b1aaeb507e67ff8 | ddd7d37f3a98b508e16f27978bf788fc61358225 | /wsgi.py | 06b4bc7a115d792c6e0690fac47c8d93e9c07d3e | [] | no_license | nbh847/bbs_practice | b124e59b86b8fb65b2e5ee34b8e7065a05bcee41 | 21946cbf27a34028a53144a2c202d763fda6ee21 | refs/heads/master | 2020-03-28T19:20:31.590484 | 2018-10-24T14:15:40 | 2018-10-24T14:15:40 | 148,966,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | #!/usr/bin/env python3
import sys
from os.path import abspath
from os.path import dirname
# 设置当前目录为工作目录
sys.path.insert(0, abspath(dirname(__file__)))
# 引入 app.py
import app
# 必须有一个叫做 application 的变量
# gunicorn 就要这个变量
# 这个变量的值必须是 Flask 实例
# 这是规定的套路(协议)
application = app.app
# 这是把代码部署到 apache gunicorn nginx 后面的套路
"""
➜ ~ cat /etc/supervisor/conf.d/xx.conf
[program:todo]
command=/usr/local/bin/gunicorn wsgi --bind 0.0.0.0:2000 --pid /tmp/todo.pid
directory=/root/web13
autostart=true
"""
| [
"[email protected]"
] | |
1fe4732f5442e87e5d6cb0918e6cdd85870b6616 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/lifesciences/v2beta/lifesciences-v2beta-py/google/cloud/lifesciences_v2beta/services/workflows_service_v2_beta/transports/grpc.py | 442db128588b94be3735bf27ac5f0af52c460e0e | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,300 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.lifesciences_v2beta.types import workflows
from google.longrunning import operations_pb2 # type: ignore
from .base import WorkflowsServiceV2BetaTransport, DEFAULT_CLIENT_INFO
class WorkflowsServiceV2BetaGrpcTransport(WorkflowsServiceV2BetaTransport):
"""gRPC backend transport for WorkflowsServiceV2Beta.
A service for running workflows, such as pipelines consisting
of Docker containers.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'lifesciences.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or applicatin default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'lifesciences.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def run_pipeline(self) -> Callable[
[workflows.RunPipelineRequest],
operations_pb2.Operation]:
r"""Return a callable for the run pipeline method over gRPC.
Runs a pipeline. The returned Operation's [metadata]
[google.longrunning.Operation.metadata] field will contain a
[google.cloud.lifesciences.v2beta.Metadata][google.cloud.lifesciences.v2beta.Metadata]
object describing the status of the pipeline execution. The
[response][google.longrunning.Operation.response] field will
contain a
[google.cloud.lifesciences.v2beta.RunPipelineResponse][google.cloud.lifesciences.v2beta.RunPipelineResponse]
object if the pipeline completes successfully.
**Note:** Before you can use this method, the *Life Sciences
Service Agent* must have access to your project. This is done
automatically when the Cloud Life Sciences API is first enabled,
but if you delete this permission you must disable and re-enable
the API to grant the Life Sciences Service Agent the required
permissions. Authorization requires the following `Google
IAM <https://cloud.google.com/iam/>`__ permission:
- ``lifesciences.workflows.run``
Returns:
Callable[[~.RunPipelineRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'run_pipeline' not in self._stubs:
self._stubs['run_pipeline'] = self.grpc_channel.unary_unary(
'/google.cloud.lifesciences.v2beta.WorkflowsServiceV2Beta/RunPipeline',
request_serializer=workflows.RunPipelineRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['run_pipeline']
__all__ = (
'WorkflowsServiceV2BetaGrpcTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
7c2da6da0584f39818fbf738cbb2c1a7d78c13aa | 8535bbc7781c4691880c935bd7025646f0dbb7c3 | /check mirror images of two arrays.py | daa0276c484e75516f27c7d2d1b8b51dc8139981 | [] | no_license | Mahadev0317/Codekata | 3b2149f3116ebe4b48b2059b873544c27b23ff39 | c35fa0ed0c4870faea69152638f461e743a9ff69 | refs/heads/master | 2020-04-15T04:59:17.062947 | 2019-05-29T04:46:35 | 2019-05-29T04:46:35 | 164,404,727 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | n=int(input())
li=list(map(int,input().split()))
lis=list(map(int,input().split()))
k=n-1
for i in range(n):
if li[i]!=lis[k]:
print("no")
break
k-=1
else:
print("yes")
| [
"[email protected]"
] | |
9404fc7b69c9461b11360f92805c0269adeb68e9 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/9e606bfdbe039112b609c263cb4dfc23e31ffe93-<check_regressors_train>-bug.py | 46b71af4c66c69b7a702b9f117d56ad37df9194f | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | @ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_regressors_train(name, regressor_orig):
(X, y) = _boston_subset()
y = StandardScaler().fit_transform(y.reshape((- 1), 1))
y = y.ravel()
regressor = clone(regressor_orig)
y = multioutput_estimator_convert_y_2d(regressor, y)
rnd = np.random.RandomState(0)
if ((not hasattr(regressor, 'alphas')) and hasattr(regressor, 'alpha')):
regressor.alpha = 0.01
if (name == 'PassiveAggressiveRegressor'):
regressor.C = 0.01
assert_raises(ValueError, regressor.fit, X, y[:(- 1)])
if (name in CROSS_DECOMPOSITION):
y_ = np.vstack([y, ((2 * y) + rnd.randint(2, size=len(y)))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
if (name not in ('PLSCanonical', 'CCA', 'RANSACRegressor')):
assert_greater(regressor.score(X, y_), 0.5) | [
"[email protected]"
] | |
3b2933c2a78105815d35f620b0fe153ed6a99d8c | fca36ece36254e6175d7ac26791ae0e0abedd040 | /ex03.py | 8f728c512cd7fc125cef46901dabc8aa8b3eabc0 | [] | no_license | DikranHachikyan/python-programming-20190318 | 72c63ccdb716db871c755bb589e333c9fc57bcd5 | 0e6b4c599be3d69efdb4acf7817abc3d9d41eb7b | refs/heads/master | 2020-04-29T23:35:10.401263 | 2019-04-18T14:59:30 | 2019-04-18T14:59:30 | 176,480,840 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | #!/home/wizard/anaconda3/bin/python
def main():
x = int(input('x='))
if x < 10:
print('x={}'.format(x))
main() | [
"[email protected]"
] | |
1f8bd03e1ef888abd81c307876d56b9be3c041cf | b3a2ac9eb02a6eef9e6f3504afabc6400f894f56 | /clld/tests/test_web_app.py | b32979cb1f2d24ff5e626188403624fba5dc541a | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | Anaphory/clld | 9f67c477e228eff05fdc7b7fa4310c703af02108 | bed1a6d08275a41fd7b5d13a0af19e4e538d186c | refs/heads/master | 2021-01-17T04:50:05.382411 | 2017-02-16T11:20:11 | 2017-02-16T11:20:11 | 66,831,136 | 0 | 0 | null | 2016-08-29T09:43:06 | 2016-08-29T09:43:05 | null | UTF-8 | Python | false | false | 3,180 | py | # coding: utf8
from __future__ import unicode_literals, print_function, division, absolute_import
import importlib
from zope.interface import Interface
from pyramid.testing import Configurator
from pyramid.httpexceptions import HTTPNotFound
from purl import URL
from clld.db.models.common import Contribution, ValueSet, Language, Language_files
from clld.tests.util import TestWithEnv, Route, TESTS_DIR, WithDbAndDataMixin
from clld.interfaces import IMapMarker
from clld.web.adapters.download import N3Dump
class Tests(WithDbAndDataMixin, TestWithEnv):
def test_CLLDRequest(self):
self.assertTrue(isinstance(self.env['request'].purl, URL))
c = self.env['request'].db.query(Contribution).first()
self.env['request'].resource_url(c, ext='geojson')
self.assertEqual(None, self.env['request'].ctx_for_url('/some/path/to/nowhere'))
assert self.env['request'].ctx_for_url('/')
self.env['request'].file_url(Language_files(id='1', object=Language.first()))
assert self.env['request'].get_datatable('valuesets', ValueSet)
assert self.env['request'].blog is None
def test_menu_item(self):
from clld.web.app import menu_item
assert menu_item('contributions', None, self.env['request'])
def test_ctx_factory(self):
from clld.web.app import ctx_factory
for model, route in [
(Contribution, 'contributions'),
(ValueSet, 'valuesets'),
(Language, 'languages'),
]:
obj = model.first()
self.set_request_properties(
matchdict={'id': obj.id}, matched_route=Route(route))
ctx_factory(model, 'index', self.env['request'])
ctx_factory(model, 'rsc', self.env['request'])
self.set_request_properties(matchdict={'id': 'xxx'})
self.assertRaises(
HTTPNotFound, ctx_factory, Contribution, 'rsc', self.env['request'])
def test_MapMarker(self):
marker = self.env['request'].registry.getUtility(IMapMarker)
self.assertTrue(marker(None, self.env['request']))
def test_add_config_from_file(self):
from clld.web.app import add_settings_from_file
config = Configurator()
add_settings_from_file(config, TESTS_DIR.joinpath('test.ini'))
assert 'app:main.use' in config.registry.settings
def test_config(self):
class IF(Interface):
pass
config = Configurator(
root_package=importlib.import_module('clld.web'),
settings={
'sqlalchemy.url': 'sqlite://',
'clld.pacific_centered_maps': True})
config.include('clld.web.app')
# should have no effect, because a resource with this name is registered by
# default:
config.register_menu('languages', ('sources', dict(label='References')))
config.register_resource('language', None, None)
config.register_resource('testresource', Language, IF, with_index=True, test=True)
config.register_download(N3Dump(Language, 'clld'))
config.add_301('/301pattern', 'http://example.org')
config.add_410('/410pattern')
| [
"[email protected]"
] | |
ba67521102acfb8f7814511d76092c36dc6602be | 5ca85847885c6fd6f9728b0b2dffb66e96a81a1d | /hemlock/database/types/__init__.py | 6c06c43fc1ad509b883efcf9d4a7a2afd3756cbc | [] | no_license | syfreed/hemlock_test2 | 682d843636883a6a2b883932cd7282e9b865ebcd | 61933fd17630ddd1bb46d8f2090b1b039a3b4e99 | refs/heads/master | 2020-08-03T11:21:18.460905 | 2019-09-29T22:36:36 | 2019-09-29T22:36:36 | 211,733,895 | 0 | 0 | null | 2019-10-22T14:21:27 | 2019-09-29T22:25:30 | Python | UTF-8 | Python | false | false | 227 | py | """Custom Hemlock database types"""
from hemlock.database.types.data_frame import DataFrame, DataFrameType
from hemlock.database.types.function import Function, FunctionType
from hemlock.database.types.markup import MarkupType | [
"[email protected]"
] | |
380aef44b9b7964ea582816fee79936176253abd | 8152e8fba564bcfa435c45dab41cd6f0b455f857 | /farmdation_project/farmdation/contrib/sites/migrations/0002_set_site_domain_and_name.py | 4c91e98dd67d65f97ee31ca5c1c705f719a236ea | [
"MIT"
] | permissive | panuta/django-storehouse | af068d9fb356f1f6243854c8944ff8146833202e | 6b90b0de231671b20f2f549a74a52d87694d821a | refs/heads/master | 2021-01-17T18:19:45.791785 | 2016-07-02T02:52:04 | 2016-07-02T02:52:04 | 62,430,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.org/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": settings.WEBSITE_DOMAIN,
"name": settings.WEBSITE_NAME
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "example.com"
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
| [
"[email protected]"
] | |
df67e70950e1afba7bbc49e3c1d809e2e069b4f4 | ecb6b752523a126ef17895854b18e02df41c4cfe | /app_backend/tests/test_skf_categories.py | cd07666e4b26e67263b352f3da1a4e2f7482217e | [
"MIT"
] | permissive | zhanghe06/bearing_project | cd6a1b2ba509392da37e5797a3619454ca464276 | 25729aa7a8a5b38906e60b370609b15e8911ecdd | refs/heads/master | 2023-05-27T17:23:22.561045 | 2023-05-23T09:26:07 | 2023-05-23T09:39:14 | 126,219,603 | 2 | 5 | MIT | 2022-12-08T03:11:27 | 2018-03-21T17:54:44 | JavaScript | UTF-8 | Python | false | false | 1,824 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: test_skf_categories.py
@time: 2020-05-19 16:32
"""
import csv
import requests
def test_get_skf_categories(csv_name, category_id):
params = {
'id': category_id,
'language': 'en',
'source': 'webpim',
'site': '307',
'hits': 100,
'offset': 0,
}
url = 'https://search.skf.com/prod/search-skfcom/rest/apps/opc_v1/searchers/categories'
header = ['Designation', 'd[mm]', 'D[mm]', 'B[mm]', 'C[kN]', 'Co[kN]', 'Pu[kN]', 'G-Speed[r/min]', 'O-Speed[r/min]']
out = open('skf_%s.csv' % csv_name, 'a')
csv_write = csv.writer(out, dialect='excel')
csv_write.writerow(header)
c = 0
next_page = 0
while 1:
if next_page == -1:
break
res = requests.get(url, params=params).json()
rows = res.get('documentList', {}).get('documents', [])
for r in rows:
data = [r['title']] + r['table_values']
csv_write.writerow(data[:9])
c += 1
print(params['hits'] * next_page + len(rows))
if res.get('documentList', {}).get('numberOfHits', 0) > params['hits'] * next_page + len(rows):
next_page += 1
else:
next_page = -1
params['offset'] = params['hits'] * next_page
out.close()
    print('%s rows in total' % c)
def run():
category_map = {
'angular_contact_ball_bearings': 'BA1_010',
'cylindrical_roller_bearings': 'BC1_010',
'angular_contact_thrust_ball_bearings_double_direction': 'BEA_010',
'angular_contact_thrust_ball_bearings_for_screw_drives_single direction': 'BDA_010',
}
for k, v in category_map.items():
test_get_skf_categories(k, v)
if __name__ == '__main__':
run()
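# Note: each entry in category_map yields one CSV named skf_<csv_name>.csv in
# the working directory; the SKF search endpoint is paged 100 hits at a time
# via the offset parameter recomputed at the bottom of the while loop above.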
| [
"[email protected]"
] | |
91aaf6d522486538a0edfd27de42e4a83a77f21e | ab4b210d204512f51a4807a652ccc0edaabe3341 | /kombu/tests/test_functional/test_amqplib.py | 94b9bfc92db9136c20c361ea1cc76fe35f9ccf9a | [
"BSD-3-Clause"
] | permissive | mixedpuppy/kombu | 2c4ec30ccc8ab9ccb20bab525cd525febb085ce2 | 04b9a6f2fb6854fadbb4c29880866135354fdeef | refs/heads/master | 2021-01-18T08:40:39.106451 | 2010-09-06T22:41:02 | 2010-09-06T22:41:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,517 | py | import socket
import time
import unittest2 as unittest
from nose import SkipTest
from kombu import BrokerConnection
from kombu import Producer, Consumer, Exchange, Queue
def consumeN(conn, consumer, n=1):
messages = []
def callback(message_data, message):
messages.append(message_data)
message.ack()
prev, consumer.callbacks = consumer.callbacks, [callback]
while True:
conn.drain_events(timeout=1)
if len(messages) >= n:
break
    consumer.callbacks = prev
return messages
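# consumeN is a small helper used by the tests below: it swaps in a callback
# that acks each message and collects its payload, then drives the
# connection's event loop until n messages have arrived (drain_events raises
# socket.timeout if the broker stays silent for more than a second).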
class test_amqplib(unittest.TestCase):
def purge(self, names):
chan = self.connection.channel()
        for name in names:
            chan.queue_purge(name)
def setUp(self):
self.connection = BrokerConnection(transport="amqplib")
try:
self.connection.connect()
except socket.error:
self.connected = False
else:
self.connected = True
self.exchange = Exchange("tamqplib", "direct")
self.queue = Queue("tamqplib", self.exchange, "tamqplib")
def test_produce__consume(self):
if not self.connected:
raise SkipTest("Broker not running.")
chan1 = self.connection.channel()
producer = Producer(chan1, self.exchange)
producer.publish({"foo": "bar"}, routing_key="tamqplib")
chan1.close()
chan2 = self.connection.channel()
consumer = Consumer(chan2, self.queue)
message = consumeN(self.connection, consumer)
self.assertDictEqual(message[0], {"foo": "bar"})
chan2.close()
self.purge(["tamqplib"])
def test_produce__consume_multiple(self):
if not self.connected:
raise SkipTest("Broker not running.")
chan1 = self.connection.channel()
producer = Producer(chan1, self.exchange)
b1 = Queue("pyamqplib.b1", self.exchange, "b1")
b2 = Queue("pyamqplib.b2", self.exchange, "b2")
b3 = Queue("pyamqplib.b3", self.exchange, "b3")
producer.publish("b1", routing_key="b1")
producer.publish("b2", routing_key="b2")
producer.publish("b3", routing_key="b3")
chan1.close()
chan2 = self.connection.channel()
consumer = Consumer(chan2, [b1, b2, b3])
messages = consumeN(self.connection, consumer, 3)
self.assertItemsEqual(messages, ["b1", "b2", "b3"])
chan2.close()
self.purge(["pyamqplib.b1", "pyamqplib.b2", "pyamqplib.b3"])
def test_timeout(self):
if not self.connected:
raise SkipTest("Broker not running.")
chan = self.connection.channel()
self.purge([self.queue.name])
consumer = Consumer(chan, self.queue)
self.assertRaises(socket.timeout, self.connection.drain_events,
timeout=0.3)
consumer.cancel()
def test_basic_get(self):
chan1 = self.connection.channel()
producer = Producer(chan1, self.exchange)
producer.publish({"basic.get": "this"}, routing_key="basic_get")
chan1.close()
chan2 = self.connection.channel()
queue = Queue("amqplib_basic_get", self.exchange, "basic_get")
queue = queue(chan2)
queue.declare()
for i in range(50):
m = queue.get()
if m:
break
time.sleep(0.1)
self.assertEqual(m.payload, {"basic.get": "this"})
chan2.close()
def tearDown(self):
if self.connected:
self.connection.close()
| [
"[email protected]"
] | |
f2f0d7e4e1a772df81cc5683ffdb4d196a8873f2 | 219566971a08625ca14c5ea7a6e1231454694a4b | /utils/sk_utils/encoder.py | bf2df2abcba4cbf2f1eb74522ece933562b35e72 | [] | no_license | daxiongshu/kaggle-review | 6b22e73702cd7a61f3d175f301c37dcc0d6e3ae2 | fc02e85d0544dd64d57c05081c8774dc87d1972e | refs/heads/master | 2021-01-19T18:03:23.795917 | 2017-11-26T18:16:25 | 2017-11-26T18:16:25 | 101,109,595 | 28 | 14 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | from sklearn.feature_extraction import DictVectorizer
from scipy import sparse
def onehot_encode(tr,te,cols=None):
if cols is None:
cols = [i for i in tr.columns.values if i in te.columns.values]
vec = DictVectorizer()
for col in cols:
tr[col] = tr[col].map(str)
te[col] = te[col].map(str)
print("start fitting")
X = vec.fit_transform(tr[cols].T.to_dict().values())
Xt = vec.transform(te[cols].T.to_dict().values())
print("done fitting",X.shape,Xt.shape)
return X,Xt
def onehot_encode_bar(tr,te,cols=None,bar=10000):
if cols is None:
cols = [i for i in tr.columns.values if i in te.columns.values]
vec = DictVectorizer()
cat,num = [],[]
for col in cols:
nu = tr[col].unique().shape[0]
if (nu<bar and nu>2) or tr[col].dtype=='object':
cat.append(col)
tr[col] = tr[col].map(str)
te[col] = te[col].map(str)
else:
num.append(col)
print("start fitting num of cat features:",len(cat))
X = vec.fit_transform(tr[cat].T.to_dict().values())
Xt = vec.transform(te[cat].T.to_dict().values())
print("done fitting",X.shape,Xt.shape)
X = sparse.hstack([X,tr[num].values],format='csr')
Xt = sparse.hstack([Xt,te[num].values],format='csr')
return X,Xt
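# Usage sketch (illustrative):
#
#     import pandas as pd
#     tr = pd.DataFrame({'city': ['ny', 'sf'], 'n': [1, 2]})
#     te = pd.DataFrame({'city': ['sf', 'ny'], 'n': [3, 4]})
#     X, Xt = onehot_encode(tr, te, cols=['city'])  # sparse one-hot matrices
#
# onehot_encode_bar instead splits columns automatically: categoricals
# (2 < nunique < bar, or object dtype) are one-hot encoded and the remaining
# numeric columns are hstacked onto the sparse matrix unchanged.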
| [
"[email protected]"
] | |
af68bf54c136aa54b298013c4dfeb5bfe6778541 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/list/float/Schema+Instance/NISTXML-SV-IV-list-float-minLength-2-2.py | ff4cdb9e18778ab154122ead5ea2d8575b566545 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 392 | py | from output.models.nist_data.list_pkg.float_pkg.schema_instance.nistschema_sv_iv_list_float_min_length_2_xsd.nistschema_sv_iv_list_float_min_length_2 import NistschemaSvIvListFloatMinLength2
obj = NistschemaSvIvListFloatMinLength2(
value=[
4.9827486e+21,
3.5627644e+26,
5.6398728e+22,
8.9484692e+16,
5.2629679e+25,
6.6862685e+16,
]
)
| [
"[email protected]"
] | |
1806b9d06d3b1f2f058b6de59649d283c0cc1248 | d15092c5fa7e5d825f5204fa2e799f88c9495de5 | /non_semantic_speech_benchmark/export_model/model_export_utils.py | 4bd07af3f1681f17c17e09b7d7d39b1a8d54e647 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | GuyLor/google-research | 229e27ff7cf1c838736704537e3636defa710200 | 083ccfb249a3e2bfc49a9d56f7d2b7aae42e8c2c | refs/heads/master | 2023-09-05T16:28:26.720671 | 2021-11-16T06:17:56 | 2021-11-16T06:17:56 | 428,534,818 | 0 | 0 | Apache-2.0 | 2021-11-16T05:56:17 | 2021-11-16T05:56:16 | null | UTF-8 | Python | false | false | 6,971 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities and common steps for model export."""
import os
from typing import Any, Dict, List, Optional
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from non_semantic_speech_benchmark.data_prep import data_prep_utils
from non_semantic_speech_benchmark.distillation import frontend_lib
from non_semantic_speech_benchmark.distillation import models
from non_semantic_speech_benchmark.distillation.compression_lib import compression_op as compression
from non_semantic_speech_benchmark.distillation.compression_lib import compression_wrapper
def get_experiment_dirs(experiment_dir):
"""Returns a list of experiment directories.
NOTE: This assumes that only folders with hyperparams in their name occur in
  the working dir.
Args:
experiment_dir: Base for all directories.
Returns:
List of specific experiment subdirs.
"""
if not tf.io.gfile.exists(experiment_dir):
raise ValueError(f'Experiment dir doesn\'t exist: {experiment_dir}')
experiment_dirs = [f for f in tf.io.gfile.listdir(experiment_dir)
if tf.io.gfile.isdir(os.path.join(experiment_dir, f))]
return experiment_dirs
def get_params(experiment_dir_str):
"""Extracts hyperparams from experiment directory string.
Args:
experiment_dir_str: The folder-name for the set of hyperparams. Eg:
'1-al=1.0,ap=False,lr=0.0001,ms=small,tbs=512'
Returns:
A dict mapping param key (str) to eval'ed value (float/eval/string).
"""
parsed_params = {}
start_idx = experiment_dir_str.find('-') + 1
for kv in experiment_dir_str[start_idx:].split(','):
cur_split = kv.split('=')
if len(cur_split) != 2:
raise ValueError(f'Folder doesn\'t split properly: {kv}')
key, value = cur_split
try:
value = eval(value) # pylint: disable=eval-used
except: # pylint: disable=bare-except
pass
parsed_params[key] = value
return parsed_params
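# Example (illustrative): get_params('1-al=1.0,ap=False,ms=small') returns
# {'al': 1.0, 'ap': False, 'ms': 'small'} -- each value is eval'ed when
# possible (numbers, booleans) and kept as a plain string otherwise.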
def get_default_compressor():
compression_params = compression.CompressionOp.get_default_hparams().parse('')
compressor = compression_wrapper.get_apply_compression(
compression_params, global_step=0)
return compressor
def get_model(checkpoint_folder_path,
params,
tflite_friendly,
checkpoint_number = None,
include_frontend = False):
"""Given folder & training params, exports SavedModel without frontend."""
# Optionally override frontend flags from
# `non_semantic_speech_benchmark/export_model/tf_frontend.py`
override_flag_names = ['frame_hop', 'n_required', 'num_mel_bins',
'frame_width']
for flag_name in override_flag_names:
if flag_name in params:
setattr(flags.FLAGS, flag_name, params[flag_name])
static_model = models.get_keras_model(
params['mt'],
bottleneck_dimension=None,
output_dimension=1024,
truncate_output=params['tr'] if 'tr' in params else False,
frontend=include_frontend,
compressor=None,
tflite=tflite_friendly)
checkpoint = tf.train.Checkpoint(model=static_model)
if checkpoint_number:
checkpoint_to_load = os.path.join(
checkpoint_folder_path, f'ckpt-{checkpoint_number}')
assert tf.train.load_checkpoint(checkpoint_to_load)
else:
checkpoint_to_load = tf.train.latest_checkpoint(checkpoint_folder_path)
checkpoint.restore(checkpoint_to_load).expect_partial()
return static_model
def convert_tflite_model(model, quantize,
model_path):
"""Uses TFLiteConverter to convert a Keras Model.
Args:
model: Keras model obtained from get_tflite_friendly_model.
quantize: Whether to quantize TFLite model using dynamic quantization. See:
https://www.tensorflow.org/lite/performance/post_training_quant
model_path: Path for TFLite file.
"""
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.
# There is a GatherV2 op in the frontend that isn't supported by TFLite
# as a builtin op. (It works as a TFLite builtin only if the sample size
# to the frontend is a constant)
# However, TFLite supports importing some relevant operators from TF,
# at the cost of binary size (~ a few MB).
# See: https://www.tensorflow.org/lite/guide/ops_select
# NOTE: This has no effect on the model/binary size if the graph does not
# required the extra TF ops (for example, for no-frontend versio
tf.lite.OpsSet.SELECT_TF_OPS # enable TensorFlow ops.
]
if quantize:
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_buffer = converter.convert()
with tf.io.gfile.GFile(model_path, 'wb') as f:
f.write(tflite_buffer)
logging.info('Exported TFLite model to %s.', model_path)
def sanity_check(
include_frontend,
model_path,
embedding_dim,
tflite,
n_required = None,
frame_width = None,
num_mel_bins = None):
"""Sanity check model by running dummy inference."""
n_required = n_required or flags.FLAGS.n_required
frame_width = frame_width or flags.FLAGS.frame_width
num_mel_bins = num_mel_bins or flags.FLAGS.num_mel_bins
if include_frontend:
input_shape = (1, 2 * n_required)
expected_output_shape = (7, embedding_dim)
else:
feats_inner_dim = frontend_lib.get_frontend_output_shape()[0] * frame_width
input_shape = (1, feats_inner_dim, num_mel_bins, 1)
expected_output_shape = (1, embedding_dim)
logging.info('Input shape: %s. Expected output shape: %s', input_shape,
expected_output_shape)
model_input = np.zeros(input_shape, dtype=np.float32)
if tflite:
logging.info('Building tflite interpreter...')
interpreter = data_prep_utils.build_tflite_interpreter(
model_path)
logging.info('Running inference...')
output = data_prep_utils.samples_to_embedding_tflite(
model_input, sample_rate=16000, interpreter=interpreter, output_key='0',
name='sanity_check')
else:
logging.info('Loading and running inference with SavedModel...')
model = tf.saved_model.load(model_path)
output = model(model_input)['embedding'].numpy()
np.testing.assert_array_equal(output.shape, expected_output_shape)
logging.info('Model "%s" worked.', model_path)
| [
"[email protected]"
] | |
4a0f4b497cb62c2567f2afacb29986f97a64d2b4 | ddea930392ac5360b21e9043b620e703a9ccb31c | /tfx/components/transform/component.py | 7cb900078da95ed33cbe2fdf9bd9a465b5e9a56e | [
"Apache-2.0"
] | permissive | Ark-kun/tfx | 9c82b688776c80b2435bbb6154476526e8525ec8 | f685f0387bd145316f43ceb484e64f893e749dcb | refs/heads/master | 2021-07-25T05:58:15.168607 | 2020-05-22T01:07:44 | 2020-05-22T01:08:18 | 180,868,735 | 0 | 0 | Apache-2.0 | 2019-04-11T20:01:57 | 2019-04-11T20:01:57 | null | UTF-8 | Python | false | false | 6,330 | py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Transform component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text, Union
import absl
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import executor_spec
from tfx.components.transform import executor
from tfx.orchestration import data_types
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import TransformSpec
class Transform(base_component.BaseComponent):
"""A TFX component to transform the input examples.
The Transform component wraps TensorFlow Transform (tf.Transform) to
preprocess data in a TFX pipeline. This component will load the
preprocessing_fn from input module file, preprocess both 'train' and 'eval'
splits of input examples, generate the `tf.Transform` output, and save both
transform function and transformed examples to orchestrator desired locations.
## Providing a preprocessing function
The TFX executor will use the estimator provided in the `module_file` file
to train the model. The Transform executor will look specifically for the
`preprocessing_fn()` function within that file.
An example of `preprocessing_fn()` can be found in the [user-supplied
code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))
of the TFX Chicago Taxi pipeline example.
## Example
```
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=infer_schema.outputs['schema'],
module_file=module_file)
```
Please see https://www.tensorflow.org/tfx/transform for more details.
"""
SPEC_CLASS = TransformSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
examples: types.Channel = None,
schema: types.Channel = None,
module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None,
preprocessing_fn: Optional[Union[Text,
data_types.RuntimeParameter]] = None,
transform_graph: Optional[types.Channel] = None,
transformed_examples: Optional[types.Channel] = None,
input_data: Optional[types.Channel] = None,
instance_name: Optional[Text] = None,
enable_cache: Optional[bool] = None):
"""Construct a Transform component.
Args:
examples: A Channel of type `standard_artifacts.Examples` (required).
This should contain the two splits 'train' and 'eval'.
schema: A Channel of type `standard_artifacts.Schema`. This should
contain a single schema artifact.
module_file: The file path to a python module file, from which the
'preprocessing_fn' function will be loaded. The function must have the
following signature.
def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:
...
where the values of input and returned Dict are either tf.Tensor or
tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn'
must be supplied.
preprocessing_fn: The path to python function that implements a
'preprocessing_fn'. See 'module_file' for expected signature of the
function. Exactly one of 'module_file' or 'preprocessing_fn' must be
supplied.
transform_graph: Optional output 'TransformPath' channel for output of
'tf.Transform', which includes an exported Tensorflow graph suitable for
both training and serving;
transformed_examples: Optional output 'ExamplesPath' channel for
materialized transformed examples, which includes both 'train' and
'eval' splits.
input_data: Backwards compatibility alias for the 'examples' argument.
instance_name: Optional unique instance name. Necessary iff multiple
transform components are declared in the same pipeline.
enable_cache: Optional boolean to indicate if cache is enabled for the
Transform component. If not specified, defaults to the value
specified for pipeline's enable_cache parameter.
Raises:
ValueError: When both or neither of 'module_file' and 'preprocessing_fn'
is supplied.
"""
if input_data:
absl.logging.warning(
'The "input_data" argument to the Transform component has '
'been renamed to "examples" and is deprecated. Please update your '
'usage as support for this argument will be removed soon.')
examples = input_data
if bool(module_file) == bool(preprocessing_fn):
raise ValueError(
"Exactly one of 'module_file' or 'preprocessing_fn' must be supplied."
)
transform_graph = transform_graph or types.Channel(
type=standard_artifacts.TransformGraph,
artifacts=[standard_artifacts.TransformGraph()])
if not transformed_examples:
example_artifact = standard_artifacts.Examples()
example_artifact.split_names = artifact_utils.encode_split_names(
artifact.DEFAULT_EXAMPLE_SPLITS)
transformed_examples = types.Channel(
type=standard_artifacts.Examples, artifacts=[example_artifact])
spec = TransformSpec(
examples=examples,
schema=schema,
module_file=module_file,
preprocessing_fn=preprocessing_fn,
transform_graph=transform_graph,
transformed_examples=transformed_examples)
super(Transform, self).__init__(
spec=spec, instance_name=instance_name, enable_cache=enable_cache)
| [
"[email protected]"
] | |
c7c37367f4842a662f51398fc768a9d153243f39 | 159fddadea70761e5fa15ecc15ab68342958d088 | /tours/migrations/0039_auto_20190923_2153.py | 60ceae4908da7ea110c38c75959b195ff22bd085 | [] | no_license | lalit1796/mytrip | 56a2bcdaa70ffe1234025e3b0599c53d4633462c | 2b8ab34d16960ef228adb2458e5b4bd0213ee923 | refs/heads/master | 2023-08-08T01:07:30.487038 | 2021-09-04T14:04:42 | 2021-09-04T14:04:42 | 402,809,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # Generated by Django 2.2.2 on 2019-09-23 16:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tours', '0038_auto_20190923_2151'),
]
operations = [
migrations.AlterField(
model_name='package',
name='uid',
field=models.CharField(default='--uid--', max_length=200, unique=True),
),
]
| [
"[email protected]"
] | |
059ed3b99dab6d54ca4af7819e104d653bac903a | 0a2cc497665f2a14460577f129405f6e4f793791 | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_12_01/aio/operations/_disks_operations.py | 908b970b39f436b84f4bc140829b20f3c1d96b40 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | hivyas/azure-sdk-for-python | 112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b | 8b3258fa45f5dc25236c22ad950e48aa4e1c181c | refs/heads/master | 2023-06-17T12:01:26.392186 | 2021-05-18T19:56:01 | 2021-05-18T19:56:01 | 313,761,277 | 1 | 1 | MIT | 2020-12-02T17:48:22 | 2020-11-17T22:42:00 | Python | UTF-8 | Python | false | false | 41,458 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DisksOperations:
"""DisksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.Disk",
**kwargs
) -> "_models.Disk":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(disk, 'Disk')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Disk', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.Disk",
**kwargs
) -> AsyncLROPoller["_models.Disk"]:
"""Creates or updates a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:param disk: Disk object supplied in the body of the Put disk operation.
:type disk: ~azure.mgmt.compute.v2020_12_01.models.Disk
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_12_01.models.Disk]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
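    # Illustrative call pattern (a sketch; the async `compute_client` is an
    # assumption, not constructed here; a plain dict stands in for a Disk
    # model):
    #
    #     poller = await compute_client.disks.begin_create_or_update(
    #         'my-rg', 'my-disk',
    #         {'location': 'eastus',
    #          'creation_data': {'create_option': 'Empty'},
    #          'disk_size_gb': 128})
    #     disk = await poller.result()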
async def _update_initial(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.DiskUpdate",
**kwargs
) -> "_models.Disk":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(disk, 'DiskUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Disk', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.DiskUpdate",
**kwargs
) -> AsyncLROPoller["_models.Disk"]:
"""Updates (patches) a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:param disk: Disk object supplied in the body of the Patch disk operation.
:type disk: ~azure.mgmt.compute.v2020_12_01.models.DiskUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_12_01.models.Disk]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
async def get(
self,
resource_group_name: str,
disk_name: str,
**kwargs
) -> "_models.Disk":
"""Gets information about a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Disk, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_12_01.models.Disk
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
disk_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
disk_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.DiskList"]:
"""Lists all the disks under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_12_01.models.DiskList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DiskList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.DiskList"]:
"""Lists all the disks under a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_12_01.models.DiskList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DiskList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks'} # type: ignore
async def _grant_access_initial(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: "_models.GrantAccessData",
**kwargs
) -> Optional["_models.AccessUri"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AccessUri"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._grant_access_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(grant_access_data, 'GrantAccessData')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AccessUri', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_grant_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'} # type: ignore
async def begin_grant_access(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: "_models.GrantAccessData",
**kwargs
) -> AsyncLROPoller["_models.AccessUri"]:
"""Grants access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:param grant_access_data: Access data object supplied in the body of the get disk access
operation.
:type grant_access_data: ~azure.mgmt.compute.v2020_12_01.models.GrantAccessData
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AccessUri or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_12_01.models.AccessUri]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessUri"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._grant_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
grant_access_data=grant_access_data,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AccessUri', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'} # type: ignore
async def _revoke_access_initial(
self,
resource_group_name: str,
disk_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
# Construct URL
url = self._revoke_access_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_revoke_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'} # type: ignore
async def begin_revoke_access(
self,
resource_group_name: str,
disk_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Revokes access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._revoke_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'} # type: ignore
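    # Illustrative export flow (a sketch; `compute_client` is an assumption):
    # grant a temporary read SAS on a disk, use it, then revoke it.
    #
    #     poller = await compute_client.disks.begin_grant_access(
    #         'my-rg', 'my-disk',
    #         {'access': 'Read', 'duration_in_seconds': 3600})
    #     access = await poller.result()  # access.access_sas holds the SAS URL
    #     ...
    #     poller = await compute_client.disks.begin_revoke_access(
    #         'my-rg', 'my-disk')
    #     await poller.result()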
| [
"[email protected]"
] | |
d41f85e2c73f965a8555cd3f6da0967a0e321fde | 5cc4a73d6fb144d72e74b07a10b60fc36bfe50ec | /videos/api/serializers.py | 462b50c2b48ff27afdefbc67ced6a79e795bd6b4 | [] | no_license | pedrofolch/digitalsoil | 79d9497dcbb54df3c7df64f9da35d71d592fe580 | 7b6d1ffd34e991cf87c91342e5336a97fa1cf59b | refs/heads/master | 2022-12-11T00:47:01.728729 | 2019-04-11T03:34:12 | 2019-04-11T03:34:12 | 120,937,159 | 0 | 0 | null | 2022-12-08T04:58:09 | 2018-02-09T17:49:10 | CSS | UTF-8 | Python | false | false | 1,113 | py | from rest_framework import serializers
from videos.models import Video
class VideoSerializer(serializers.ModelSerializer):
uri = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Video
fields = [
'uri',
'pk',
'title',
'embed_code',
'share_message',
'order',
'tags',
'slug',
'active',
'featured',
'free_preview',
'category',
'timestamp',
'updated'
]
read_only_fields = ['user', 'order', ]
def get_uri(self, obj):
request = self.context.get('request')
return obj.get_api_url(request=request)
def validate_title(self, value):
"""We want the title to be unique"""
        qs = Video.objects.filter(title__iexact=value)  # may still include this instance
if self.instance:
qs = qs.exclude(pk=self.instance.pk)
if qs.exists():
raise serializers.ValidationError("This title has already been used")
return value
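# Illustrative usage in a view (a sketch; `video` and `request` come from the
# surrounding view code):
#
#     serializer = VideoSerializer(video, context={'request': request})
#     serializer.data['uri']  # absolute URL built via get_uri()/get_api_url()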
| [
"[email protected]"
] | |
c079f148da7e00212e3eaf5c747a85338457e1a3 | 08d02c1dcdac6b909d725cdc3ba4e0cd8a7f203b | /src/krotov/propagators.py | fcdec377f370598de080711e3aad775cb3142963 | [
"BSD-3-Clause"
] | permissive | huangdouzi11/krotov | 5f49c5e054a079373c40e5e10c2698387a58ef49 | 3c3d98255e97c3cebde0e1956c2c3de326274c6f | refs/heads/master | 2020-11-30T14:22:16.998735 | 2019-12-16T05:32:33 | 2019-12-16T05:32:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,115 | py | r"""Routines that can be passed as `propagator` to :func:`.optimize_pulses`
The numerical effort involved in the optimization is almost entirely within the
simulation of the system dynamics. In every iteration and for every objective,
the system must be "propagated" once forwards in time and once backwards in
time, see also :mod:`krotov.parallelization`.
The implementation of this time propagation must be inside the user-supplied
routine `propagator` that is passed to :func:`.optimize_pulses` and must
calculate the propagation over a single time step. In particular,
:func:`qutip.mesolve.mesolve` is not automatically used for simulating any
dynamics within the optimization. The signature for any `propagator`
must be the same as the "reference" :func:`expm` propagator:
>>> str(inspect.signature(krotov.propagators.expm))
'(H, state, dt, c_ops=None, backwards=False, initialize=False)'
The arguments are as follows (cf. :class:`Propagator`):
* `H` is the system Hamiltonian or Liouvillian, in a nested-list format similar
to that used by :func:`qutip.mesolve.mesolve`, e.g., for a Hamiltonian
$\Op{H} = \Op{H}_0 + c \Op{H}_1$, where $c$ is the value of a control field
at a particular point in time, `propagator` would receive a list ``[H0, [H1,
c]]`` where ``H0`` and ``H1`` are :class:`qutip.Qobj` operators.
The nested-list for `H` used here, with scalar values for the controls, is
obtained internally from the format used by :func:`~qutip.mesolve.mesolve`,
with time-dependent controls over the entire time grid, via
:func:`krotov.conversions.plug_in_pulse_values`.
* `state` is the :class:`qutip.Qobj` state that should be propagated, either a
Hilbert space state, or a density matrix.
* `dt` is the time step (a float). It is always positive, even for
``backwards=True``.
* `c_ops` is None, or a list of collapse (Lindblad) operators, where each list
  element is a :class:`qutip.Qobj` instance (or possibly a nested list, for
  time-dependent Lindblad operators). Note that it is generally preferred for
  `H` to be a Liouvillian, for dissipative dynamics.
* `backwards` (:class:`bool`): If passed as `True`, the `propagator` should
propagate backwards in time. In Hilbert space, this means using -`dt` instead
of `dt`. In Liouville space, there is no difference between forward and
backward propagation. In the context of Krotov's method, the backward
propagation uses the conjugate Hamiltonian, respectively Liouvillian.
However, the `propagator` routine does not need to be aware of this fact: it
will receive the appropriate `H` and `c_ops`.
* `initialize` (:class:`bool`): A flag to indicate the beginning of a
propagation over a time grid. If False in subsequent calls, the `propagator`
may assume that the input `state` is the result of the previous call to
`propagator`.
The routines in this module are provided with no guarantee to be either
general or efficient. The :func:`expm` propagator is exact to machine
precision, but generally extremely slow. For "production use", it is
recommended to supply a problem-specific `propagator` that is highly optimized
for speed. You might consider the use of Cython_. This is key to minimize the
runtime of the optimization.
The `initialize` flag enables "stateful" propagators that cache data between
calls. This can significantly improve numerical efficiency.
:class:`DensityMatrixODEPropagator` is an example for such a propagator. In
general, any stateful `propagator` should be an instance of
:class:`Propagator`.
.. _Cython: https://cython.org
"""
from abc import ABC, abstractmethod
import numpy as np
import qutip
import scipy.integrate  # scipy.integrate.ode is used below
from qutip.cy.spconvert import dense2D_to_fastcsr_fmode
from qutip.cy.spmatfuncs import spmvpy_csr
from qutip.superoperator import mat2vec, vec2mat
__all__ = ['expm', 'Propagator', 'DensityMatrixODEPropagator']
def expm(H, state, dt, c_ops=None, backwards=False, initialize=False):
"""Propagate using matrix exponentiation
This supports `H` being a Hamiltonian (for a Hilbert space `state`) or a
Liouvillian (for `state` being a density matrix) in nested-list format.
Collapse operators `c_ops` are not supported. The propagator is not
stateful, thus `initialize` is ignored.
"""
if c_ops is None:
c_ops = []
if len(c_ops) > 0:
raise NotImplementedError("Liouville exponentiation not implemented")
assert isinstance(H, list) and len(H) > 0
eqm_factor = -1j # factor in front of H on rhs of the equation of motion
if isinstance(H[0], list):
        if H[0][0].type == 'super':
eqm_factor = 1
if backwards:
eqm_factor = eqm_factor.conjugate()
A = (eqm_factor * H[0][1]) * H[0][0]
else:
if H[0].type == 'super':
eqm_factor = 1
if backwards:
eqm_factor = eqm_factor.conjugate()
A = eqm_factor * H[0]
for part in H[1:]:
if isinstance(part, list):
A += (eqm_factor * part[1]) * part[0]
else:
A += eqm_factor * part
ok_types = (state.type == 'oper' and A.type == 'super') or (
state.type in ['ket', 'bra'] and A.type == 'oper'
)
if ok_types:
return ((A * dt).expm())(state)
else:
raise NotImplementedError(
"Cannot handle argument types A:%s, state:%s"
% (A.type, state.type)
)
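# Example (a minimal sketch, not part of the module): one forward step for a
# qubit under H = H0 + u*H1, with the scalar control value u = 0.5 already
# plugged into the nested list, as described in the module docstring.
#
#     import qutip
#     H = [qutip.sigmaz(), [qutip.sigmax(), 0.5]]
#     psi = expm(H, qutip.basis(2, 0), dt=0.1)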
class Propagator(ABC):
"""Abstract base class for stateful propagators"""
@abstractmethod
def __call__(
self, H, state, dt, c_ops=None, backwards=False, initialize=False
):
"""Evaluation of a single propagation step
Args:
H (list): A Hamiltonian or Liouvillian in qutip's nested-list
format, with a scalar value in the place of a time-dependency.
For example, ``[H0, [H1, u]]`` for a drift Hamiltonian ``H0``,
a control Hamiltonian ``H1``, and a scalar value ``u`` that is
a time-dependent control evaluated for a particular point in
time.
state (qutip.Qobj): The state to propagate
dt (float): The time step over which to propagate
c_ops (list or None): A list of Lindblad operators. Using explicit
Lindblad operators should be avoided: it is usually more
efficient to convert them into a Lindbladian, passed as `H`
backwards (bool): Whether the propagation is forward in time or
backward in time
initialize (bool): Whether the propagator should (re-)initialize
                for a new propagation. When the propagator is used to advance
on a time grid, `initialize` should be passed as True for the
initial time step (0 to `dt` in a forward propagation, or T to
T-dt for a backward propagation), and False otherwise.
Note:
A propagator may assume the propagation to be "sequential"
when `initialize` is False. That is, the state to propagate is the
result of the previous call to the propagator.
"""
pass
class DensityMatrixODEPropagator(Propagator):
"""Propagator for density matrix evolution under a Lindbladian
See :class:`qutip.solver.Options` for all arguments except `reentrant`.
    Passing True for `reentrant` re-initializes the propagator in every
time step.
Warning:
By default, the propagator is not "re-entrant". That is, you cannot use
more than one instance of :class:`DensityMatrixODEPropagator` in the
same process at the same time. This limitation is due to
:class:`scipy.integrate.ode` with the "zvode" integrator not being
re-entrant. Passing ``reentrant=True`` side-steps this problem by
re-initializating :class:`scipy.integrate.ode` in every time step. This
makes it possible to use :class:`DensityMatrixODEPropagator` in the
optimization of multiple objectives, but creates a significant
overhead.
"""
def __init__(
self,
method='adams',
order=12,
atol=1e-8,
rtol=1e-6,
nsteps=1000,
first_step=0,
min_step=0,
max_step=0,
reentrant=False,
):
self.method = method
self.order = order
self.atol = atol
self.rtol = rtol
self.nsteps = nsteps
self.first_step = first_step
self.min_step = min_step
self.max_step = max_step
self._L_list = None # cached Liouvillian data
self._control_indices = None # which indices in `L` have a control val
self._r = None # the integrator
self._t = 0.0 # time up to which we've integrated
self._y = None # current vectorized state
self.reentrant = reentrant
def __call__(
self, H, state, dt, c_ops=None, backwards=False, initialize=False
):
"""Evaluation of a single propagation step
Args:
H (list): A Liouvillian superoperator in qutip's nested-list
format, with a scalar value in the place of a time-dependency.
For example, ``[L0, [L1, u]]`` for a drift Liouvillian ``L0``,
                a control Liouvillian ``L1``, and a scalar value ``u`` that is
a time-dependent control evaluated for a particular point in
time. If `initialize` is False, only the control values are
taken into account; any operators are assumed to be identical
to the internally cached values of `H` during initialization.
state (qutip.Qobj): The density matrix to propagate. The passed
value is ignored unless `initialize` is given as True.
Otherwise, it is assumed that `state` matches the (internally
stored) state that was the result from the previous propagation
step.
dt (float): The time step over which to propagate
c_ops (list or None): An empty list, or None. Since this propagator
assumes a full Liouvillian, it cannot be combined with Lindblad
operators.
backwards (bool): Whether the propagation is forward in time or
backward in time. Since the equation of motion for a
Liouvillian and conjugate Liouvillian is the same, this
parameter has no effect. Instead, for the backward propagation,
the conjugate Liouvillian must be passed for `L`.
initialize (bool): Whether to (re-)initialize for a new
propagation. This caches `H` (except for the control values)
and `state` internally.
"""
# H is really an L, but it's a very bad idea for a subclass not to
# follow the interface of the parent (including argument names).
# Internally, however, we'll use L instead of H
        if initialize:
self._initialize(H, state, dt, c_ops, backwards)
else:
if self.reentrant:
self._initialize_integrator(self._y)
# only update the control values
for i in self._control_indices:
self._L_list[i][1] = H[i][1]
self._t += dt
self._r.integrate(self._t)
self._y = self._r.y
return qutip.Qobj(
dense2D_to_fastcsr_fmode(
vec2mat(self._y), state.shape[0], state.shape[1]
),
dims=state.dims,
isherm=True,
)
@staticmethod
def _rhs(t, rho, L_list):
# _rhs being a staticmethod enables the propagator to be pickled (for
# parallelization)
out = np.zeros(rho.shape[0], dtype=complex)
L = L_list[0][0]
L_coeff = L_list[0][1]
spmvpy_csr(L.data, L.indices, L.indptr, rho, L_coeff, out)
for n in range(1, len(L_list)):
L = L_list[n][0]
L_coeff = L_list[n][1]
spmvpy_csr(L.data, L.indices, L.indptr, rho, L_coeff, out)
return out
def _initialize(self, L, rho, dt, c_ops, backwards):
self._initialize_data(L, rho, dt, c_ops, backwards)
self._initialize_integrator(self._y)
def _initialize_data(self, L, rho, dt, c_ops, backwards):
L_list = []
control_indices = []
if not (c_ops is None or len(c_ops) == 0):
# in principle, we could convert c_ops to a Lindbladian, here
raise NotImplementedError("c_ops not implemented")
for (i, spec) in enumerate(L):
if isinstance(spec, qutip.Qobj):
l_op = spec
l_coeff = 1
elif isinstance(spec, list) and isinstance(spec[0], qutip.Qobj):
l_op = spec[0]
l_coeff = spec[1]
control_indices.append(i)
else:
raise ValueError(
"Incorrect specification of time-dependent Liouvillian"
)
if l_op.type == 'super':
L_list.append([l_op.data, l_coeff, False])
else:
raise ValueError(
"Incorrect specification of time-dependent Liouvillian"
)
self._L_list = L_list
self._control_indices = control_indices
if rho.type == 'oper':
self._y = mat2vec(rho.full()).ravel('F') # initial state
else:
raise ValueError("rho must be a density matrix")
def _initialize_integrator(self, initial_vector):
r = scipy.integrate.ode(self._rhs)
r.set_integrator(
'zvode',
method=self.method,
order=self.order,
atol=self.atol,
rtol=self.rtol,
nsteps=self.nsteps,
first_step=self.first_step,
min_step=self.min_step,
max_step=self.max_step,
)
r.set_initial_value(initial_vector)
r.set_f_params(self._L_list)
self._r = r
self._t = 0.0
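    # Illustrative usage (a sketch): an instance is passed as the `propagator`
    # argument of :func:`krotov.optimize_pulses`; per the class docstring,
    # `reentrant=True` is needed when optimizing multiple objectives in the
    # same process.
    #
    #     propagator = DensityMatrixODEPropagator(atol=1e-10, rtol=1e-8)
    #     result = krotov.optimize_pulses(
    #         objectives, pulse_options, tlist, propagator=propagator, ...)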
| [
"[email protected]"
] | |
6d640e9621785207c8da853c9f502cf40e5f4c34 | 74a30d76f49051ec0200a847fe83431a9501f3d7 | /address_func.py | a5b6612a9078be5ba1d28b66d635386fd78f59b7 | [] | no_license | Milziade/nano_dash | 97e827ac24c5e8f3d97e8de69e151afd4d7d8fac | 00618fa1f6d425b63f6ada625569eadda69a0176 | refs/heads/master | 2023-05-01T02:36:51.595622 | 2021-05-12T14:06:07 | 2021-05-12T14:06:07 | 362,456,210 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,908 | py | import requests
import pandas as pd
import random
import colors
def get_json(nano_address):
action = {
'action': 'account_history',
'account': nano_address,
'count': '-1'
}
r = requests.post('https://mynano.ninja/api/node', json=action).json()
if 'error' in r:
return False
return r['history']
def get_df(history):
    df = pd.DataFrame.from_dict(history)
df = df.replace('nano_3kwppxjcggzs65fjh771ch6dbuic3xthsn5wsg6i5537jacw7m493ra8574x', 'FreeNanoFaucet.com')
df = df.replace('nano_34prihdxwz3u4ps8qjnn14p7ujyewkoxkwyxm3u665it8rg5rdqw84qrypzk', 'nano-faucet.org')
df = df.replace('nano_3pg8khw8gs94c1qeq9741n99ubrut8sj3n9kpntim1rm35h4wdzirofazmwt', 'nano.trade')
df = df.replace('nano_1tyd79peyzk4bs5ok1enb633dqsrxou91k7y4zzo1oegw4s75bokmj1pey4s', 'Apollo Faucet')
df['amount'] = [int(i)/10**30 for i in df['amount']]
df['local_timestamp'] = pd.to_datetime(df['local_timestamp'], unit='s')
del df['hash']
del df['height']
return df.to_dict('records'), df.columns.values
def get_balance(nano_address):
action = {
"action": "account_info",
"account": nano_address
}
r = requests.post('https://mynano.ninja/api/node', json=action).json()
return int(r['balance'])/10**30
def pie_chart(df):
receive_acc = list(set(item['account'] for item in df if item['type'] == 'receive'))
amount_receive = {i: 0 for i in receive_acc}
send_acc = list(set(item['account'] for item in df if item['type'] == 'send'))
amount_send = {i: 0 for i in send_acc}
    for d in df:
        key = d['account']
        # attribute each amount according to its transaction type
        if d['type'] == 'receive':
            amount_receive[key] += d['amount']
        elif d['type'] == 'send':
            amount_send[key] += d['amount']
return list(amount_receive.keys()), list(amount_receive.values()), \
list(amount_send.keys()), list(amount_send.values())
def balance_over_time(df: list):
    # Extract the type, amount and date of each transaction
    time = list([item['type'], item['amount'], str(item['local_timestamp']).split()[0]] for item in df)
    # If the account sends money, the amount counts negatively in the balance
    for item in time:
        if item[0] == 'send':
            item[1] = -item[1]
    time_n = [time[i][2] for i in range(len(time))]  # date of each transaction
    insta_bal = [time[i][1] for i in range(len(time))]
    overall_bal = [0]  # cumulative balance
for i in range(len(time)):
x = time[-1-i][1] + overall_bal[-1]
overall_bal.append(x)
overall_bal.pop(0)
return overall_bal, list(reversed(time_n)), list(reversed(insta_bal))
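# Illustrative usage (a sketch; the address is a placeholder, not a real
# account):
#
#   history = get_json('nano_1exampleaddress')
#   if history:
#       records, columns = get_df(history)
#       balances, dates, amounts = balance_over_time(records)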
def get_colors(n):
    # picks n colors at random (duplicates possible: the remove() below is commented out)
colors_list = []
for i in range(n):
cols = colors.colors
col = random.choice(cols)
#cols.remove(col)
colors_list.append(col)
return colors_list
| [
"[email protected]"
] | |
9cfd5d850bcc7982efa80f394dbca752ca768af5 | 39c7f0955e0247bbe34ec0f2a4a7e2d3294dc0df | /deployment-scripts/scripts/infrastructure/openstack/openstack-create-instance.py | 2426dac27ae7ea1e05036985173780e9337887f9 | [] | no_license | marjancek/Showcase | df9eac6b7c32d2209b6ffac3f80d6c6d92c5c1d3 | eced27c10b7b9d5e4b10d296661e33cb0375a5fa | refs/heads/master | 2021-01-15T09:08:48.146581 | 2014-11-19T12:13:16 | 2014-11-19T12:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | from novaclient.v1_1 import client as novaclient
from common.Cloudscale import *
from scripts.common.Cloudscale import check_args, parse_args
class CreateInstance:
def __init__(self, cfg):
self.cfg = cfg
self.user = cfg.get('OPENSTACK', 'username')
self.pwd = cfg.get('OPENSTACK', 'password')
self.url = cfg.get('OPENSTACK', 'auth_url')
self.tenant = cfg.get('OPENSTACK', 'tenant_name')
        self.image_name = cfg.get('OPENSTACK', 'image_name')
        # read here because create_instance() matches servers on it; the
        # config key name 'instance_name' is an assumption
        self.instance_name = cfg.get('OPENSTACK', 'instance_name')
        server = self.create_instance()
        # print the instance's floating IP
        print([s['addr'] for s in server.addresses[self.tenant]
               if s['OS-EXT-IPS:type'] == 'floating'][0])
def create_instance(self):
nc = novaclient.Client(self.user, self.pwd, self.tenant, auth_url=self.url)
        for f in nc.flavors.list():
            print(f)  # debug output: available flavors
        # NOTE: despite the name, this looks up an existing server rather
        # than booting a new one
        for server in nc.servers.list():
            if server._info['name'] == self.instance_name:
                return server
        return None
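# Expected config layout (a sketch, inferred from the cfg.get() calls above):
#
#   [OPENSTACK]
#   username = ...
#   password = ...
#   auth_url = http://keystone.example:5000/v2.0
#   tenant_name = ...
#   image_name = ...
#   instance_name = ...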
if __name__ == '__main__':
check_args(1, "<config_path>")
_, cfg, _, _ = parse_args()
CreateInstance(cfg) | [
"[email protected]"
] | |
43d1732932045b85378fb924ea0c306a767dc816 | 93090ffc3ccaf142a0c739e00c28486175220373 | /04-day/main.py | 96482f4046784b840f4b1a090c566af8992a3b4e | [
"MIT"
] | permissive | timbook/advent-of-code-2020 | f17b89fb1ccb5fe449b4864cca64a95f527c6782 | 3b1c107a67b9f31891e77a258e45aee76fac4e47 | refs/heads/main | 2023-01-25T01:19:02.520349 | 2020-12-17T20:31:31 | 2020-12-17T20:31:31 | 317,431,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,161 | py | import re
from functools import reduce
raw = open('input.txt', 'r').read().split('\n\n')
class Passport:
required_fields = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']
def __init__(self, entry):
        fields = re.split(r'\s+', entry.strip())
self.fields = {field.split(':')[0]:field.split(':')[1] for field in fields}
def is_valid_a(self):
return all(req in self.fields for req in self.required_fields)
def is_valid_b(self):
return self.is_valid_a() and reduce(
lambda a, b: a and b,
[
self.is_valid_byr(),
self.is_valid_iyr(),
self.is_valid_eyr(),
self.is_valid_hgt(),
self.is_valid_hcl(),
self.is_valid_ecl(),
self.is_valid_pid()
]
)
    def is_valid_byr(self):
        byr = self.fields['byr']
        return bool(re.fullmatch(r'\d{4}', byr)) and (1920 <= int(byr) <= 2002)
    def is_valid_iyr(self):
        iyr = self.fields['iyr']
        return bool(re.fullmatch(r'\d{4}', iyr)) and (2010 <= int(iyr) <= 2020)
    def is_valid_eyr(self):
        eyr = self.fields['eyr']
        return bool(re.fullmatch(r'\d{4}', eyr)) and (2020 <= int(eyr) <= 2030)
    def is_valid_hgt(self):
        hgt = self.fields['hgt'][:-2]
        unit = self.fields['hgt'][-2:]
        if unit == 'cm':
            return bool(re.fullmatch(r'\d+', hgt)) and (150 <= int(hgt) <= 193)
        elif unit == 'in':
            return bool(re.fullmatch(r'\d+', hgt)) and (59 <= int(hgt) <= 76)
        else:
            return False
    def is_valid_hcl(self):
        hcl = self.fields['hcl']
        # fullmatch (rather than match) rejects trailing junk like '#123abc0'
        return bool(re.fullmatch(r'#[0-9a-f]{6}', hcl))
def is_valid_ecl(self):
return self.fields['ecl'] in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
def is_valid_pid(self):
        return bool(re.fullmatch(r'\d{9}', self.fields['pid']))
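# Quick sanity check (a sketch): a record with all required fields in range
# passes both checks.
#
#   ppt = Passport('byr:1980 iyr:2012 eyr:2022 hgt:180cm '
#                  'hcl:#123abc ecl:brn pid:000000001')
#   assert ppt.is_valid_a() and ppt.is_valid_b()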
passports = [Passport(line) for line in raw]
n_valid = sum(ppt.is_valid_a() for ppt in passports)
print(f"A :: Number of valid passports: {n_valid}")
# 199 too high
n_valid = sum(ppt.is_valid_b() for ppt in passports)
print(f"B :: Number of valid passports: {n_valid}")
| [
"[email protected]"
] | |
5d028cc1080c4c3e46b7d4d114f8177121ac2c6d | d16c15f2fb433c6b5874ea49ff5db89315e9711d | /benchmarks/comparison-with-addict.py | 2f6c181632636f0ac48d33a840027e004ad8d8d9 | [
"MIT"
] | permissive | vltr/middle | f9e16735beaccafdc7535271418dd9a8f6b22706 | f7782610fbb1d9232a3b4cfea057a9331db2775e | refs/heads/develop | 2023-08-17T20:35:02.878999 | 2019-06-18T15:44:46 | 2019-06-18T15:44:46 | 138,063,484 | 11 | 2 | MIT | 2023-09-04T21:27:09 | 2018-06-20T17:15:26 | Python | UTF-8 | Python | false | false | 4,561 | py | import cProfile
import sys
from enum import Enum
from enum import IntEnum
from enum import unique
from typing import Dict
from typing import List
from typing import Set
# --------------------------------------------------------------- #
# Import boilerplate
# --------------------------------------------------------------- #
try:
import timy
import middle
from addict import Dict as ADict
except ImportError:
print(
"To run this script, you must install these dependencies:",
file=sys.stderr,
)
print("- addict", file=sys.stderr)
print("- middle", file=sys.stderr)
print("- timy", file=sys.stderr)
sys.exit(1)
# --------------------------------------------------------------- #
# Fixed variables
# --------------------------------------------------------------- #
TOTAL_LOOPS = 1_000_000
if "short" in sys.argv:
TOTAL_LOOPS = 1
# --------------------------------------------------------------- #
# Enum definition
# --------------------------------------------------------------- #
@unique
class PlatformEnum(str, Enum):
XBOX1 = "XBOX1"
PLAYSTATION4 = "PLAYSTATION4"
PC = "PC"
@unique
class LanguageEnum(IntEnum):
ENGLISH = 1
JAPANESE = 2
SPANISH = 3
GERMAN = 4
PORTUGUESE = 5
@unique
class CityRegionEnum(str, Enum):
TROPICAL = "TROPICAL"
TEMPERATE = "TEMPERATE"
BOREAL = "BOREAL"
# --------------------------------------------------------------- #
# middle model definition
# --------------------------------------------------------------- #
class MiddleCity(middle.Model):
name: str = middle.field()
region: CityRegionEnum = middle.field()
class MiddleGame(middle.Model):
name: str = middle.field()
platform: PlatformEnum = middle.field()
score: float = middle.field()
resolution_tested: str = middle.field()
genre: List[str] = middle.field()
rating: Dict[str, float] = middle.field()
players: Set[str] = middle.field()
language: LanguageEnum = middle.field()
awesome_city: MiddleCity = middle.field()
# --------------------------------------------------------------- #
# Test variable
# --------------------------------------------------------------- #
MODEL_INSTANCE = {
"name": "Cities: Skylines",
"platform": "PC",
"score": 9.0,
"resolution_tested": "1920x1080",
"genre": ["Simulators", "City Building"],
"rating": {"IGN": 8.5, "Gamespot": 8.0, "Steam": 4.5},
"players": ["Flux", "strictoaster"],
"language": 1,
"awesome_city": {"name": "Blumenau", "region": "TEMPERATE"},
}
# --------------------------------------------------------------- #
# Test runnable
# --------------------------------------------------------------- #
def test_addict():
game = ADict(MODEL_INSTANCE)
assert isinstance(game.name, str)
assert isinstance(game.platform, str)
assert isinstance(game.score, float)
assert isinstance(game.resolution_tested, str)
assert isinstance(game.genre, list)
assert isinstance(game.rating, dict)
assert isinstance(game.players, list)
assert isinstance(game.language, int)
assert isinstance(game.awesome_city, dict)
assert isinstance(game.awesome_city.name, str)
assert isinstance(game.awesome_city.region, str)
def test_middle():
game = MiddleGame(**MODEL_INSTANCE)
assert isinstance(game, MiddleGame)
assert isinstance(game.name, str)
assert isinstance(game.platform, PlatformEnum)
assert isinstance(game.score, float)
assert isinstance(game.resolution_tested, str)
assert isinstance(game.genre, list)
assert isinstance(game.rating, dict)
assert isinstance(game.players, set)
assert isinstance(game.language, LanguageEnum)
assert isinstance(game.awesome_city, MiddleCity)
assert isinstance(game.awesome_city.name, str)
assert isinstance(game.awesome_city.region, CityRegionEnum)
# --------------------------------------------------------------- #
# Run tests
# --------------------------------------------------------------- #
def main():
if "profile" in sys.argv:
cProfile.run(
"for i in range({}): test_addict()".format(TOTAL_LOOPS),
sort="tottime",
)
cProfile.run(
"for i in range({}): test_middle()".format(TOTAL_LOOPS),
sort="tottime",
)
else:
timy.timer(ident="addict", loops=TOTAL_LOOPS)(test_addict).__call__()
timy.timer(ident="middle", loops=TOTAL_LOOPS)(test_middle).__call__()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
8d00346ccd82c5a105df92d54bf196574f2e3de6 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/9c113f9b86aece3efdf169af67c4e436/snippet.py | 0262b68437c1a2c6ae2c332e575f4c5626c155a7 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 288 | py | def main():
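    # The array below satisfies a[i] = a[i-1] + ... + a[i-k] with seeds
    # a[i] = 2**i for i < k (one common reading: length-i binary strings
    # with no run of k consecutive ones); the sliding-window update
    # a[i] = 2*a[i-1] - a[i-k-1] keeps each step O(1).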
n,k=[int(i) for i in input().split(' ')]
a=[1]*(n+1)
for i in range(1,min(k, n+1)):
a[i]=a[i-1]+a[i-1]
if k <= n:
a[k]=sum([a[j] for j in range(k)])
for i in range(k+1, n+1):
a[i]=a[i-1]+a[i-1]-a[i-k-1]
print(str(a[n]))
main()
| [
"[email protected]"
] | |
6f30e11fcb2ba4ecc7185185027b62f16a44e4c6 | be5ea20226c37d81f1ccb2f704d8825d36e88765 | /01. Defining classes/Exercise/01_car.py | 00078b2c67447f092dffe365c113ef2ae8c2b86b | [] | no_license | dimDamyanov/PythonOOP | 3845e450e5a48fef4f70a186664e07c0cd60e09b | 723204f5b7e953874fac9314e48eb1d1628d6ff5 | refs/heads/main | 2023-04-07T18:00:36.735248 | 2021-04-19T20:57:14 | 2021-04-19T20:57:14 | 341,329,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | class Car:
def __init__(self, name: str, model: str, engine: str):
self.name = name
self.model = model
self.engine = engine
def get_info(self):
return f'This is {self.name} {self.model} with engine {self.engine}'
car = Car("Kia", "Rio", "1.3L B3 I4")
print(car.get_info())
| [
"[email protected]"
] | |
06ab40e0b88490e56e9bd9ac37b30153986def03 | 09d7c902e35df7eec3d3da192f0aaa47739540a5 | /user_portrait/cron/flow4/zmq_work_weibo_flow4.py | 241ba690235543b12bd7c89723eb1de326fac4c5 | [] | no_license | yuwendong/user_portrait | 2a9604ea5389f9410aae98acad11742454c36d6e | 1b2cd78c91a7154c3e360a90d8426b53b68b4453 | refs/heads/master | 2020-12-24T16:41:06.469723 | 2015-10-15T14:32:26 | 2015-10-15T14:32:26 | 38,371,050 | 1 | 0 | null | 2015-07-01T12:54:02 | 2015-07-01T12:54:01 | null | UTF-8 | Python | false | false | 5,036 | py | # -*- coding=utf-8 -*-
import re
import sys
import zmq
import time
import json
import math
from datetime import datetime
reload(sys)
sys.path.append('../../')
from time_utils import ts2datetime, datetime2ts
from global_utils import R_CLUSTER_FLOW2 as r_cluster
from global_config import ZMQ_VENT_PORT_FLOW4, ZMQ_CTRL_VENT_PORT_FLOW4,\
ZMQ_VENT_HOST_FLOW1, ZMQ_CTRL_HOST_FLOW1
from global_config import SENSITIVE_WORDS_PATH
f = open(SENSITIVE_WORDS_PATH, 'rb')
# test
#f = open('/home/ubuntu8/huxiaoqian/user_portrait/user_portrait/cron/flow2/zz.txt', 'rb')
def load_sensitive_words():
ZZ_WORD = []
for line in f:
line_list = line.split('=')
word = line_list[0]
ZZ_WORD.append(word.decode('utf-8'))
f.close()
return ZZ_WORD
SENSITIVE_WORD = load_sensitive_words()
print 'sensitive_word:', SENSITIVE_WORD
def cal_text_work(item):
uid = item['uid']
timestamp = item['timestamp']
date = ts2datetime(timestamp)
ts = datetime2ts(date)
#print 'ts:', date, ts
text = item['text']
if isinstance(text, str):
text = text.decode('utf-8', 'ignore')
RE = re.compile(u'#([a-zA-Z-_⺀-⺙⺛-⻳⼀-⿕々〇〡-〩〸-〺〻㐀-䶵一-鿃豈-鶴侮-頻並-龎]+)#', re.UNICODE)
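    # The pattern captures hashtag text between '#' marks; besides ASCII word
    # characters it also admits several CJK unicode ranges, so Chinese
    # hashtags are matched as well.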
hashtag_list = RE.findall(text)
if hashtag_list:
        # all hashtags here are handled as unicode
hashtag_dict = dict()
for hashtag in hashtag_list:
try:
hashtag_dict[hashtag] += 1
except:
hashtag_dict[hashtag] = 1
try:
hashtag_count_string = r_cluster.hget('hashtag_'+str(ts), str(uid))
#print 'key:hashtag_'+ str(ts)
#print 'hget hashtag result:', hashtag_count_string
hashtag_count_dict = json.loads(hashtag_count_string)
for hashtag in hashtag_dict:
count = hashtag_dict[hashtag]
try:
hashtag_count_dict[hashtag] += count
except:
hashtag_count_dict[hashtag] = count
#print 'hashtag_count_dict:', hashtag_count_dict
r_cluster.hset('hashtag_'+str(ts), str(uid), json.dumps(hashtag_count_dict))
except:
#print 'hash_dict:', hashtag_dict
r_cluster.hset('hashtag_'+str(ts), str(uid), json.dumps(hashtag_dict))
def cal_text_sensitive(item):
text = item['text']
uid = item['uid']
timestamp = item['timestamp']
date = ts2datetime(timestamp)
ts = datetime2ts(date)
if isinstance(text, str):
text = text.decode('utf-8', 'ignore')
sensitive_result = [word for word in SENSITIVE_WORD if word in text]
if sensitive_result:
sensitive_dict = dict()
for word in sensitive_result:
try:
sensitive_dict[word] += 1
except:
sensitive_dict[word] = 1
#print 'sensitive_dict:', sensitive_dict
try:
sensitive_count_string = r_cluster.hget('sensitive_'+str(ts), str(uid))
#print 'key:sensitive_', str(ts)
#print 'hget sensitive result:', sensitive_count_string
sensitive_count_dict = json.loads(sensitive_count_string)
for word in sensitive_dict:
count = sensitive_dict[word]
try:
sensitive_count_dict[word] += count
except:
sensitive_count_dict[word] = count
#print 'sensitive_count_dict:', sensitive_count_dict
r_cluster.hset('sensitive_'+str(ts), str(uid), json.dumps(sensitive_count_dict))
except:
#print 'sensitive:', sensitive_dict
r_cluster.hset('sensitive_'+str(ts), str(uid), json.dumps(sensitive_dict))
if __name__ == "__main__":
"""
receive weibo
"""
context = zmq.Context()
receiver = context.socket(zmq.PULL)
receiver.connect('tcp://%s:%s' %(ZMQ_VENT_HOST_FLOW1, ZMQ_VENT_PORT_FLOW4))
controller = context.socket(zmq.SUB)
    controller.connect("tcp://%s:%s" %(ZMQ_VENT_HOST_FLOW1, ZMQ_CTRL_VENT_PORT_FLOW4))
count = 0
tb = time.time()
ts = tb
while 1:
try:
item = receiver.recv_json()
except Exception, e:
print Exception, ":", e
if not item:
continue
if item['sp_type'] == '1':
try:
if item and (item['message_type']==1 or item['message_type']==3):
cal_text_work(item)
cal_text_sensitive(item)
except:
pass
count += 1
if count % 10000 == 0:
te = time.time()
print '[%s] cal speed: %s sec/per %s' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), te - ts, 10000)
#if count % 100000 == 0:
# print '[%s] total cal %s, cost %s sec [avg %s per/sec]' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), count, te - tb, count / (te - tb))
ts = te
| [
"[email protected]"
] | |
324d2ab09e9924efc6e877be13f4cdfe40095ef3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03495/s459851114.py | 65115c84d9ff5de8c900d6c54330919f477407fb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | # arc086_a.py
import collections
N, K = map(int, input().split())
A = list(map(int, input().split()))
C = collections.Counter(A)
C = sorted(C.values())
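# Keep the K most frequent values as-is; every ball holding one of the other
# len(C) - K values must be rewritten, so the answer is the sum of the
# smallest len(C) - K counts.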
length = len(C)
# print(length, K, C)
print(sum(C[:(length - K)]))
| [
"[email protected]"
] | |
2df623dd9db20cd2aa0eb30f1334f2ffc828c00c | a47e5a1565b8b4a23010020fa6ed4225459845e7 | /marubatsu1.py | 3085d8d5bc8fb46f0a2622392e8575a84bd52493 | [] | no_license | katuhito/workspace13 | 0a4832e510c5f59a48aa0262985bcda91c01908d | 16e55c5e6f32e009ff27c9a22315916175303970 | refs/heads/master | 2023-03-08T08:48:28.602697 | 2021-03-07T01:07:41 | 2021-03-07T01:07:41 | 335,497,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | # 3目並べ
import random
goal = [
0b111000000, 0b000111000, 0b000000111,
0b100100100, 0b010010010, 0b001001001,
0b100010001, 0b001010100
]
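# Cell-to-bit mapping implied by the masks above (bit i = cell i):
#
#     8 | 7 | 6
#     5 | 4 | 3
#     2 | 1 | 0
#
# The eight masks cover the three rows, the three columns and both diagonals.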
# Judge whether a player has three in a row
def check(player):
for mask in goal:
if player & mask == mask:
return True
return False
# Players place their marks alternately
def play(p1, p2):
    if check(p2):  # three in a row: print both boards and stop
print([bin(p1), bin(p2)])
return
board = p1 | p2
    if board == 0b111111111:  # every cell is taken: end in a draw
print([bin(p1), bin(p2)])
return
    # look for cells where a mark can still be placed
    w = [i for i in range(9) if (board & (1 << i)) == 0]
    # try placing on a randomly chosen empty cell
r = random.choice(w)
    play(p2, p1 | (1 << r))  # swap turns and search for the next move
play(0, 0)
| [
"[email protected]"
] | |
a29f6993d7d12c4674aecc24f39f9aa52dc357fc | 8b85b933041abac2879484c6280c1bf79f91358d | /moss/plotting.py | b2b3d28e4123da388554e57a623c5361d41b42e4 | [
"BSD-3-Clause"
] | permissive | ghaseminya/moss | 109a4c37d2ecd85bfaff03c563eded98d5296530 | 06a0ea862b9a6112921dcf5cf2a6d445d7f7e0dc | refs/heads/master | 2020-12-30T14:44:33.504941 | 2017-05-09T17:24:28 | 2017-05-09T17:24:28 | 91,079,527 | 1 | 0 | null | 2017-05-12T10:20:17 | 2017-05-12T10:20:17 | null | UTF-8 | Python | false | false | 338 | py | import matplotlib.pyplot as plt
def grid_axes_labels(axes, xlabel=None, ylabel=None, **kws):
plt.setp(axes.flat, xlabel="", ylabel="")
if xlabel is not None:
for ax in axes[-1]:
ax.set_xlabel(xlabel, **kws)
if ylabel is not None:
for ax in axes[:, 0]:
ax.set_ylabel(ylabel, **kws)
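

if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module): label only
    # the outer row/column of a shared-axis grid. The grid shape and label
    # strings here are arbitrary assumptions.
    fig, axes = plt.subplots(2, 3, sharex=True, sharey=True)
    grid_axes_labels(axes, xlabel="time (s)", ylabel="amplitude")
    plt.show()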
| [
"[email protected]"
] | |
83a1f372106c9220a41955a816d98ee436bd4081 | 7e118b7f02275e7d1faf1e24e6bb1f6d1e173da5 | /04_factory/pizza_store/lib/pizza.py | 4bd3d2bd59b7f2f37614cabbd5b5c69869ede551 | [
"MIT"
] | permissive | denzow/practice-design-pattern | e378a7d2d95585ab6e3c3a4b72f46c5faeb2b92b | 141d59c51375e36769a73b6ff135a8afae64b664 | refs/heads/master | 2021-05-13T17:33:10.892543 | 2018-02-11T14:53:06 | 2018-02-11T14:53:06 | 116,826,619 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | # coding: utf-8
from abc import ABC, abstractmethod
class Pizza(ABC):
def __init__(self, ingredient_factory):
"""
:param lib.pizza_factory.PizzaIngredientFactory ingredient_factory:
"""
self._ingredient_factory = ingredient_factory
self._name = None
self._dough = None
self._sauce = None
self._veggies = []
self._cheese = None
self._pepperoni = None
self._clam = None
@abstractmethod
def prepare(self):
pass
def bake(self):
        print('Bake for 25 minutes at 350 degrees')
def cut(self):
        print('Cut the pizza into wedge-shaped slices')
def box(self):
        print('Put the pizza into an official PizzaStore box')
def set_name(self, name):
self._name = name
def get_name(self):
return self._name
class CheesePizza(Pizza):
def prepare(self):
        print('Preparing {}'.format(self._name))
self._dough = self._ingredient_factory.create_dough()
self._sauce = self._ingredient_factory.create_sauce()
self._cheese = self._ingredient_factory.create_cheese()
class ClamPizza(Pizza):
def prepare(self):
        print('Preparing {}'.format(self._name))
self._dough = self._ingredient_factory.create_dough()
self._sauce = self._ingredient_factory.create_sauce()
self._cheese = self._ingredient_factory.create_cheese()
self._clam = self._ingredient_factory.create_clam()
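
# The concrete ingredient factory lives in another module (lib.pizza_factory).
# A minimal sketch of the interface these prepare() methods assume, with
# method names inferred from the calls above rather than taken from the
# original source, might look like:
#
#     class PizzaIngredientFactory(ABC):
#         @abstractmethod
#         def create_dough(self): ...
#         @abstractmethod
#         def create_sauce(self): ...
#         @abstractmethod
#         def create_cheese(self): ...
#         @abstractmethod
#         def create_clam(self): ...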
| [
"[email protected]"
] | |
9ec342d11277ab3c3daa4867c1bb011eb93f1655 | fc529d1d801d695150a6ebcd3e2d548ffa8d738d | /tests/test_parser.py | 86ba3dbec713ac04cd077e2304299e0d3fd5199a | [] | permissive | JeanExtreme002/Virtual-Assistant | 1db080cf3026a64918c0ebadd1727c29bb46205e | 1444af964b21c6d043b1b8ccb23f34999c5fd81a | refs/heads/master | 2021-07-25T05:54:58.886570 | 2021-07-09T05:35:31 | 2021-07-09T05:35:31 | 219,252,394 | 7 | 2 | BSD-3-Clause | 2020-09-22T16:01:20 | 2019-11-03T04:41:47 | Python | UTF-8 | Python | false | false | 1,510 | py | import os, sys, util
sys.path.append(os.getcwd())
from src.assistant.exec.commandList import CommandList
from src.assistant.exec.parser import VoiceCommandParser
command_list = CommandList("EN-US")
parser = VoiceCommandParser(command_list)
def test_parse_system_command():
command_instance = parser.parse("repeat hello world")
target_command = command_list.get_command_instance_by_voice_command("repeat")
assert command_instance.system_command.lower() == "repeat"
assert command_instance.args.lower() == "hello world"
assert command_instance.info == target_command.info
def test_parse_user_command():
voice_command, terminal_command = util.generate_random_user_command()
command_list.set_user_command(voice_command, {"terminal_command": terminal_command})
other_voice_command = voice_command + " and something more"
other_terminal_command = terminal_command + " and something more"
command_list.set_user_command(other_voice_command, {"terminal_command": other_terminal_command})
    argument1, argument2 = "a docile dog eating meat", "a cute cat sleeping"
command_instance = parser.parse(voice_command + " " + argument1)
other_command_instance = parser.parse(other_voice_command + " " + argument2)
assert command_instance.terminal_command == terminal_command
assert command_instance.args.lower() == argument1
assert other_command_instance.terminal_command == other_terminal_command
assert other_command_instance.args.lower() == argument2
| [
"[email protected]"
] | |
7f418a52a6716871d15b6b8f3ba89cbaa3cd4da8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03138/s702162180.py | 23360336ae7fa25d7e78a6efe78b883d1ec939af | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | import os, sys, re, math
N,K = list(map(int,input().split(' ')))
A = list(map(int,input().split(' ')))
d = math.ceil(math.log2(1e+12))
ones = [0 for _ in range(d)]
for a in A:
s = bin(a)[::-1]
for i in range(len(s)-2):
if s[i] == '1':
ones[i] += 1
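# Choose X greedily from the most significant bit down: taking bit di pays off
# when at most half of the numbers already have that bit set (the XOR then
# turns it on for the majority), and X itself must never exceed K.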
X = 0
for di in range(d-1,-1,-1):
if X + 2 ** di <= K and ones[di] <= N * 0.5:
X += 2 ** di
ret = 0
for a in A:
ret += X ^ a
print(ret)
| [
"[email protected]"
] | |
efdd83f89865011b36433b262e2cd99fe684ad1c | 6c10c6e229014dc3bf14efaec2ea8bf07c406752 | /AILearning/OptimationDeepLearning/MinibatchSGD.py | e41b377b4f3ba2829d2a1b4188d25f78bd57522d | [] | no_license | GuyRobot/AIPythonExamples | e59c6edb355d9cadee2b3f19a087b1b656956262 | 4acdd0d4966e31a616910554bc075b641aa152df | refs/heads/master | 2021-05-21T13:05:49.615593 | 2021-02-28T06:41:04 | 2021-02-28T06:41:04 | 252,662,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,465 | py | from d2l import AllDeepLearning as d2l
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import nn
import numpy as np
"""
Minibatch SGD

    w ← w − η_t · g_t,   where g_t = ∂_w f(x_t, w)

We can increase the computational efficiency of this operation by
applying it to a minibatch of observations at a time. That is, we
replace the gradient g_t over a single observation by one over a small
batch B_t:

    g_t = (1 / |B_t|) · ∂_w Σ_{i ∈ B_t} f(x_i, w)
"""
def get_data_ch11(batch_size=10, n=1500):
data = np.genfromtxt("E:\\Python_Data\\airfoil_self_noise.dat",
dtype=np.float32)
data = (data - data.mean(axis=0)) / data.std(axis=0)
data_iter = d2l.load_array((data[:n, :-1], data[:n, -1]), batch_size, is_train=True)
return data_iter, data.shape[1] - 1
def sgd(params, states, hyper_params):
for p in params:
p[:] -= hyper_params['lr'] * p.grad
def train_ch11(trainer_fn, state, hyper_params, data_iter, feature_dim, num_epochs=2):
w = nd.random.normal(scale=0.01, shape=(feature_dim, 1))
b = nd.zeros(1)
w.attach_grad()
b.attach_grad()
net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
animator = d2l.Animator(xlabel='epoch', ylabel='loss', xlim=[0, num_epochs],
ylim=[0.22, 0.32])
n, timer = 0, d2l.Timer()
for _ in range(num_epochs):
for X, y in data_iter:
with autograd.record():
l = loss(net(X), y).mean()
l.backward()
trainer_fn([w, b], state, hyper_params)
n += X.shape[0]
if n % 200 == 0:
timer.stop()
animator.add(n / X.shape[0] / len(data_iter),
d2l.evaluate_loss(net, data_iter, loss))
timer.start()
print('loss: %.3f, %.3f sec/epoch' % (animator.Y[0][-1], timer.avg()))
return timer.cumsum(), animator.Y[0]
def train_sgd(lr, batch_size, num_epochs=2):
data_iter, feature_dim = get_data_ch11(batch_size)
return train_ch11(sgd, None, {'lr': lr}, data_iter, feature_dim, num_epochs)
gd_res = train_sgd(1, 1500, 10)
sgd_res = train_sgd(0.005, 1)
mini1_res = train_sgd(.4, 100)
mini2_res = train_sgd(.05, 10)
d2l.set_figsize([6, 3])
d2l.plot(*list(map(list, zip(gd_res, sgd_res, mini1_res, mini2_res))),
'time (sec)', 'loss', xlim=[1e-2, 10],
legend=['gd', 'sgd', 'batch size = 100', 'batch size = 10'])
d2l.plt.gca().set_xscale('log')
d2l.plt.show()
def train_gluon_ch11(tr_name, hyper_params, data_iter, num_epochs=2):
net = nn.Sequential()
net.add(nn.Dense(1))
net.initialize(init.Normal(sigma=.01))
trainer = gluon.Trainer(net.collect_params(), tr_name, hyper_params)
loss = gluon.loss.L2Loss()
animator = d2l.Animator(xlabel='epoch', ylabel='loss', xlim=[0, num_epochs],
ylim=[0.22, 0.35])
n, timer = 0, d2l.Timer()
for _ in range(num_epochs):
for X, y in data_iter:
with autograd.record():
l = loss(net(X), y)
l.backward()
trainer.step(X.shape[0])
n += X.shape[0]
if n % 200 == 0:
timer.stop()
animator.add(n / X.shape[0]/len(data_iter),
(d2l.evaluate_loss(net, data_iter, loss)))
timer.start()
print('loss: %.3f, %.3f sec/epoch' % (animator.Y[0][-1], timer.avg()))
data_iter, _ = get_data_ch11(10)
train_gluon_ch11('sgd', {'learning_rate': 0.05}, data_iter)
d2l.plt.show()
"""
Vectorization makes code more efficient due to reduced overhead
arising from the deep learning framework and due to better memory
locality and caching on CPUs and GPUs.
There is a trade-off between statistical efficiency arising
from SGD and computational efficiency arising from processing large
batches of data at a time.
Minibatch stochastic gradient descent offers the best of
both worlds: computational and statistical efficiency.
In minibatch SGD we process batches of data obtained by a random
permutation of the training data (i.e., each observation is processed only
once per epoch, albeit in random order).
It is advisable to decay the learning rates during training.
In general, minibatch SGD is faster than SGD and gradient descent
for convergence to a smaller risk, when measured in terms of clock time.
""" | [
"[email protected]"
] | |
149b691ee439a8d0a0d99a8d0e4ac6254fc50945 | 2cc6cf6e9d91799cbd9ac02f2771f7c9f95776bd | /test13.py | ce7a89c8373a7740d97c943989fa75b86726a611 | [] | no_license | Master-sum/python_project | f938aa9f27e040c68c11766e1358dd7fff231b22 | 63036d1d80cd645d8080c8fee5bb30f241ab9914 | refs/heads/master | 2022-04-22T04:45:21.263195 | 2020-04-11T11:14:54 | 2020-04-11T11:14:54 | 254,782,126 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | def change (aint,alst):
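    # 'aint' is only rebound locally, so the caller's int stays unchanged
    # (ints are immutable); 'alst' is mutated in place, so the caller's
    # list does change.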
aint = 0
alst[0] = 0
alst.append(4)
print('aint:',aint)
print('alst:',alst)
aint = 3
alst = [1,2,3]
print('1',aint)
print('1',alst)
change(aint,alst)
print('3',aint)
print('3',alst) | [
"[email protected]"
] | |
aeb8b2fe9095164e8fa12451d0d10e1a784add85 | 651a296c8f45b5799781fd78a6b5329effe702a0 | /subset/comp_rank_grlex.py | 0d203ef0c529c7bf4b41b524004553b9408e4928 | [] | no_license | pdhhiep/Computation_using_Python | 095d14370fe1a01a192d7e44fcc81a52655f652b | 407ed29fddc267950e9860b8bbd1e038f0387c97 | refs/heads/master | 2021-05-29T12:35:12.630232 | 2015-06-27T01:05:17 | 2015-06-27T01:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,691 | py | #!/usr/bin/env python
def comp_rank_grlex ( kc, xc ):
#*****************************************************************************80
#
## COMP_RANK_GRLEX computes the graded lexicographic rank of a composition.
#
# Discussion:
#
# The graded lexicographic ordering is used, over all KC-compositions
# for NC = 0, 1, 2, ...
#
# For example, if KC = 3, the ranking begins:
#
# Rank Sum 1 2 3
# ---- --- -- -- --
# 1 0 0 0 0
#
# 2 1 0 0 1
# 3 1 0 1 0
# 4 1 1 0 1
#
# 5 2 0 0 2
# 6 2 0 1 1
# 7 2 0 2 0
# 8 2 1 0 1
# 9 2 1 1 0
# 10 2 2 0 0
#
# 11 3 0 0 3
# 12 3 0 1 2
# 13 3 0 2 1
# 14 3 0 3 0
# 15 3 1 0 2
# 16 3 1 1 1
# 17 3 1 2 0
# 18 3 2 0 1
# 19 3 2 1 0
# 20 3 3 0 0
#
# 21 4 0 0 4
# .. .. .. .. ..
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 30 October 2014
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, int KC, the number of parts in the composition.
# 1 <= KC.
#
# Input, int XC[KC], the composition.
# For each 1 <= I <= KC, we have 0 <= XC(I).
#
# Output, int RANK, the rank of the composition.
#
from i4_choose import i4_choose
from i4vec_sum import i4vec_sum
from sys import exit
import numpy as np
#
# Ensure that 1 <= KC.
#
if ( kc < 1 ):
print '';
print 'COMP_RANK_GRLEX - Fatal error!'
print ' KC < 1'
exit ( 'COMP_RANK_GRLEX - Fatal error!' )
#
# Ensure that 0 <= XC(I).
#
for i in range ( 0, kc ):
if ( xc[i] < 0 ):
print ''
print 'COMP_RANK_GRLEX - Fatal error!'
print ' XC[I] < 0'
exit ( 'COMP_RANK_GRLEX - Fatal error!' );
#
# NC = sum ( XC )
#
nc = i4vec_sum ( kc, xc )
#
# Convert to KSUBSET format.
#
ns = nc + kc - 1
ks = kc - 1
xs = np.zeros ( ks, dtype = np.int32 )
xs[0] = xc[0] + 1
for i in range ( 2, kc ):
xs[i-1] = xs[i-2] + xc[i-1] + 1
#
# Compute the rank.
#
rank = 1;
for i in range ( 1, ks + 1 ):
if ( i == 1 ):
tim1 = 0
else:
tim1 = xs[i-2];
if ( tim1 + 1 <= xs[i-1] - 1 ):
for j in range ( tim1 + 1, xs[i-1] ):
rank = rank + i4_choose ( ns - j, ks - i )
for n in range ( 0, nc ):
rank = rank + i4_choose ( n + kc - 1, n )
return rank
def comp_rank_grlex_test ( ):
#*****************************************************************************80
#
## COMP_RANK_GRLEX_TEST tests COMP_RANK_GRLEX.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 30 October 2014
#
# Author:
#
# John Burkardt
#
from comp_random_grlex import comp_random_grlex
print ''
print 'COMP_RANK_GRLEX_TEST'
print ' A COMP is a composition of an integer N into K parts.'
print ' Each part is nonnegative. The order matters.'
print ' COMP_RANK_GRLEX determines the rank of a COMP'
print ' from its parts.'
print ''
print ' Actual Inferred'
print ' Test Rank Rank'
print ''
kc = 3
rank1 = 20
rank2 = 60
seed = 123456789
for test in range ( 0, 5 ):
xc, rank3, seed = comp_random_grlex ( kc, rank1, rank2, seed )
rank4 = comp_rank_grlex ( kc, xc )
print ' %4d %6d %8d' % ( test, rank3, rank4 )
#
# Terminate.
#
print ''
print 'COMP_RANK_GRLEX_TEST:'
print ' Normal end of execution.'
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
comp_rank_grlex_test ( )
timestamp ( )
| [
"[email protected]"
] | |
917f1e4058d8ddec0274a5dd448fc901123da679 | e1f53cb481f2b6ea2ac3ee53d0251c5d9ea782e0 | /src/pyaid/xml/XMLConfigParser.py | 8b3e6cb5d6bcf0235329fc8e3052b949a29c32ed | [] | no_license | hannahp/PyAid | 8771ee35c2fdf9503e68e808dc0028e885e68158 | b9562a954552334fab16c32a6b8285ea3e1571e0 | refs/heads/master | 2021-01-22T14:02:26.068735 | 2014-01-29T23:33:33 | 2014-01-29T23:33:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,164 | py | # XMLConfigParser.py
# (C)2011
# Scott Ernst
import re
import xml.dom.minidom as minidom
import codecs
from pyaid.xml.ConfigData import ConfigData
#___________________________________________________________________________________________________ XMLConfigParser
class XMLConfigParser(object):
"""XMLConfigParser."""
#===================================================================================================
# C L A S S
TYPE_ID = 'xml'
#===================================================================================================
# P U B L I C
#___________________________________________________________________________________________________ parseFile
@staticmethod
def parseFile(path, target=None, parseToInterchangeFormat =False):
fh = codecs.open(path, 'r', 'utf-8')
xml = fh.read()
fh.close()
return XMLConfigParser.parse(xml, target, parseToInterchangeFormat)
#___________________________________________________________________________________________________ parse
@staticmethod
def parse(xml, target=None, parseToInterchangeFormat =False):
# Removes whitespace between tags to reduce potential parsing issues.
pattern = re.compile('\<\?xml(.*)\?\>')
if pattern.search(xml) is None:
xml = '<?xml version="1.0" encoding="utf-8"?>' + xml
dom = minidom.parseString(re.sub('>[\n\r\s\t]+<','><',xml))
if target is None:
target = {}
cd = ConfigData()
for node in dom.childNodes[0].childNodes:
# Ignore whitespace generated text nodes
if isinstance(node, (minidom.Comment, minidom.Text)):
continue
XMLConfigParser._parseNode(node, cd)
if parseToInterchangeFormat:
cd.writeToInterchangeDict(target)
else:
cd.writeToDict(target)
return target
#___________________________________________________________________________________________________ serializeToFile
@staticmethod
def serializeToFile(targetFile, interchangeData):
        xml = XMLConfigParser.serialize(interchangeData)
fh = codecs.open(targetFile, 'wb', 'utf-8')
fh.write(xml)
fh.close()
#___________________________________________________________________________________________________ serialize
@staticmethod
def serialize(interchangeData):
xml = '<vm>\n'
for n,v in interchangeData.iteritems():
xml += XMLConfigParser._writeNode(n, v)
return (xml + '</vm>').decode('unicode_escape')
#===================================================================================================
# P R I V A T E
#___________________________________________________________________________________________________ _writeNode
@staticmethod
def _writeNode(name, data, depth =1):
indent = (' '*4*depth)
target = indent + '<'
if isinstance(data, list):
            d = '|'.join(data[1]) if isinstance(data[1], list) else str(data[1])
target += data[0] + ' n="' + name + '" v="' + d + '" />\n'
elif isinstance(data, dict):
target += 'o n="' + name + '">\n'
for n,v in data.iteritems():
target += XMLConfigParser._writeNode(n, v, depth+1)
target += indent + '</o>'
elif isinstance(data, str):
            target += 's n="' + name + '" v="' + data + '" />\n'
elif isinstance(data, (int, float)):
            target += 'n n="' + name + '" v="' + str(data) + '" />\n'
else:
target += 'unknown n="' + name + '" />'
return target
#___________________________________________________________________________________________________ _parseNode
@staticmethod
def _parseNode(node, configData):
nodeName = node.getAttribute('n')
nodeType = node.tagName
if nodeType != 'o':
XMLConfigParser._parseAttribute(nodeName, nodeType, node.getAttribute('v'), configData)
return
cd = ConfigData()
for k in node.attributes.keys():
if k != 'n':
aValue = node.getAttribute(k)
aType = 's'
if aValue.find(':') != -1:
aValue = node.getAttribute(k).split(':')
aType = str(aValue[0])
aValue = aValue[-1]
XMLConfigParser._parseAttribute(k, aType, aValue, cd)
for child in node.childNodes:
XMLConfigParser._parseNode(child, cd)
configData.setItem(nodeName, 'o', cd)
#___________________________________________________________________________________________________ _parseAttribute
@staticmethod
def _parseAttribute(attrName, attrType, attrValue, configData):
configData.setItem(attrName, attrType, attrValue)
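
#___________________________________________________________________________________________________ usage sketch
# Minimal round-trip sketch (illustrative, not part of the original module);
# the element names follow the conventions of _writeNode/_parseNode above:
#
#     sample = '<vm><s n="greeting" v="hello"/><n n="answer" v="42"/></vm>'
#     result = XMLConfigParser.parse(sample)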
| [
"[email protected]"
] | |
8c0a4f5b85c510d63ce5695baafd7aac77604f94 | 3d6bb3df9ca1d0de6f749b927531de0790aa2e1d | /compare_SV_groups_to_trees.py | 5927663662c3f71b3728d89b37c671512184bf25 | [] | no_license | standardgalactic/kuhner-python | da1d66a6d638a9a379ba6bae2affdf151f8c27c5 | 30b73554cc8bc9d532c8108b34dd1a056596fec7 | refs/heads/master | 2023-07-07T04:18:30.634268 | 2020-04-06T04:37:48 | 2020-04-06T04:37:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,128 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 12:43:41 2018
@author: Lucian
"""
from __future__ import division
from os import walk
from os import path
from os import readlink
from os import mkdir
from os.path import isfile
from copy import deepcopy
import numpy
import math
import matplotlib.pyplot as plt
import csv
import ete3
#import lucianSNPLibrary as lsl
groupdir = "SNV_groups/"
treedir = "phylip_TS_analysis/"
SVfile = "SV_events.txt"
patientfile = "patient_analysis_SVs.tsv"
allg_outfile = "all_groups_SVs.tsv"
#outdir = "SNV_SV_tree_compare" + tag + "/"
#if not path.isdir(outdir):
# mkdir(outdir)
groupfiles = []
for __, _, files in walk(groupdir):
groupfiles += files
def callGroup(group, allsamples, tree):
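    # Classify a sample group against the phylogeny: "Grouped" means the group
    # forms its own clade, while "Ungrouped" means at least one excluded sample
    # already falls under the group's most recent common ancestor.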
if len(group) == 1:
return "Singleton"
if len(group) == len(allsamples):
return "Root"
trueset = set()
falseset = set()
for branch in tree:
if branch.name != "":
for sample in group:
if sample in branch.name:
trueset.add(branch)
for sample in allsamples:
if sample in group:
continue
if sample in branch.name:
falseset.add(branch)
if "blood" in branch.name:
tree.set_outgroup(branch)
trueroot = tree.get_common_ancestor(trueset)
for fbranch in falseset:
testset = trueset.copy()
testset.add(fbranch)
newroot = tree.get_common_ancestor(testset)
if newroot == trueroot:
return "Ungrouped"
return "Grouped"
# Read in the SNV numbers
groupdata = {}
allsamples= {}
for gfile in groupfiles:
if "all" in gfile or "patient" in gfile or "SV_" in gfile:
continue
patient = gfile.split("_")[0]
groupdata[patient] = {}
allsamples[patient] = set()
for line in open(groupdir + gfile, "r"):
if "Patient" in line:
continue
lvec = line.rstrip().split()
assert(patient == lvec[0])
count = int(lvec[1])
perc = float(lvec[2])
samples = tuple(lvec[3:])
groupdata[patient][samples] = {}
groupdata[patient][samples]["count"] = count
groupdata[patient][samples]["percentage"] = perc
groupdata[patient][samples]["SV_count"] = 0
for sample in samples:
allsamples[patient].add(sample)
#Read in the SV numbers
SVs = {}
samplelists = {}
for line in open(SVfile, "r"):
if "chr" in line:
continue
lvec = line.rstrip().split()
(__, __, patient, sample, type, ch1, start1, end1, __, ch2, start2, end2, __, __, __, __) = lvec
svid = (type, ch1, start1, end1, ch2, start2, end2)
if patient not in SVs:
SVs[patient] = {}
if svid not in SVs[patient]:
SVs[patient][svid] = set()
SVs[patient][svid].add(sample)
#Count the SVs by sample list
nmulti = 0
nmulti_singletons = 0
nmulti_multis = 0
nsingle = 0
SVcounts = {}
for patient in SVs:
SVtotal = 0
for segid in SVs[patient]:
samples = list(SVs[patient][segid])
samples.sort()
samples = tuple(samples)
if samples not in groupdata[patient]:
groupdata[patient][samples] = {}
groupdata[patient][samples]["count"] = 0
groupdata[patient][samples]["percentage"] = 0.0
groupdata[patient][samples]["SV_count"] = 0
groupdata[patient][samples]["SV_count"] += 1
SVtotal += 1
nsingle += 1
for samples in groupdata[patient]:
groupdata[patient][samples]["SV_percentage"] = groupdata[patient][samples]["SV_count"]/SVtotal
print("Number of segments with a single call:", str(nsingle))
print("Number of segments with multiple calls:", str(nmulti))
print("Number of segments with multiple calls, all singletons:", str(nmulti_singletons))
print("Number of segments with multiple calls, all multiples:", str(nmulti_multis))
#Now put the tree data in there, too:
for patient in groupdata:
treefilename = treedir + patient + "_outtree.txt"
if patient == "891":
treefilename = treedir + patient + "a_outtree.txt"
tree = ete3.Tree(treefilename)
for samples in groupdata[patient]:
groupdata[patient][samples]["matches_tree"] = callGroup(samples, allsamples[patient], tree)
#And finally, write out all of our information.
outfile = open(groupdir + allg_outfile, "w")
outfile.write("Patient\tMatches_tree\tCount\tPercentage\tSV count\tSV percentage\tSample1\tSample2\tSample3\tSample4\tSample5\tSample6\n")
for patient in groupdata:
for samples in groupdata[patient]:
outfile.write(patient)
outfile.write("\t" + groupdata[patient][samples]["matches_tree"])
outfile.write("\t" + str(groupdata[patient][samples]["count"]))
outfile.write("\t" + str(groupdata[patient][samples]["percentage"]))
outfile.write("\t" + str(groupdata[patient][samples]["SV_count"]))
outfile.write("\t" + str(groupdata[patient][samples]["SV_percentage"]))
for sample in samples:
outfile.write("\t" + sample)
outfile.write("\n")
outfile.close()
#Now do some analysis
has23GD = ["74", "279", "303", "391", "396", "450", "772", "997"]
types = ["Singleton", "Root", "Grouped", "Ungrouped"]
outfile = open(groupdir + patientfile, "w")
outfile.write("Patient\tnSNVmin\tnSNVmax\thas 2-3 GD")
for type in types:
outfile.write("\t" + type + " counts")
outfile.write("\t" + type + " total")
outfile.write("\tUngrouped potential subclone counts\tUngrouped potential scomubclone total\n")
for patient in groupdata:
smallestSNVcount = 100000
maxSNVcount = 0
for samples in groupdata[patient]:
SNVcount = groupdata[patient][samples]["count"]
if groupdata[patient][samples]["matches_tree"] == "Grouped":
if SNVcount < smallestSNVcount:
smallestSNVcount = SNVcount
if SNVcount > maxSNVcount:
maxSNVcount = SNVcount
possibleSubcloneThreshold = smallestSNVcount*3/4
SVcounts = {}
for match in types:
SVcounts[match] = []
SVcounts["subclones"] = []
for samples in groupdata[patient]:
theseSamples = groupdata[patient][samples]
SVcount = theseSamples["SV_count"]
if SVcount ==0:
continue
if theseSamples["matches_tree"] == "Ungrouped" and theseSamples["count"] >= possibleSubcloneThreshold:
SVcounts["subclones"].append(SVcount)
else:
SVcounts[theseSamples["matches_tree"]].append(SVcount)
outfile.write(patient)
outfile.write("\t" + str(possibleSubcloneThreshold))
outfile.write("\t" + str(maxSNVcount))
outfile.write("\t" + str(patient in has23GD))
for type in types:
outfile.write("\t")
for num in SVcounts[type]:
outfile.write("//" + str(num))
outfile.write("\t" + str(sum(SVcounts[type])))
outfile.write("\t")
for num in SVcounts["subclones"]:
outfile.write("//" + str(num))
outfile.write("\t" + str(sum(SVcounts["subclones"])))
outfile.write("\n")
outfile.close()
| [
"[email protected]"
] |