# ===== File: /env/lib/python3.8/site-packages/sklearn/manifold/spectral_embedding_.py | repo: enriquemoncerrat/frasesback | license: none =====
# THIS FILE WAS AUTOMATICALLY GENERATED BY deprecated_modules.py
import sys
# mypy error: Module X has no attribute y (typically for C extensions)
from . import _spectral_embedding # type: ignore
from ..externals._pep562 import Pep562
from ..utils.deprecation import _raise_dep_warning_if_not_pytest
deprecated_path = 'sklearn.manifold.spectral_embedding_'
correct_import_path = 'sklearn.manifold'
_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)
def __getattr__(name):
return getattr(_spectral_embedding, name)
if not sys.version_info >= (3, 7):
Pep562(__name__)
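# Usage sketch (added note, not part of the generated shim): importing through the
# deprecated path keeps working because __getattr__ proxies attribute lookups to
# the private module, after _raise_dep_warning_if_not_pytest emits the warning:
#     from sklearn.manifold import spectral_embedding_   # triggers the warning
#     spectral_embedding_.spectral_embedding             # resolved via __getattr__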
# ===== File: /__PPPPLLLLTTTT__LLLLOOOOGGGG/workspace/a3c/PPPPPPLLLLLLLOOOOOOTTTTTTT/trainer-family/plot_a3c_log3.py | repo: humorbeing/python_github | license: none =====
import numpy as np
import matplotlib.pyplot as plt
def xy(name, num=None):
with open(name) as f:
lines = [line for line in f]
log = []
count = 0
step_stack = []
reward_stack = []
for line in lines:
count += 1
# if count % 500 == 0:
reads = line.split(',')
reads = [cleanse.strip() for cleanse in reads]
step_line = reads[1]
reward_line = reads[3]
# print(step_line)
# print(reward_line)
step_line = step_line.split(' ')
step_num = int(step_line[2])
# print(step_num)
# print(step_num+1)
reward_line = reward_line.split(' ')
# print(reward_line)
reward_num = float(reward_line[2])
# print(reward_num)
# print(reward_num+0.2)
# step_stack.append(step_num)
# reward_stack.append(reward_num)
log.append([step_num, reward_num])
# print('num raw data', count)
log = np.array(log)
# print(log.shape)
log = log[log[:, 0].argsort()]
# if count > 5000:
# break
# print(log)
logs = []
step_stack = []
reward_stack = []
if num is None:
num = 50
for count in range(len(log)):
# print(log[count])
step_stack.append(log[count][0])
reward_stack.append(log[count][1])
if count % num == 0:
s = np.mean(step_stack)
r = np.mean(reward_stack)
logs.append([s, r])
step_stack = []
reward_stack = []
log = np.array(logs)
# print(log.shape)
# print(log)
# log.sort(axis=0)
# print(log.shape)
# print(log.shape)
# print(log)
t_log = np.transpose(log)
# print(t_log.shape)
# print(t_log)
x = t_log[0]
y = t_log[1]
return x, y
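# Assumed log-line format (inferred from the parsing above, not documented in the
# original): comma-separated fields where field 1 splits on spaces with the step
# count at token index 2, and field 3 likewise carries the reward at token index 2,
# e.g. (hypothetical) "pid 0, total step 123456, episode 42, mean reward -21.0".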
def plot_this(file_name, plot_name, color=None, num=None):
x, y = xy(file_name, num=num)
ax.plot(x, y, label=plot_name, color=color)
# def plot_these(file_names, plot_name, color=None, num=None):
# xs =
# ys =
# plt.plot(x, y)
# plt.scatter(t_log[0], t_log[1])
fig, ax = plt.subplots()
# plot_this('test_log.txt', 'A3C')
# plot_this('a3c-1.txt', 'A3C')
# plot_this('a3c-200.txt', 'A3C')
# plot_this('a3c-500.txt', 'A3C')
plot_this('dmb-all.txt', 'DMB(Our)', 'r', 100)
plot_this('a3c-all.txt', 'A3C(Baseline)', 'g', 60)
# plot_this('dmb-freeze-all.txt', 'DMB(our), Freeze weight', 'r', num=60)
# plot_this('a3c-1-fre.txt', 'A3C, Freeze Weight')
# plot_this('a3c-all2.txt', 'A3C2-all')
plot_this('en-1.txt', 'Autoencoder', num=40)
# plot_this('en-fre.txt', 'AutoEncoder, Freeze weight')
# plot_this('g1-1.txt', '1')
# plot_this('g1-2-fre.txt', 'g2')
# plot_this('g1-200.txt', '2')
# plot_this('g1-500.txt', '3')
# plot_this('g1-1000.txt', '4')
# plot_this('g2-1.txt', '5')
# plot_this('g2-1000.txt', '6')
# plot_this('soso-1.txt', '7')
# plot_this('soso-1-fre.txt', '1000A3C')
# plot_this('soso-200.txt', '8')
# plot_this('soso-500.txt', '9')
# plot_this('mb-1.txt', 'Encoder')
# plot_this('mb-1-fre.txt', 'Model-Based, Freeze weight', 'y')
plot_this('mb-1000.txt', 'Model-based', num=60)
ax.grid(True)
ax.legend(loc='upper left')
ax.set_title('Pong-ram-v0')
ax.set_xlabel('Frame')
ax.set_ylabel('Episodic Reward')
ax.set_xlim(left=0, right=5000000*6)
ax.set_ylim(bottom=-22, top=-4)
plt.show()
# ===== File: /adventofcode.com/utils/color.py | repo: ceasaro/ceasaro_py | license: none =====
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
LIGHT_GRAY = 37
GRAY = 90
LIGHT_RED = 91
LIGHT_GREEN = 92
LIGHT_YELLOW = 93
LIGHT_BLUE = 94
LIGHT_MAGENTA = 95
LIGHT_CYAN = 96
WHITE = 97
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def color_str(color, msg):
return f"\033[{color}m{msg}{ENDC}"
def background_color_str(color, msg):
return f"\033[{color+10}m{msg}{ENDC}"
def red(msg):
return color_str(RED, msg)
def light_red(msg):
return color_str(LIGHT_RED, msg)
def yellow(msg):
return color_str(YELLOW, msg)
def green(msg):
return color_str(GREEN, msg)
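# Usage sketch (added example, not in the original module): the helpers wrap text
# in ANSI escape codes; background colours are the foreground code + 10.
#     print(red("failed"))                      # -> "\033[31mfailed\033[0m"
#     print(background_color_str(GREEN, "ok"))  # -> "\033[42mok\033[0m"
#     print(BOLD + yellow("warning") + ENDC)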
# ===== File: /2020/08August/24PandasDataFrame13.py | repo: June-fu/python365 | license: none =====
#!/usr/bin/python
'''
# @ Author: june-fu
# @ Create Time: 2020-12-20 21:49:51
# @ Modified by: june-fu
# @ Modified time: 2020-12-20 22:09:47
# @ Description:
calculate the sum of the examination attempts by the students.
'''
import pandas as pd
import numpy as np
dct1 = {'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],
'score': [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],
'attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
'qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes']}
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
df = pd.DataFrame(dct1, index=labels)
print(df)
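# The attempts column above is [1, 3, 2, 3, 2, 3, 1, 1, 2, 1], so the sum printed next is 19.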
print(df['attempts'].sum())
# ===== File: /ultratech_core/migrations/0001_initial.py | repo: KONASANI-0143/Dev | license: none =====
# Generated by Django 3.0.6 on 2020-05-21 06:14
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Preferences',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('key', models.CharField(max_length=50)),
('value', models.CharField(max_length=10000)),
('description', models.TextField()),
],
options={
'verbose_name': 'Ultra Tech Preference',
'verbose_name_plural': 'Ultra Tech Preferences',
'db_table': 'ultra_tech_preferences',
},
),
]
# ===== File: /PyTorch/contrib/cv/detection/SOLOv2/mmcv/mmcv/utils/__init__.py | repo: Ascend/ModelZoo-PyTorch | licenses: GPL-1.0-or-later, BSD-3-Clause, LicenseRef-scancode-proprietary-license, BSD-2-Clause, Apache-2.0, MIT, LicenseRef-scancode-generic-cla, LicenseRef-scancode-unknown-license-reference (permissive) =====
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Open-MMLab. All rights reserved.
from .config import Config, ConfigDict
from .misc import (check_prerequisites, concat_list, is_list_of, is_seq_of,
is_str, is_tuple_of, iter_cast, list_cast,
requires_executable, requires_package, slice_list,
tuple_cast)
from .path import (FileNotFoundError, check_file_exist, fopen, is_filepath,
mkdir_or_exist, scandir, symlink)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .timer import Timer, TimerError, check_time
__all__ = [
'ConfigDict', 'Config', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'FileNotFoundError', 'ProgressBar', 'track_progress',
'track_iter_progress', 'track_parallel_progress', 'Timer', 'TimerError',
'check_time'
]
# ===== File: /locations/spiders/costa_coffee_gg_gb_im_je.py | repo: alltheplaces/alltheplaces | licenses: CC0-1.0, MIT =====
from scrapy import Spider
from scrapy.http import JsonRequest
from locations.categories import Categories, Extras, apply_category, apply_yes_no
from locations.dict_parser import DictParser
from locations.geo import point_locations
from locations.hours import OpeningHours
class CostaCoffeeGGGBIMJESpider(Spider):
name = "costa_coffee_gg_gb_im_je"
item_attributes = {"brand": "Costa Coffee", "brand_wikidata": "Q608845"}
allowed_domains = ["www.costa.co.uk"]
start_urls = ["https://www.costa.co.uk/api/mdm/"]
custom_settings = {"ROBOTSTXT_OBEY": False} # No robots.txt. 404 HTML page returned instead.
def start_requests(self):
graphql_query_template = """query Sites {
sites(
siteStatuses: ["OPEN"]
tradingStatusAvailable: true
geo: {
latitude: __LATITUDE__
longitude: __LONGITUDE__
}
countries: "GB"
orderBy: { distance: ASC }
first: 2500
) {
items {
id
extendedName: name
location {
address {
address1
address2
city
postCode
}
geo {
latitude
longitude
distanceMiles
}
}
siteType
facilities {
babyChanging
clickAndServe
coffeeClub
collect
delivery
disabledAccess
disabledWC
driveThru
giftCard
preOrderCollect
tooGoodToGo
wifi
}
expressMachines {
characteristics {
icedDrinks
}
}
operatingHours(timeTypes: ["Standard"]) {
Monday: monday {
open24Hours
open
close
}
Tuesday: tuesday {
open24Hours
open
close
}
Wednesday: wednesday {
open24Hours
open
close
}
Thursday: thursday {
open24Hours
open
close
}
Friday: friday {
open24Hours
open
close
}
Saturday: saturday {
open24Hours
open
close
}
Sunday: sunday {
open24Hours
open
close
}
}
name: knownAs
}
}
}"""
for lat, lon in point_locations("gg_gb_im_je_centroids_iseadgg_50km_radius.csv"):
graphql_query = graphql_query_template.replace("__LATITUDE__", str(lat)).replace("__LONGITUDE__", str(lon))
yield JsonRequest(url=self.start_urls[0], data={"query": graphql_query})
def parse(self, response):
for location in response.json()["data"]["sites"]["items"]:
item = DictParser.parse(location)
if location["siteType"] == "Global Express":
item["brand"] = "Costa Express"
item["brand_wikidata"] = "Q113556385"
apply_category(Categories.VENDING_MACHINE_COFFEE, item)
else:
apply_category(Categories.COFFEE_SHOP, item)
item["lat"] = location["location"]["geo"]["latitude"]
item["lon"] = location["location"]["geo"]["longitude"]
item["street_address"] = ", ".join(
filter(None, [location["location"]["address"]["address1"], location["location"]["address"]["address2"]])
)
item["city"] = location["location"]["address"]["city"]
item["postcode"] = location["location"]["address"]["postCode"]
if item["postcode"]:
if item["postcode"][:2] == "GY":
item["country"] = "GG"
elif item["postcode"][:2] == "IM":
item["country"] = "IM"
elif item["postcode"][:2] == "JE":
item["country"] = "JE"
else:
item["country"] = "GB"
if len(location["operatingHours"]) > 0:
item["opening_hours"] = OpeningHours()
for day_name, day_hours in location["operatingHours"][0].items():
if day_hours["open24Hours"]:
item["opening_hours"].add_range(day_name, "00:00", "24:00")
else:
item["opening_hours"].add_range(day_name, day_hours["open"], day_hours["close"])
apply_yes_no(Extras.BABY_CHANGING_TABLE, item, location["facilities"].get("babyChanging"), False)
apply_yes_no(Extras.DELIVERY, item, location["facilities"].get("delivery"), False)
apply_yes_no(Extras.WHEELCHAIR, item, location["facilities"].get("disabledAccess"), False)
apply_yes_no(Extras.TOILETS_WHEELCHAIR, item, location["facilities"].get("disabledWC"), False)
apply_yes_no(Extras.DRIVE_THROUGH, item, location["facilities"].get("driveThru"), False)
apply_yes_no(Extras.WIFI, item, location["facilities"].get("wifi"), False)
yield item
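# Note on the postcode heuristic above (added comment): GY, IM and JE prefixes are
# Crown dependency postcodes (Guernsey, Isle of Man, Jersey); anything else in this
# GB-filtered query falls back to Great Britain.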
7128fdf457058ab4d21c61a581c11328e4ce0797 | 02c394db353d996038c9bedbeaf91bb080c12ca2 | /dsm/epaxos/inst/store.py | 28229c7aa18c70adcccb274eb860057ec8542551 | [
"MIT"
] | permissive | Limber0117/python-epaxos | 0633752cffaca65c0d8b9c3aecf9c8bc6ca70f3e | e68bab50e7df32770103196c91d8708863691579 | refs/heads/master | 2021-08-23T22:31:47.283682 | 2017-12-06T22:16:21 | 2017-12-06T22:16:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,719 | py | import logging
from typing import NamedTuple, Dict, Optional, Tuple
from uuid import UUID
from dsm.epaxos.cmd.state import CommandID
from dsm.epaxos.inst.deps.cache import KeyedDepsCache
from dsm.epaxos.inst.state import State, Ballot, Slot, Stage
class InstanceStoreState(NamedTuple):
ballot: Ballot
state: State
def __repr__(self):
return f'ISS({self.ballot},{self.state})'
logger = logging.getLogger(__name__)
class TransitionException(Exception):
def __init__(self, slot: Slot, curr_inst: Optional[InstanceStoreState], new_inst: Optional[InstanceStoreState]):
self.slot = slot
self.inst = curr_inst
class IncorrectBallot(TransitionException):
pass
class IncorrectStage(TransitionException):
pass
class IncorrectCommand(TransitionException):
pass
class SlotTooOld(TransitionException):
pass
class LoadResult(NamedTuple):
exists: bool
inst: InstanceStoreState
def between_checkpoints(old, new):
for x in new.keys():
max_slot = new.get(x, Slot(x, 0))
low_slot = old.get(x, Slot(x, 0))
for y in range(low_slot.instance_id, max_slot.instance_id):
yield Slot(x, y)
CP_T = Dict[int, Slot]
class CheckpointCycle:
def __init__(self):
# [ old ][ mid ][ current ]
self.cp_old = {} # type: CP_T
self.cp_mid = {} # type: CP_T
def earlier(self, slot: Slot):
return slot < self.cp_old.get(slot.replica_id, Slot(slot.replica_id, -1))
def cycle(self, cp: Dict[int, Slot]) -> Tuple[CP_T, CP_T]:
"""
:param cp: new checkpoint
:return: range of the recycled checkpoint
"""
cp_prev_old = self.cp_old
cp_prev_mid = self.cp_mid
cp_old = {**self.cp_old, **self.cp_mid}
cp_mid = {**self.cp_mid, **cp}
self.cp_old = cp_old
self.cp_mid = cp_mid
return cp_prev_old, cp_prev_mid
def __repr__(self):
o = sorted(self.cp_old.items())
m = sorted(self.cp_mid.items())
return f'CheckpointCycle({o}, {m})'
class InstanceStore:
def __init__(self):
self.inst = {} # type: Dict[Slot, InstanceStoreState]
self.cmd_to_slot = {} # type: Dict[CommandID, Slot]
self.deps_cache = KeyedDepsCache()
self.cp = CheckpointCycle()
def set_cp(self, cp: Dict[int, Slot]):
for slot in between_checkpoints(*self.cp.cycle(cp)):
if slot in self.inst:
assert self.inst[slot].state.stage == Stage.Committed, 'Attempt to checkpoint before Commit'
del self.inst[slot]
def load(self, slot: Slot):
if self.cp.earlier(slot):
raise SlotTooOld(slot, None, None)
r = self.inst.get(slot)
exists = True
if r is None:
exists = False
r = InstanceStoreState(
slot.ballot_initial(),
State(
Stage.Prepared,
None,
-1,
[]
)
)
return LoadResult(exists, r)
def load_cmd_slot(self, id: CommandID) -> Optional[Tuple[Slot, InstanceStoreState]]:
r = self.cmd_to_slot.get(id)
if not r:
return None
else:
return r, self.load(r).inst
def update(self, slot: Slot, new: InstanceStoreState):
exists, old = self.load(slot)
if new.ballot < old.ballot:
raise IncorrectBallot(slot, old, new)
if new.state.stage < old.state.stage:
raise IncorrectStage(slot, old, new)
if old.state.stage > Stage.PreAccepted and old.state.command is not None and old.state.command != new.state.command:
raise IncorrectCommand(slot, old, new)
if new.state.stage == Stage.PreAccepted and new.state.command:
# rethink the command ordering
seq, deps = self.deps_cache.xchange(slot, new.state.command)
upd = InstanceStoreState(
new.ballot,
State(
new.state.stage,
new.state.command,
max(seq, new.state.seq),
sorted(set(new.state.deps + deps))
)
)
else:
upd = new
self.inst[slot] = upd
if exists and old.state.command:
if old.state.command.id in self.cmd_to_slot:
del self.cmd_to_slot[old.state.command.id]
else:
logger.error(f'Command id {old.state.command} not found in self.cmd_to_slot')
if new.state.command:
self.cmd_to_slot[new.state.command.id] = slot
return old, upd
# ===== File: /schol_library/migrations/0167_auto_20200526_1433.py | repo: mustavfaa/back-end | license: none =====
# Generated by Django 2.2 on 2020-05-26 08:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('schol_library', '0166_auto_20200526_1432'),
]
operations = [
migrations.AlterUniqueTogether(
name='checkidrequestedition',
unique_together=set(),
),
migrations.AlterUniqueTogether(
name='editionbooksorder',
unique_together=set(),
),
migrations.AlterUniqueTogether(
name='numberbooks',
unique_together=set(),
),
]
# ===== File: /src/graph_transpiler/webdnn/frontend/chainer/converter.py | repo: qifu99/webdnn | licenses: Zlib, MIT =====
# -*- coding:utf-8 -*-
"""
Chainer Link -> Graph object converters
Assuming Chainer 1.23 or 2.0
"""
import warnings
from typing import List, Union, Sequence, Set
from chainer import Function
from webdnn.frontend.constraints import AxisVar
from webdnn.frontend.converter import Converter
from webdnn.graph.graph import Graph
from webdnn.graph.order import Order
from webdnn.graph.variable import Variable
from webdnn.graph.variables.constant_variable import ConstantVariable
from webdnn.util import console
FLAG_CHAINER_INSTALLED = False
try:
import chainer
import chainer.computational_graph
if chainer.__version__ >= "2.":
chainer_v2 = True
# noinspection PyUnresolvedReferences
VariableNode = chainer.variable.VariableNode
else:
chainer_v2 = False
VariableNode = chainer.variable.Variable
FLAG_CHAINER_INSTALLED = True
except ImportError as e:
console.debug("Chainer is not completely installed.")
pass
def _to_variable_node(chainer_variable: Union["chainer.Variable", "VariableNode"]) -> "VariableNode":
if chainer_v2 and not isinstance(chainer_variable, VariableNode):
# noinspection PyUnresolvedReferences
return chainer_variable.node
else:
# noinspection PyTypeChecker
return chainer_variable
def _listup_functions(inputs: Sequence["VariableNode"], outputs: Sequence["VariableNode"]):
stack = list(outputs) # type: List[Union[VariableNode, Function]]
resolved = set(inputs) # type: Set[Union[VariableNode, Function]]
result = [] # type: List[Function]
while len(stack) > 0:
node = stack.pop(0)
if node in resolved:
continue
if isinstance(node, VariableNode):
prev_nodes = [] if node.creator is None else [node.creator]
else:
prev_nodes = node.inputs
unresolved_prevs = [prev_node for prev_node in prev_nodes if prev_node not in resolved]
if len(unresolved_prevs) == 0:
resolved.add(node)
if isinstance(node, Function):
result.append(node)
else:
stack.append(node)
stack += unresolved_prevs
return result
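# Note (added comment): _listup_functions above is a worklist topological sort; a
# Function is appended only once every producer of its inputs has been resolved,
# so the returned list is a valid execution order from the given inputs to the outputs.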
class ChainerConverter(Converter["Function"]):
"""ChainerConverter()
"""
def __init__(self):
if not FLAG_CHAINER_INSTALLED:
raise ImportError("ImportError is occurred when chainer is loaded.")
def convert_from_inout_vars(self, inputs: List["chainer.Variable"], outputs: List["chainer.Variable"]):
"""convert_from_inout_vars(inputs, output)
Construct computational graph from input and output chainer variables, and convert the graph into WebDNN IR.
Args:
inputs(list of chainer.Variable): input chainer variables
outputs(list of chainer.Variable): output chainer variables
.. warning::
This method will be removed in the future version. Use :func:`~webdnn.frontend.chainer.ChainerConverter.convert(inputs,
outputs)`.
.. admonition:: Example
.. code::
model = chainer.links.model.vision.resnet.ResNet50Layers()
# Forward propagation with dummy input to build computational graph
x = chainer.Variable(np.empty((1, 3, 224, 224), dtype=np.float32))
y = model(x, layers=["fc6"])["fc6"]
graph = ChainerConverter().convert_from_inout_vars([x], [y])
Returns:
(:class:`~webdnn.Graph`): WebDNN Graph
"""
warnings.warn("This method will be removed in the future version. Use ChainerConverter#convert(inputs, outputs).",
DeprecationWarning)
return self.convert(inputs, outputs)
def convert(self, inputs: List["chainer.Variable"], outputs: List["chainer.Variable"]) -> Graph:
"""convert(inputs, outputs)
Convert chainer computational graph into WebDNN IR.
Args:
inputs(list of chainer.Variable): input chainer variables
outputs(list of chainer.Variable): output chainer variables
.. admonition:: Example
.. code::
model = chainer.links.model.vision.resnet.ResNet50Layers()
# Forward propagation with dummy input to build computational graph
x = chainer.Variable(np.empty((1, 3, 224, 224), dtype=np.float32))
y = model(x, layers=["fc6"])["fc6"]
graph = ChainerConverter().convert_from_inout_vars([x], [y])
Returns:
(:class:`~webdnn.Graph`): WebDNN Graph
"""
chainer_graph = chainer.computational_graph.build_computational_graph(outputs)
# In chainer v2, variables are represented as Variable and VariableNode object, and
# graph information such as edge connection is contained in variable node.
# Therefore all chainer variable must be normalized into variable node.
c_vars = list(map(_to_variable_node,
filter(lambda v: isinstance(v, VariableNode), chainer_graph.nodes))) # type: List[VariableNode]
inputs = [_to_variable_node(v) for v in inputs]
outputs = [_to_variable_node(v) for v in outputs]
for c_var in c_vars:
if c_var.creator is None:
# If :code:`creator is None` and it's not input variable, it's parameter.
self._convert_var(c_var, constant=c_var not in inputs)
for c_opr in _listup_functions(inputs, outputs):
self._convert_operator(c_opr)
graph = Graph([self.get_variable(c_var) for c_var in inputs],
[self.get_variable(c_var) for c_var in outputs])
return graph
def _convert_var(self, c_var: "VariableNode", constant=False):
assert not self.has_variable(c_var), f"{c_var} is already converted"
ndim = len(c_var.shape)
order = Order([AxisVar() for _ in range(ndim)])
if constant:
data = c_var.data
if chainer_v2 and data is None:
# noinspection PyProtectedMember
data = c_var._variable().data
n_var = ConstantVariable(chainer.cuda.to_cpu(data), order) # force on CPU
else:
n_var = Variable(c_var.shape, order)
self.set_variable(c_var, n_var)
return n_var
30197389acb3578590648bb805b98e79d74595bc | 573220da9574b1ca16b530b93eb6801838b38ee5 | /app.py | d18c2cd5260fb14e64a22287e9ee9df08a46bb0b | [] | no_license | nova-sangeeth/Flask__blog__main | 0810879d9ed09940e334f0fa5827c74acbcd5dfd | 15b95f180608d051e3deb4aaf8f3a4889fc3d381 | refs/heads/master | 2020-09-14T03:21:22.758857 | 2020-06-18T17:55:25 | 2020-06-18T17:55:25 | 223,000,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,186 | py | from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
app.config[
"SQLALCHEMY_DATABASE_URI"
] = "sqlite:///blog.db" # /// means a relative path, //// means it is a absolute path.
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
# creating the database file
db = SQLAlchemy(app)
# models ------ classes
class BlogPost(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(256), nullable=False) # this is a required field.
content = db.Column(db.Text, nullable=False) # this is a required field.
author = db.Column(db.String(128), nullable=False, default="N/A")
date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
def __repr__(self):
return "Blog Post" + str(self.id)
# model for the user authentication
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100), unique=True)
name = db.Column(db.String(100))
@app.route("/")
def home():
return render_template("home.html")
@app.route("/posts", methods=["GET", "POST"])
def posts():
if request.method == "POST":
post_title = request.form["title"]
post_content = request.form["content"]
post_author = request.form["author"]
new_post = BlogPost(title=post_title, content=post_content, author=post_author)
db.session.add(new_post) # session.add only saves the data for temporary use.
db.session.commit() # to save the data always commit the database.
return redirect("/posts")
else:
all_posts = BlogPost.query.order_by(BlogPost.date_created).all()
return render_template("posts.html", posts=all_posts)
@app.route("/posts/delete/<int:id>")
def delete(id):
post = BlogPost.query.get_or_404(id)
db.session.delete(post)
db.session.commit()
return redirect("/posts")
@app.route("/posts/edit/<int:id>", methods=["GET", "POST"])
def edit(id):
post = BlogPost.query.get_or_404(id)
if request.method == "POST":
post.title = request.form["title"]
post.author = request.form["author"]
post.content = request.form["content"]
db.session.commit()
return redirect("/posts")
else:
return render_template("edit.html", post=post)
@app.route("/posts/new", methods=["GET", "POST"])
def new_post():
if request.method == "POST":
post_title = request.form["title"]
post_content = request.form["content"]
post_author = request.form["author"]
new_post = BlogPost(title=post_title, content=post_content, author=post_author)
db.session.add(new_post) # session.add only saves the data for temporary use.
db.session.commit() # to save the data always commit the database.
return redirect("/posts")
else:
all_posts = BlogPost.query.order_by(BlogPost.date_created).all()
return render_template("new_post.html")
if __name__ == "__main__":
app.run(debug=True)
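# Example interaction once running (hypothetical values; endpoint and form field
# names taken from the routes above):
#     curl -X POST -d "title=Hello&content=First post&author=Me" \
#          http://127.0.0.1:5000/posts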
# ===== File: /yoggsaron/models/__init__.py | repo: tonicbupt/c-thun | license: none =====
# coding: utf-8
from .dish import Dish
from .order import OrderDish, Order
from .restaurant import Restaurant
__all__ = ['Dish', 'Order', 'Restaurant']
# ===== File: /back-end/.history/djreact/users/serializers_20191221131333.py | repo: vishaldenzil/Django-react- | license: none =====
from rest_framework import serializers
from .models import User
class UserRegistrationSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = '__all__'
# ===== File: /jade2/basic/structure/biopython_util.py | repo: jadolfbr/jade2 | license: BSD-3-Clause =====
import gzip
import os
import sys
import logging
from pathlib import Path
from typing import Union, List
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.MMCIFParser import MMCIFParser
from Bio.PDB.Residue import Residue
from Bio.PDB.Structure import Structure
from Bio.PDB.Chain import Chain
from Bio.PDB.Model import Model
from jade2.basic.restype_definitions import RestypeDefinitions
from jade2.basic.numeric import *
### NOTE: All utility functions have been replaced by a Bio Structure wrapper: BioPose.
### Please see this new class for future developments!
######## NEW Biopython utility functions ##########
def is_connected_to_next(res1: Residue, res2: Residue) -> bool:
"""
Return True if res1's backbone C is within peptide-bond distance of res2's backbone N.
:param res1: Bio.PDB.Residue.Residue
:param res2: Bio.PDB.Residue.Residue
:rtype: bool
"""
distance = atomic_distance(res1, res2, 'C', 'N')
if distance <= float(1.8):
return True
else:
return False
def is_connected_to_prev(res1, res2) -> bool:
"""
Return True if res1's backbone N is within peptide-bond distance of res2's backbone C.
:param res1: Bio.PDB.Residue.Residue
:param res2: Bio.PDB.Residue.Residue
:rtype: bool
"""
distance = atomic_distance(res1, res2, 'N', 'C')
if distance <= float(1.8):
return True
else:
return False
def atomic_distance(res1: Residue, res2: Residue, res1_atom_name: str, res2_atom_name: str) -> float:
"""
Return the atomic distance between two arbitrary Bio residues and two arbitrary atom names.
:param res1: Bio.PDB.Residue.Residue
:param res2: Bio.PDB.Residue.Residue
:param res1_atom_name: str
:param res2_atom_name: str
:rtype: float
"""
try:
return distance_numpy(res1[res1_atom_name].get_vector().get_array(), res2[res2_atom_name].get_vector().get_array())
except Exception:
logging.debug("Residue does not have the atom name or there is a problem in the vector. Returning 0")
raise IndexError
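# Usage sketch (added example; `chain` is a hypothetical Bio.PDB chain): the
# peptide-bond tests above compare the C(i) -> N(i+1) distance against 1.8 Angstroms.
#     bonded = is_connected_to_next(chain[10], chain[11])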
######## OLD Biopython Utility Functions replaced by BIOPose ########
def has_id(model, id) -> bool:
"""
Return True if the model contains a chain with the given id (manual scan because Biopython's built-in has_id can use a stale index).
"""
for i in model:
if i.id == id:
return True
return False
def get_biopython_structure(path: Union[Path, str], model_id = None) -> Structure:
structure = None
path = str(path).strip()
parser = PDBParser()
cif_parser = MMCIFParser()
extSP: List[str] = os.path.basename(path).split('.')
if not model_id:
model_id = os.path.basename(str(path))
if extSP[-1] == "pdb":
structure = parser.get_structure(model_id, path)
elif extSP[-1] == "cif":
structure = cif_parser.get_structure(model_id, path)
elif extSP[-1] == 'gz':
GZ = gzip.open(path, 'rb')
if extSP[-2] == 'pdb':
structure = parser.get_structure(model_id, GZ)
elif extSP[-2] == 'cif':
structure = cif_parser.get_structure(model_id, GZ)
else:
sys.exit("Unknown GZipped extenstion: "+path)
GZ.close()
else:
sys.exit("Unknown extension to read PDB: "+path)
return structure
def get_seq_from_biostructure(structure: Structure, chain_id) -> str:
for biochain in structure[0]:
if get_chain_length(biochain) == 0:
continue
if biochain.id == chain_id:
return get_seq_from_biochain(biochain)
print("Chain not found!")
raise LookupError
def get_seq_from_biochain(bio_chain: Chain) -> str:
if get_chain_length(bio_chain) == 0:
return ""
seq = ""
d = RestypeDefinitions()
for res in bio_chain:
if res.id[0]==' ':
aa = d.get_one_letter_from_three(res.resname)
if not aa:
logging.debug("Skipping non-canonical resname: "+res.resname)
logging.debug("This could pose a problem!")
continue
seq = seq+aa
return seq
def get_chain_length(bio_chain: Chain) -> int:
l = 0
for res in bio_chain:
if res.id[0]==' ':
l+=1
return l
def get_num_biochains(structure: Structure) -> int:
# the original annotated the argument as Model, but indexing [0] implies a Structure whose first model's chains are counted
return len(structure[0])
# ===== File: /Python_codes/p03785/s746303636.py | repo: Aasthaengg/IBMdataset | license: none =====
n, c, k = map(int, input().split())
t_l = []
for _ in range(n):
t_l.append(int(input()))
t_l.sort()
ans = 0
cnt = 0
for t in t_l:
if cnt == 0:
t1 = t
if t <= t1 + k:
cnt += 1
if cnt == c:
cnt = 0
ans += 1
else:
cnt = 1
ans += 1
t1 = t
if cnt != 0:
ans += 1
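# Reading of the greedy above (added note): with arrival times sorted, riders are
# packed into the current vehicle while they are within k of the group's first
# rider and capacity c is not exceeded; otherwise a vehicle is counted and a new
# group starts. The final partial group, if any, needs one more vehicle.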
print(ans)
# ===== File: /grapy/core/__init__.py | repo: oscar810429/grapy | license: none =====
from .engine import Engine
from .base_spider import BaseSpider
from .base_sched import BaseScheduler
from .base_request import BaseRequest
from .item import Item, dump_item, load_item
__all__ = ['Engine', 'BaseSpider', 'BaseScheduler', 'BaseRequest',
'Item', 'dump_item', 'load_item']
# ===== File: /testCase/contacts/testUpdateContact.py | repo: GGGYB/crm | license: none =====
# -*- coding: utf-8 -*-
__author__ = 'Jun'
from bs4 import BeautifulSoup
import re
from commons import common
from commons.const import const
from testCase.users import testGetUser as users
class UpdateContacts:
def __init__(self, cookie, csrf):
self.common = common.Common(cookie, csrf)
self.users = users.GetUser(cookie, csrf)
self.base_url = const.BASE_URL
self.base_url2 = const.SIGN_IN_BASE_URL
self.csrf = csrf
self.cookie = cookie
self.response = ''
self.users_id = []
self.customers_id = []
# Batch-transfer contacts to another owner
def update_contacts_by_scope(self, scope, contact_ids):
url = self.base_url + 'contacts/massive_transfer'
body = {}
self.common.get_response_json(url, body, 'open the batch-transfer contacts window')
# fetch candidate user ids
self.users_id = self.users.getUserId()
# perform the transfer
url = self.base_url + 'api/contacts/mass_transfer'
params = {
'authenticity_token':self.csrf,
'user_id': self.users_id[0],
'transfer_contracts': 'false',
'transfer_opportunities': 'false',
'nowin_opportunities': 'false',
'contact_ids[]': [contact_ids[0], contact_ids[1]]  # duplicate dict keys would collapse; a list sends the parameter twice
}
self.common.put_response_json(url, params, 'batch-transfer contacts')
# Batch-edit contacts
def batch_update_contacts(self, scope, contact_ids):
url = self.base_url + 'batch_edit/field_form?model=Contact'
params = {}
response = self.common.get_response_json(url, params, 'open the batch-edit contacts page')
soup = BeautifulSoup(response.content, 'html.parser')
optional_field = soup.find(attrs={'id': 'field_choice'})
fields = re.findall(r"value=\"(.*?)\">", str(optional_field))
selected_fields = soup.findAll(attrs={'class': 'batch-edit-custom-field hidden'})
selected_field_list = []
for i in selected_fields:
selected_field = re.findall(r"<option value=\"(.*?)\">", str(i))
selected_field_list.append(selected_field)
url = self.base_url + 'api/contacts/batch_update'
params = {
'utf8': '✓',
'authenticity_token': self.csrf,
'field_choice': fields[3],
'contact['+fields[3]+']':selected_field_list[2][2],
'ids[]': [contact_ids[0], contact_ids[1]]  # duplicate dict keys would collapse; a list sends the parameter twice
}
self.common.put_response_json(url, params, 'batch-edit contacts')
# Quick-edit a single contact
def quick_edit_contacts(self, contact_id):
url = self.base_url + 'quick_edit/field_form?model=Contact&id=250705&field_name=name&page_type=index&_=1534228607097'
params = {
'model':'Contact',
'id':contact_id,
'field_name':'address.phone',
'page_type':'index',
}
self.common.get_response_json(url, params, "quick-edit: fetch the contact's field name")  # bug fix: 'self' was passed where the URL belongs
url = self.base_url + 'api/contacts/' + str(contact_id)
params = {
'utf8': '✓',
'_method': 'patch',
'authenticity_token': self.csrf,
'contact[id]': contact_id,
'contact[name]': 'contact name'
}
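# Note: the original method builds this PATCH request but never sends it; a
# plausible completion (assumption, using the helper seen elsewhere in this class):
# self.common.post_response_json(url, params, 'quick-edit contact name')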
# Duplicate-field check
def check_duplicate_field(self, contact_id):
url = self.base_url + 'api/contacts/check_duplicate_field.json'
params = {
'field':'tel',
'field_value':'13512341234',
'contact_id':contact_id
}
response = self.common.post_response_json(url, params, 'check whether the phone number already exists')
# Write a follow-up (revisit) log
def write_revisit_log(self, scope, contact_id):
url = self.base_url + 'contacts/'+str(contact_id)+'/revisit_logs/new'
params = {}
self.common.get_response_json(url, params, 'open the follow-up window')
url = self.base_url + 'contacts/' +str(contact_id)+'/revisit_logs?contact_id='+str(contact_id)
params = {
'utf8':'✓',
'authenticity_token':self.csrf,
'revisit_log[category]':'89825',
'revisit_log[real_revisit_at]':self.common.get_today_str_yymmddhhmm(),
'revisit_log[content]':'follow-up %s' % self.common.get_random_int(999),
'revisit_log[loggable_attributes][status]':'89822',
'revisit_log[loggable_attributes][id]':str(contact_id),
'revisit_log[remind_at]':''
}
self.common.post_response_json(url, params, 'write follow-up from the contact list page')
# ===== File: /girleffect/people/migrations/0037_p1_changes.py | repo: girleffect/girleffect.org | license: none =====
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-01-18 12:13
from __future__ import unicode_literals
from django.db import migrations, models
import girleffect.utils.models
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtaildocs.blocks
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
import wagtail.wagtailsnippets.blocks
class Migration(migrations.Migration):
dependencies = [
('people', '0036_auto_20180105_1522'),
]
operations = [
migrations.AlterField(
model_name='personindexpage',
name='hero_strapline',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='personpage',
name='biography',
field=wagtail.wagtailcore.fields.StreamField((('heading', wagtail.wagtailcore.blocks.CharBlock(classname='full title')), ('body_text', wagtail.wagtailcore.blocks.StructBlock((('body', wagtail.wagtailcore.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'link', 'ol', 'ul', 'hr'], label='Body Text')), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('body_heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('large_text', wagtail.wagtailcore.blocks.StructBlock((('body', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'link', 'document-link'], label='Large Text', required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('body_heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('extendable_body', wagtail.wagtailcore.blocks.StructBlock((('body_upper', wagtail.wagtailcore.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'link', 'ol', 'ul', 'hr'], label='Body Text')), ('extend_button_text', wagtail.wagtailcore.blocks.CharBlock(help_text='Customise text for the extend button', max_length=255, required=False)), ('collapse_button_text', wagtail.wagtailcore.blocks.CharBlock(help_text='Customise text for the collapse button', max_length=255, required=False)), ('body_lower', wagtail.wagtailcore.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'link', 'ol', 'ul', 'hr'], help_text='This body field is invisible until the user clicks the expand button', label='Extended body text')), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('body_heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('caption', wagtail.wagtailcore.blocks.CharBlock(required=False))))), ('quote', wagtail.wagtailcore.blocks.StructBlock((('quotes', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80, required=False)), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=255, required=True)), ('citation', wagtail.wagtailcore.blocks.CharBlock(max_length=255, required=False)), ('link_block', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, 
required=False))), required=False)), ('drop_shadow_options', wagtail.wagtailcore.blocks.StructBlock((('drop_shadow_is_on', wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Text Hex Code', max_length=7, required=False))))), ('quote_mark_hex', wagtail.wagtailcore.blocks.CharBlock(label='Quote Mark Hex Code', max_length=7, required=False)))), icon='openquote', template='blocks/quote_block.html')), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('video', wagtail.wagtailcore.blocks.StructBlock((('heading', wagtail.wagtailcore.blocks.CharBlock(max_length=30, required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=255, required=False)), ('youtube_embed', wagtail.wagtailembeds.blocks.EmbedBlock(help_text="Your YouTube URL goes here. Only YouTube video URLs will be accepted. The custom 'play' button will be created for valid YouTube URLs.", label='YouTube Video URL')), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))), label='Girl Effect YouTube Video')), ('slider', wagtail.wagtailcore.blocks.StructBlock((('slider_delay', wagtail.wagtailcore.blocks.IntegerBlock(help_text='Enter the milliseconds of the delay between each slide', required=False)), ('slider_items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('overview_title', wagtail.wagtailcore.blocks.CharBlock(help_text='Slider item overview title', max_length=255, required=False)), ('overview_title_shadow', wagtail.wagtailcore.blocks.StructBlock((('drop_shadow_is_on', wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Text Hex Code', max_length=7, required=False))), required=False)), ('overview_text', wagtail.wagtailcore.blocks.TextBlock(help_text='Slider item overview text', required=False)), ('overview_text_shadow', wagtail.wagtailcore.blocks.StructBlock((('drop_shadow_is_on', wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Text Hex Code', max_length=7, required=False))), required=False)), ('textbox_title', 
wagtail.wagtailcore.blocks.CharBlock(help_text='Slider item textbox title', max_length=255, required=False)), ('textbox_text', wagtail.wagtailcore.blocks.TextBlock(help_text='Slider item textbox text', required=False)), ('textbox_link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False))))))))), ('carousel_block', wagtail.wagtailcore.blocks.StreamBlock((('carousel_item', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('overview_title', wagtail.wagtailcore.blocks.CharBlock(help_text='Slider item overview title', max_length=255, required=False)), ('overview_title_shadow', wagtail.wagtailcore.blocks.StructBlock((('drop_shadow_is_on', wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Text Hex Code', max_length=7, required=False))), required=False)), ('overview_text', wagtail.wagtailcore.blocks.TextBlock(help_text='Slider item overview text', required=False)), ('overview_text_shadow', wagtail.wagtailcore.blocks.StructBlock((('drop_shadow_is_on', wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Text Hex Code', max_length=7, required=False))), required=False)), ('textbox_title', wagtail.wagtailcore.blocks.CharBlock(help_text='Slider item textbox title', max_length=255, required=False)), ('textbox_text', wagtail.wagtailcore.blocks.TextBlock(help_text='Slider item textbox text', required=False)), ('textbox_link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('slide_title', wagtail.wagtailcore.blocks.CharBlock(help_text='Title to appear at bottom of carousel, for example "Youth Brands"', max_length=255, required=False)), ('slide_logo', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('slide_title_hex', wagtail.wagtailcore.blocks.CharBlock(help_text='Add valid hex for slide title and chevron colours.', max_length=7, required=False))))),), label='Carousel', max_num=3, min_num=2)), ('media_text_overlay', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(help_text='Appears above the module.', label='Title Text', max_length=255, required=False)), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('logo', wagtail.wagtailimages.blocks.ImageChooserBlock(label='Title Logo', required=False)), 
('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=75, required=False)), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))), label='Full Width Media with Text Overlay')), ('list_block', wagtail.wagtailcore.blocks.StructBlock((('list_block', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('title', wagtail.wagtailcore.blocks.CharBlock(max_length=255, required=False)), ('description', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'link', 'document-link'], icon='pilcrow', max_length=250, required=False)), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)))))), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('link_row', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False)))), icon='link', template='blocks/inline_link_block.html')), ('anchor', wagtail.wagtailcore.blocks.StructBlock((('anchor', wagtail.wagtailcore.blocks.CharBlock()),))), ('statistic', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(max_length=255, required=False)), ('statistics', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailsnippets.blocks.SnippetChooserBlock(girleffect.utils.models.Statistic))), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', 
wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))), label='Statistic Block')), ('call_to_action', wagtail.wagtailsnippets.blocks.SnippetChooserBlock(girleffect.utils.models.CallToActionSnippet, template='blocks/call_to_action.html'))), blank=True),
),
migrations.AlterField(
model_name='socialmediaprofile',
name='service',
field=models.CharField(choices=[('twitter', 'Twitter'), ('linkedin', 'LinkedIn')], max_length=255),
),
]
# ---- /toontown/pets/DistributedPetProxyAI.py (repo: nate97/toontown-src-py3.0) ----
from direct.showbase.PythonUtil import contains, lerp
from direct.distributed import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
from toontown.pets import PetTraits, PetTricks
from toontown.pets import PetMood
from toontown.toonbase import ToontownGlobals
import random
import time
import copy
BATTLE_TRICK_HP_MULTIPLIER = 10.0
class DistributedPetProxyAI(DistributedObjectAI.DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPetProxyAI')
def __init__(self, air):
DistributedObjectAI.DistributedObjectAI.__init__(self, air)
self.ownerId = 0
self.petName = 'unnamed'
self.traitSeed = 0
self.safeZone = ToontownGlobals.ToontownCentral
self.traitList = [0] * PetTraits.PetTraits.NumTraits
self.head = -1
self.ears = -1
self.nose = -1
self.tail = -1
self.bodyTexture = 0
self.color = 0
self.colorScale = 0
self.eyeColor = 0
self.gender = 0
self.trickAptitudes = []
self.lastSeenTimestamp = self.getCurEpochTimestamp()
self.requiredMoodComponents = {}
self.__funcsToDelete = []
self.__generateDistTraitFuncs()
self.__generateDistMoodFuncs()
def getSetterName(self, valueName, prefix = 'set'):
return '%s%s%s' % (prefix, valueName[0].upper(), valueName[1:])
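
    # A quick illustration of what getSetterName produces, derived directly
    # from the format string above:
    #   getSetterName('head')          -> 'setHead'
    #   getSetterName('head', 'd_set') -> 'd_setHead'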
def setDNA(self, dna):
head, ears, nose, tail, body, color, colorScale, eyes, gender = dna
self.b_setHead(head)
self.b_setEars(ears)
self.b_setNose(nose)
self.b_setTail(tail)
self.b_setBodyTexture(body)
self.b_setColor(color)
self.b_setColorScale(colorScale)
self.b_setEyeColor(eyes)
self.b_setGender(gender)
def getOwnerId(self):
return self.ownerId
def b_setOwnerId(self, ownerId):
self.d_setOwnerId(ownerId)
self.setOwnerId(ownerId)
def d_setOwnerId(self, ownerId):
self.sendUpdate('setOwnerId', [ownerId])
def setOwnerId(self, ownerId):
self.ownerId = ownerId
def getPetName(self):
return self.petName
def b_setPetName(self, petName):
self.d_setPetName(petName)
self.setPetName(petName)
def d_setPetName(self, petName):
self.sendUpdate('setPetName', [petName])
def setPetName(self, petName):
self.petName = petName
def getTraitSeed(self):
return self.traitSeed
def b_setTraitSeed(self, traitSeed):
self.d_setTraitSeed(traitSeed)
self.setTraitSeed(traitSeed)
def d_setTraitSeed(self, traitSeed):
self.sendUpdate('setTraitSeed', [traitSeed])
def setTraitSeed(self, traitSeed):
self.traitSeed = traitSeed
def getSafeZone(self):
return self.safeZone
def b_setSafeZone(self, safeZone):
self.d_setSafeZone(safeZone)
self.setSafeZone(safeZone)
def d_setSafeZone(self, safeZone):
self.sendUpdate('setSafeZone', [safeZone])
def setSafeZone(self, safeZone):
self.safeZone = safeZone
def setTraits(self, traitList):
self.traitList = traitList
def __generateDistTraitFuncs(self):
for i in range(PetTraits.PetTraits.NumTraits):
traitName = PetTraits.getTraitNames()[i]
getterName = self.getSetterName(traitName, 'get')
b_setterName = self.getSetterName(traitName, 'b_set')
d_setterName = self.getSetterName(traitName, 'd_set')
setterName = self.getSetterName(traitName)
def traitGetter(i = i):
return self.traitList[i]
def b_traitSetter(value, setterName = setterName, d_setterName = d_setterName):
self.__dict__[d_setterName](value)
self.__dict__[setterName](value)
def d_traitSetter(value, setterName = setterName):
self.sendUpdate(setterName, [value])
def traitSetter(value, i = i):
self.traitList[i] = value
self.__dict__[getterName] = traitGetter
self.__dict__[b_setterName] = b_traitSetter
self.__dict__[d_setterName] = d_traitSetter
self.__dict__[setterName] = traitSetter
self.__funcsToDelete.append(getterName)
self.__funcsToDelete.append(b_setterName)
self.__funcsToDelete.append(d_setterName)
self.__funcsToDelete.append(setterName)
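
    # Note on the `i=i` and `setterName=setterName` default arguments used in
    # the generated functions above: Python closures capture variables by
    # reference, so without the defaults every generated function would see
    # only the *last* loop value. A minimal sketch of the pitfall:
    #   funcs = [lambda: i for i in range(3)]      # each call returns 2
    #   funcs = [lambda i=i: i for i in range(3)]  # calls return 0, 1, 2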
def getHead(self):
return self.head
def b_setHead(self, head):
self.d_setHead(head)
self.setHead(head)
def d_setHead(self, head):
self.sendUpdate('setHead', [head])
def setHead(self, head):
self.head = head
def getEars(self):
return self.ears
def b_setEars(self, ears):
self.d_setEars(ears)
self.setEars(ears)
def d_setEars(self, ears):
self.sendUpdate('setEars', [ears])
def setEars(self, ears):
self.ears = ears
def getNose(self):
return self.nose
def b_setNose(self, nose):
self.d_setNose(nose)
self.setNose(nose)
def d_setNose(self, nose):
self.sendUpdate('setNose', [nose])
def setNose(self, nose):
self.nose = nose
def getTail(self):
return self.tail
def b_setTail(self, tail):
self.d_setTail(tail)
self.setTail(tail)
def d_setTail(self, tail):
self.sendUpdate('setTail', [tail])
def setTail(self, tail):
self.tail = tail
def getBodyTexture(self):
return self.bodyTexture
def b_setBodyTexture(self, bodyTexture):
self.d_setBodyTexture(bodyTexture)
self.setBodyTexture(bodyTexture)
def d_setBodyTexture(self, bodyTexture):
self.sendUpdate('setBodyTexture', [bodyTexture])
def setBodyTexture(self, bodyTexture):
self.bodyTexture = bodyTexture
def getColor(self):
return self.color
def b_setColor(self, color):
self.d_setColor(color)
self.setColor(color)
def d_setColor(self, color):
self.sendUpdate('setColor', [color])
def setColor(self, color):
self.color = color
def getColorScale(self):
return self.colorScale
def b_setColorScale(self, colorScale):
self.d_setColorScale(colorScale)
self.setColorScale(colorScale)
def d_setColorScale(self, colorScale):
self.sendUpdate('setColorScale', [colorScale])
def setColorScale(self, colorScale):
self.colorScale = colorScale
def getEyeColor(self):
return self.eyeColor
def b_setEyeColor(self, eyeColor):
self.d_setEyeColor(eyeColor)
self.setEyeColor(eyeColor)
def d_setEyeColor(self, eyeColor):
self.sendUpdate('setEyeColor', [eyeColor])
def setEyeColor(self, eyeColor):
self.eyeColor = eyeColor
def getGender(self):
return self.gender
def b_setGender(self, gender):
self.d_setGender(gender)
self.setGender(gender)
def d_setGender(self, gender):
self.sendUpdate('setGender', [gender])
def setGender(self, gender):
self.gender = gender
def getLastSeenTimestamp(self):
return self.lastSeenTimestamp
def b_setLastSeenTimestamp(self, timestamp):
self.d_setLastSeenTimestamp(timestamp)
self.setLastSeenTimestamp(timestamp)
def d_setLastSeenTimestamp(self, timestamp):
self.sendUpdate('setLastSeenTimestamp', [timestamp])
def setLastSeenTimestamp(self, timestamp):
self.lastSeenTimestamp = timestamp
def getCurEpochTimestamp(self):
return int(time.time())
def getTimeSinceLastSeen(self):
t = time.time() - self.lastSeenTimestamp
return max(0.0, t)
def __handleMoodSet(self, component, value):
if self.isGenerated():
self.mood.setComponent(component, value)
else:
self.requiredMoodComponents[component] = value
def __handleMoodGet(self, component):
if self.isGenerated():
return self.mood.getComponent(component)
else:
return 0.0
def __generateDistMoodFuncs(self):
for compName in PetMood.PetMood.Components:
getterName = self.getSetterName(compName, 'get')
setterName = self.getSetterName(compName)
def moodGetter(compName = compName):
return self.__handleMoodGet(compName)
def b_moodSetter(value, setterName = setterName):
self.__dict__[setterName](value)
def d_moodSetter(value, setterName = setterName):
self.sendUpdate(setterName, [value])
def moodSetter(value, compName = compName):
self.__handleMoodSet(compName, value)
self.__dict__[getterName] = moodGetter
self.__dict__['b_%s' % setterName] = b_moodSetter
self.__dict__['d_%s' % setterName] = d_moodSetter
self.__dict__[setterName] = moodSetter
self.__funcsToDelete.append(getterName)
self.__funcsToDelete.append('b_%s' % setterName)
self.__funcsToDelete.append('d_%s' % setterName)
self.__funcsToDelete.append(setterName)
def getTrickAptitudes(self):
return self.trickAptitudes
def b_setTrickAptitudes(self, aptitudes):
self.setTrickAptitudes(aptitudes, local=1)
self.d_setTrickAptitudes(aptitudes)
def d_setTrickAptitudes(self, aptitudes):
if __dev__:
for aptitude in aptitudes:
pass
while len(aptitudes) < len(PetTricks.Tricks) - 1:
aptitudes.append(0.0)
self.sendUpdate('setTrickAptitudes', [aptitudes])
def setTrickAptitudes(self, aptitudes, local = 0):
if not local:
DistributedPetProxyAI.notify.debug('setTrickAptitudes: %s' % aptitudes)
while len(self.trickAptitudes) < len(PetTricks.Tricks) - 1:
self.trickAptitudes.append(0.0)
self.trickAptitudes = aptitudes
def getTrickAptitude(self, trickId):
if trickId > len(self.trickAptitudes) - 1:
return 0.0
return self.trickAptitudes[trickId]
def setTrickAptitude(self, trickId, aptitude, send = 1):
aptitude = min(max(aptitude, 0.0), 1.0)
aptitudes = self.trickAptitudes
while len(aptitudes) - 1 < trickId:
aptitudes.append(0.0)
if aptitudes[trickId] != aptitude:
aptitudes[trickId] = aptitude
if send:
self.b_setTrickAptitudes(aptitudes)
else:
self.setTrickAptitudes(aptitudes, local=1)
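
    # Clamping sketch for setTrickAptitude above: values are clipped into
    # [0.0, 1.0], so setTrickAptitude(t, 1.7) stores 1.0 and
    # setTrickAptitude(t, -0.2) stores 0.0; the aptitudes list is padded
    # with 0.0 entries up to the requested trick id.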
def generate(self):
DistributedObjectAI.DistributedObjectAI.generate(self)
self.traits = PetTraits.PetTraits(self.traitSeed, self.safeZone)
print(self.traits.traits)
for i in range(len(self.traitList)):
value = self.traitList[i]
if value == 0.0:
traitName = PetTraits.getTraitNames()[i]
traitValue = self.traits.getTraitValue(traitName)
DistributedPetProxyAI.notify.info("%s: initializing new trait '%s' to %s, seed=%s" % (self.doId,
traitName,
traitValue,
self.traitSeed))
setterName = self.getSetterName(traitName, 'b_set')
self.__dict__[setterName](traitValue)
self.mood = PetMood.PetMood(self)
for mood, value in list(self.requiredMoodComponents.items()):
self.mood.setComponent(mood, value, announce=0)
self.requiredMoodComponents = {}
self.accept(self.mood.getMoodChangeEvent(), self.handleMoodChange)
self.mood.start()
def broadcastDominantMood(self):
self.d_setDominantMood(self.mood.getDominantMood())
def delete(self):
self.ignore(self.mood.getMoodChangeEvent())
self.mood.destroy()
del self.mood
del self.traits
for funcName in self.__funcsToDelete:
del self.__dict__[funcName]
DistributedObjectAI.DistributedObjectAI.delete(self)
def setMoodComponent(self, component, value):
setter = self.getSetterName(component, 'b_set')
self.__dict__[setter](value)
def addToMood(self, component, delta):
value = self.mood.getComponent(component)
value += delta
self.setMoodComponent(component, min(max(value, 0.0), 1.0))
def lerpMood(self, component, factor):
curVal = self.mood.getComponent(component)
if factor < 0:
self.setMoodComponent(component, lerp(curVal, 0.0, -factor))
else:
self.setMoodComponent(component, lerp(curVal, 1.0, factor))
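
    # Sign convention of lerpMood, read off the branch above: a negative
    # factor eases the component toward 0.0 and a positive factor toward 1.0.
    # E.g. with a current value of 0.4: lerpMood(c, -0.5) -> 0.2 and
    # lerpMood(c, 0.5) -> 0.7.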
def addToMoods(self, mood2delta):
for mood, delta in list(mood2delta.items()):
self.addToMood(mood, delta)
def lerpMoods(self, mood2factor):
for mood, factor in list(mood2factor.items()):
self.lerpMood(mood, factor)
def isContented(self):
return self.mood.getDominantMood() in PetMood.PetMood.ContentedMoods
def _willDoTrick(self, trickId):
if self.isContented():
minApt = PetTricks.MinActualTrickAptitude
maxApt = PetTricks.MaxActualTrickAptitude
else:
minApt = PetTricks.NonHappyMinActualTrickAptitude
maxApt = PetTricks.NonHappyMaxActualTrickAptitude
randVal = random.random()
cutoff = lerp(minApt, maxApt, self.getTrickAptitude(trickId))
if self.mood.isComponentActive('fatigue'):
cutoff *= 0.5
cutoff *= PetTricks.TrickAccuracies[trickId]
DistributedPetProxyAI.notify.debug('_willDoTrick: %s / %s' % (randVal, cutoff))
return randVal < cutoff
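
    # Worked example of the cutoff above (illustrative numbers only -- the
    # real bounds live in PetTricks and are not shown in this file): with
    # minApt=0.1, maxApt=0.6 and aptitude 0.5, cutoff = lerp(0.1, 0.6, 0.5)
    # = 0.35; it is halved to 0.175 if the pet is fatigued, then scaled by
    # the trick's accuracy factor.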
def _handleDidTrick(self, trickId):
DistributedPetProxyAI.notify.debug('_handleDidTrick: %s' % trickId)
if trickId == PetTricks.Tricks.BALK:
return
aptitude = self.getTrickAptitude(trickId)
self.setTrickAptitude(trickId, aptitude + PetTricks.AptitudeIncrementDidTrick)
self.addToMood('fatigue', lerp(PetTricks.MaxTrickFatigue, PetTricks.MinTrickFatigue, aptitude))
self.d_setDominantMood(self.mood.getDominantMood())
def attemptBattleTrick(self, trickId):
self.lerpMoods({'boredom': -.1,
'excitement': 0.05,
'loneliness': -.05})
if self._willDoTrick(trickId):
self._handleDidTrick(trickId)
self.b_setLastSeenTimestamp(self.getCurEpochTimestamp())
return 0
else:
self.b_setLastSeenTimestamp(self.getCurEpochTimestamp())
return 1
def handleMoodChange(self, components = [], distribute = 1):
if len(components) == 0:
components = PetMood.PetMood.Components
if distribute:
if len(components) == len(PetMood.PetMood.Components):
values = []
for comp in PetMood.PetMood.Components:
values.append(self.mood.getComponent(comp))
self.sendUpdate('setMood', values)
else:
for comp in components:
setter = self.getSetterName(comp, 'd_set')
self.__dict__[setter](self.mood.getComponent(comp))
def d_setDominantMood(self, dominantMood):
self.sendUpdate('setDominantMood', [dominantMood])
# ---- /ioud/founder_emp_loan/models/hr_loan.py (repo: ahmed-amine-ellouze/personal) ----
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo.exceptions import except_orm
from odoo.exceptions import UserError
class HrLoan(models.Model):
_name = 'hr.loan'
_inherit = ['mail.thread', 'mail.activity.mixin']
_description = "Loan Request"
@api.model
def default_get(self, fields):
res = super(HrLoan, self).default_get(fields)
        employee = self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1)
        if employee:
            res['employee_id'] = employee.id
return res
    def _compute_loan_amount(self):
        for loan in self:
            # Reset the accumulator per record so that a multi-record
            # recordset does not leak paid amounts between loans.
            total_paid = 0.0
            for line in loan.loan_lines:
                if line.status == 'done':
                    total_paid += line.amount
            # Assign on the loop record, not on `self`, so the compute also
            # works when invoked on a multi-record recordset.
            loan.total_amount = loan.loan_amount
            loan.balance_amount = loan.loan_amount - total_paid
            loan.total_paid_amount = total_paid
name = fields.Char(string="Loan Name", default="/", readonly=True, track_visibility='onchange')
date = fields.Date(string="Date", default=fields.Date.today(), readonly=True, track_visibility='onchange')
requseter_id = fields.Many2one('res.users', string="Requester", default=lambda self: self.env.user, readonly=True, track_visibility='onchange')
employee_id = fields.Many2one('hr.employee', string="Employee", required=True, copy=False, track_visibility='onchange')
department_id = fields.Many2one(related='employee_id.department_id', string="Department", readonly=True, track_visibility='onchange')
loan_lines = fields.One2many('hr.loan.line', 'loan_id', string="Loan Line", index=True)
company_id = fields.Many2one('res.company', 'Company', readonly=True,
default=lambda self: self.env.user.company_id,
states={'draft': [('readonly', False)]}, track_visibility='onchange')
currency_id = fields.Many2one('res.currency', string='Currency', required=True,
default=lambda self: self.env.user.company_id.currency_id)
job_id = fields.Many2one(related='employee_id.job_idd', string="Job Position", readonly=True, track_visibility='onchange')
loan_amount = fields.Float(string="Loan Amount", required=True, track_visibility='onchange')
total_amount = fields.Float(string="Total Amount", readonly=True, compute='_compute_loan_amount')
balance_amount = fields.Float(string="Balance Amount", compute='_compute_loan_amount')
total_paid_amount = fields.Float(string="Total Paid Amount", compute='_compute_loan_amount')
loan_type_id = fields.Many2one('loan.type',string="Loan Type", copy=False, track_visibility='onchange')
payment_date = fields.Date(string="Installment Date", required=True, default=fields.Date.today(), track_visibility='onchange')
state = fields.Selection([
('draft', 'Draft'),
('to_submit', 'To Submit'),
('in_process', 'In process'),
('approve', 'Approved'),
('reapply', 'Re-Apply'),
('cancelled', 'Cancelled'),
('hold', 'Hold'),
('rejected','Rejected')
], string="State", default='draft', track_visibility='onchange', copy=False)
installment_type = fields.Selection([
('installment_amount', 'Installment Amount'),
('installment_no', 'Installment No.'),
], string="Payment Type", default='installment_amount', track_visibility='onchange', copy=False)
installment = fields.Integer(string="No Of Installments", default=1)
installment_amount = fields.Float(string="Installment Amount")
desciption = fields.Text(string="Description", required=True, copy=False)
@api.constrains('loan_lines')
def action_check_loan_lines(self):
for loan in self:
total = 0.0
for line in loan.loan_lines:
total += line.amount
if loan.loan_amount < total:
raise UserError(
                    _('The total of the installment lines cannot exceed the loan amount.'),
)
    def unlink(self):
        for loan in self:
            if loan.state != 'draft':
                raise UserError(
                    _('You can delete only draft status records...'),
                )
        return super(HrLoan, self).unlink()
@api.model
def create(self, values):
values['name'] = self.env['ir.sequence'].get('hr.loan.seq') or ' '
res = super(HrLoan, self).create(values)
return res
def action_reject(self):
self.state = 'rejected'
def action_reapply(self):
self.state = 'reapply'
def action_submit(self):
self.state = 'to_submit'
self.compute_installment()
@api.onchange('employee_id')
def _onchange_employee_id(self):
for emp in self:
emp.department_id = emp.employee_id.department_id.id if emp.employee_id.department_id else False
emp.job_id = emp.employee_id.job_idd.id if emp.employee_id.job_idd else False
def action_inprocess(self):
self.state = 'in_process'
def action_approved(self):
if len(self.loan_lines.ids) > 1:
self.state = 'approve'
else:
raise UserError(
                _('You cannot approve this loan: check the installment lines.'),
)
def action_cancel(self):
self.state = 'cancelled'
self.loan_lines.unlink()
def compute_installment(self):
"""This automatically create the installment the employee need to pay to
company based on payment start date and the no of installments.
"""
for loan in self:
loan.loan_lines.unlink()
amount = 0.0
installment = 1
TotalLastAmount = 0.0
date_start = loan.payment_date
if loan.installment_type == 'installment_no':
amount = loan.loan_amount / loan.installment
installment = loan.installment
else:
amount = loan.installment_amount
installment = loan.loan_amount / loan.installment_amount
if installment == len(self.loan_lines):
raise except_orm('Error!', 'Line Already Filled')
else:
for i in range(1, int(installment) + 1):
self.env['hr.loan.line'].create({
'date': date_start,
'amount': amount,
'employee_id': loan.employee_id.id,
'loan_id': loan.id,
'loan_type_id' : loan.loan_type_id.id,
'installment_type':loan.installment_type,
'desciption' : str(loan.desciption) + '-' + str(date_start)})
date_start = date_start + relativedelta(months=1)
#Last Payment Amonuts CA
for line in loan.loan_lines:
TotalLastAmount += line.amount
if (loan.loan_amount - TotalLastAmount) > 0:
self.env['hr.loan.line'].create({
'date': date_start,
'amount': loan.loan_amount - TotalLastAmount,
'employee_id': loan.employee_id.id,
'loan_id': loan.id,
'loan_type_id' : loan.loan_type_id.id,
'installment_type':loan.installment_type,
'desciption' : str(loan.desciption) + '-' + str(date_start)})
return True
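
    # Worked example of the schedule built above: loan_amount=1000.0 with
    # installment_amount=300.0 yields three monthly lines of 300.0, and the
    # remainder pass appends a final line of 100.0 so that the lines always
    # sum to the loan amount.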
class InstallmentLine(models.Model):
_name = "hr.loan.line"
_description = "Installment Line"
_rec_name = "desciption"
_order = 'date desc'
@api.model
def default_get(self, fields):
res = super(InstallmentLine, self).default_get(fields)
LoanObject = self.env['hr.loan'].browse(self._context.get('default_loan_id'))
res['employee_id'] = LoanObject.employee_id.id
total = 0.0
for line in LoanObject.loan_lines:
total += line.amount
res['amount'] = LoanObject.loan_amount - total
return res
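
    # Prefill sketch for default_get above: if the parent loan is 1000.0 and
    # the existing lines already total 700.0, a new line opened from the loan
    # form defaults its amount to the remaining 300.0.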
date = fields.Date(string="Payment Date", required=True)
employee_id = fields.Many2one('hr.employee', string="Employee")
amount = fields.Float(string="Amount", required=True)
status = fields.Selection([('pending','Pending'),('done','Done'),('hold', 'Hold')],string="Status", default="pending")
loan_id = fields.Many2one('hr.loan', string="Loan Ref.")
payslip_id = fields.Many2one('hr.payslip', string="Payslip Ref.")
loan_type_id = fields.Many2one('loan.type',string="Loan Type")
desciption = fields.Text(string="Description")
installment_type = fields.Selection([
('installment_amount', 'Installment Amount'),
('installment_no', 'Installment No.'),
], string="Payment Type", copy=False)
@api.onchange('date')
def _onchange_date(self):
for emp in self:
emp.desciption = str(emp.loan_id.desciption) + '-' + str(emp.date)
@api.model
def create(self, values):
LoanObject = self.env['hr.loan'].browse(int(values.get('loan_id')))
desciption = LoanObject.desciption
date = values.get('date')
if date:
values['desciption'] = str(desciption) + '-' + str(date)
return super(InstallmentLine, self).create(values)
def write(self, values):
desciption = self.loan_id.desciption
date = values.get('date')
if date:
values['desciption'] = str(desciption) + '-' + str(date)
return super(InstallmentLine, self).write(values)
class HrEmployee(models.Model):
_inherit = "hr.employee"
loan_count = fields.Integer(string="Loan Count", compute='_compute_employee_loans')
@api.depends('name')
def _compute_employee_loans(self):
"""This compute the loan amount and total loans count of an employee.
"""
for rec in self:
rec.loan_count = self.env['hr.loan'].search_count([('employee_id', '=', rec.id)])
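
    # loan_count is the kind of computed field that is usually surfaced as a
    # stat ("smart") button on the employee form; the XML view that would
    # reference it is not part of this file, so that usage is an assumption.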
# ---- /third_party/nucleus/io/python/bed_writer_wrap_test.py (repo: ruif2009/deepvariant, licenses: BSL-1.0 / Apache-2.0 / BSD-3-Clause) ----
# Copyright 2018 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for BedWriter CLIF python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow as tf
from third_party.nucleus.io.python import bed_writer
from third_party.nucleus.protos import bed_pb2
from third_party.nucleus.testing import test_utils
from third_party.nucleus.util import io_utils
_DOUBLE_CLOSE_ERROR = 'Cannot close an already closed BedWriter'
_WRITE_TO_CLOSED_ERROR = 'Cannot write to closed BED stream'
class WrapBedWriterTest(parameterized.TestCase):
def setUp(self):
out_fname = test_utils.test_tmpfile('output.bed')
self.writer = bed_writer.BedWriter.to_file(
out_fname, bed_pb2.BedHeader(num_fields=12), bed_pb2.BedWriterOptions())
self.expected_bed_content = [
'chr1\t10\t20\tfirst\t100\t+\t12\t18\t255,124,1\t3\t2,6,2\t10,12,18\n',
'chr1\t100\t200\tsecond\t250\t.\t120\t180\t252,122,12\t2\t35,40\t'
'100,160\n'
]
self.record = bed_pb2.BedRecord(
reference_name='chr1', start=20, end=30, name='r')
def test_writing_canned_records(self):
"""Tests writing all the records that are 'canned' in our tfrecord file."""
# This file is in TFRecord format.
tfrecord_file = test_utils.genomics_core_testdata(
'test_regions.bed.tfrecord')
header = bed_pb2.BedHeader(num_fields=12)
writer_options = bed_pb2.BedWriterOptions()
bed_records = list(
io_utils.read_tfrecords(tfrecord_file, proto=bed_pb2.BedRecord))
out_fname = test_utils.test_tmpfile('output.bed')
with bed_writer.BedWriter.to_file(out_fname, header,
writer_options) as writer:
for record in bed_records:
writer.write(record)
with tf.gfile.GFile(out_fname, 'r') as f:
self.assertEqual(f.readlines(), self.expected_bed_content)
def test_context_manager(self):
with self.writer:
# Writing within the context manager succeeds.
self.assertIsNone(self.writer.write(self.record))
# self.writer should be closed, so writing again will fail.
with self.assertRaisesRegexp(ValueError, _WRITE_TO_CLOSED_ERROR):
self.writer.write(self.record)
def test_double_context_manager(self):
with self.writer:
# Writing within the context manager succeeds.
self.assertIsNone(self.writer.write(self.record))
with self.assertRaisesRegexp(ValueError, _DOUBLE_CLOSE_ERROR):
# Entering the closed writer should be fine.
with self.writer:
pass # We want to raise an error on exit, so nothing to do in context.
if __name__ == '__main__':
absltest.main()
# ---- /Built-inFunctions/lambda.py (repo: eBLDR/MasterNotes_Python) ----
"""
map() and filter() usually work together with lambda functions.
lambda was considered for removal when migrating from Python 2 to
Python 3, but it was finally kept.
Any function created with the lambda operator can also be created using
the normal way of defining a function.
"""
# lambda operator is a way to create anonymous functions
# syntax is - lambda argument_list: expression
add = lambda x, y: x + y
print(add(3, 4))
print('=' * 20)
# lambda with map()
C = [39.2, 36.5, 37.0, 38.1, 40.3] # A list with degrees Celsius
# Creating a list with degrees Fahrenheit
F = list(map(lambda x: x * 9 / 5 + 32, C))
print(F)
# Equivalence creating a function and using list comprehension
def converter(n):
return n * 9 / 5 + 32
F_comp = [converter(x) for x in C]
print(F_comp)
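
# Sanity check of the conversion above: 39.2 C -> 39.2 * 9 / 5 + 32 = 102.56 F.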
print('=' * 20)
# lambda with filter()
fibonacci = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
odd_fib = list(filter(lambda x: x % 2, fibonacci))
# Remember that bool(int!=0) is True
print('bool(2) is {}'.format(bool(2))) # only bool(0) is False
print(odd_fib)
# equivalence using list comprehension
odd_fib_comp = [x for x in fibonacci if x % 2 != 0]
print(odd_fib_comp)
print('=' * 20)
# lambda with reduce()
from functools import reduce
# Returns the largest value
f = lambda a, b: a if (a > b) else b
print(reduce(f, [47, 11, 42, 102, 13]))
# Returns the sum of all numbers
print(reduce(lambda x, y: x + y, range(1, 101)))
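
# A quick trace of the two reduce() calls above: the first folds
# left-to-right, f(f(f(f(47, 11), 42), 102), 13), and returns 102; the
# second sums the arithmetic series 1..100 and returns 5050.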
print('=' * 20)
class StringConversionMethod:
"""Class for defining string conversion methods."""
def __init__(self, name, execute_func):
self.name = name
self.execute_func = execute_func
def __str__(self):
return self.name
def apply(self, string_to_convert):
converted_string = ''
try:
converted_string = self.execute_func(string_to_convert)
except Exception as exc:
print('Failed to apply conversion: ' + self.name)
print(exc)
return converted_string
my_rules = [
StringConversionMethod(
'Strip everything after 30 characters',
(lambda x: x[:30])
),
StringConversionMethod(
'Add 10 whitespaces before last character',
(lambda x: x[:-1] + ' ' * 10 + x[-1])
)
]
my_string = 'This is a test string for conversion purposes. Feel free to ' \
'update me if you wish to.'
print('Before conversion: ' + my_string)
for rule in my_rules:
print('Applying rule: ' + rule.name + ' ...')
my_string = rule.apply(my_string)
print('After conversion: ' + my_string)
# ---- /Python_codes/p02859/s935054625.py (repo: Aasthaengg/IBMdataset) ----
#from statistics import median
#import collections
#aa = collections.Counter(a) # list to list || .most_common(2) pulls out the 2 most frequent items  a[0][0]
from math import gcd  # fractions.gcd was removed in Python 3.9; math.gcd is the drop-in replacement
from itertools import combinations,permutations,accumulate # (string,3) -> taken 3 at a time
#from collections import deque
from collections import deque,defaultdict,Counter
import decimal
import re
#import bisect
#
# d = m - k[i] - k[j]
# if kk[bisect.bisect_right(kk,d) - 1] == d:
#
#
#
# When pure Python is too slow, resubmitting with PyPy may get accepted!!
#
#
# my_round_int = lambda x:np.round((x*2 + 1)//2)
# round half up
import sys
sys.setrecursionlimit(10000000)
mod = 10**9 + 7
#mod = 9982443453
def readInts():
return list(map(int,input().split()))
def I():
return int(input())
print(I()**2)
# ---- /venv/lib/python3.8/site-packages/virtualenv/create/debug.py (repo: chunyboy/test) ----
# (original file content was a single pip cache-pool path; nothing substantive to keep)
# ---- /google/cloud/bigquery_storage_v1/services/big_query_write/client.py (repo: googleapis/python-bigquery-storage, license: Apache-2.0) ----
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import (
Dict,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Iterable,
Iterator,
Sequence,
Tuple,
Type,
Union,
cast,
)
from google.cloud.bigquery_storage_v1 import gapic_version as package_version
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.bigquery_storage_v1.types import storage
from google.cloud.bigquery_storage_v1.types import stream
from google.cloud.bigquery_storage_v1.types import table
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from .transports.base import BigQueryWriteTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import BigQueryWriteGrpcTransport
from .transports.grpc_asyncio import BigQueryWriteGrpcAsyncIOTransport
class BigQueryWriteClientMeta(type):
"""Metaclass for the BigQueryWrite client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[BigQueryWriteTransport]]
_transport_registry["grpc"] = BigQueryWriteGrpcTransport
_transport_registry["grpc_asyncio"] = BigQueryWriteGrpcAsyncIOTransport
def get_transport_class(
cls,
label: Optional[str] = None,
) -> Type[BigQueryWriteTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class BigQueryWriteClient(metaclass=BigQueryWriteClientMeta):
"""BigQuery Write API.
The Write API can be used to write data to BigQuery.
For supplementary information about the Write API, see:
https://cloud.google.com/bigquery/docs/write-api
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "bigquerystorage.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BigQueryWriteClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BigQueryWriteClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> BigQueryWriteTransport:
"""Returns the transport used by the client instance.
Returns:
BigQueryWriteTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def table_path(
project: str,
dataset: str,
table: str,
) -> str:
"""Returns a fully-qualified table string."""
return "projects/{project}/datasets/{dataset}/tables/{table}".format(
project=project,
dataset=dataset,
table=table,
)
@staticmethod
def parse_table_path(path: str) -> Dict[str, str]:
"""Parses a table path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/tables/(?P<table>.+?)$",
path,
)
return m.groupdict() if m else {}
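
    # Round-trip sketch for the two helpers above:
    #   BigQueryWriteClient.table_path('p', 'd', 't')
    #     -> 'projects/p/datasets/d/tables/t'
    #   BigQueryWriteClient.parse_table_path('projects/p/datasets/d/tables/t')
    #     -> {'project': 'p', 'dataset': 'd', 'table': 't'}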
@staticmethod
def write_stream_path(
project: str,
dataset: str,
table: str,
stream: str,
) -> str:
"""Returns a fully-qualified write_stream string."""
return "projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}".format(
project=project,
dataset=dataset,
table=table,
stream=stream,
)
@staticmethod
def parse_write_stream_path(path: str) -> Dict[str, str]:
"""Parses a write_stream path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/tables/(?P<table>.+?)/streams/(?P<stream>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
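
    # Decision sketch for the endpoint selection above: with
    # GOOGLE_API_USE_MTLS_ENDPOINT="auto" (the default), the mTLS endpoint is
    # picked only when a client certificate source is available; "always"
    # and "never" force the mTLS and regular endpoints respectively.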
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Optional[Union[str, BigQueryWriteTransport]] = None,
client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the big query write client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, BigQueryWriteTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
client_options = cast(client_options_lib.ClientOptions, client_options)
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, BigQueryWriteTransport):
# transport is a BigQueryWriteTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
api_audience=client_options.api_audience,
)
def create_write_stream(
self,
request: Optional[Union[storage.CreateWriteStreamRequest, dict]] = None,
*,
parent: Optional[str] = None,
write_stream: Optional[stream.WriteStream] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> stream.WriteStream:
r"""Creates a write stream to the given table. Additionally, every
table has a special stream named '_default' to which data can be
written. This stream doesn't need to be created using
CreateWriteStream. It is a stream that can be used
simultaneously by any number of clients. Data written to this
stream is considered committed as soon as an acknowledgement is
received.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bigquery_storage_v1
def sample_create_write_stream():
# Create a client
client = bigquery_storage_v1.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1.CreateWriteStreamRequest(
parent="parent_value",
)
# Make the request
response = client.create_write_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1.types.CreateWriteStreamRequest, dict]):
The request object. Request message for ``CreateWriteStream``.
parent (str):
Required. Reference to the table to which the stream
belongs, in the format of
``projects/{project}/datasets/{dataset}/tables/{table}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
write_stream (google.cloud.bigquery_storage_v1.types.WriteStream):
Required. Stream to be created.
This corresponds to the ``write_stream`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1.types.WriteStream:
Information about a single stream
that gets data inside the storage
system.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, write_stream])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a storage.CreateWriteStreamRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, storage.CreateWriteStreamRequest):
request = storage.CreateWriteStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if write_stream is not None:
request.write_stream = write_stream
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_write_stream]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def append_rows(
self,
requests: Optional[Iterator[storage.AppendRowsRequest]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[storage.AppendRowsResponse]:
r"""Appends data to the given stream.
If ``offset`` is specified, the ``offset`` is checked against
the end of stream. The server returns ``OUT_OF_RANGE`` in
``AppendRowsResponse`` if an attempt is made to append to an
offset beyond the current end of the stream or
``ALREADY_EXISTS`` if user provides an ``offset`` that has
already been written to. User can retry with adjusted offset
within the same RPC connection. If ``offset`` is not specified,
append happens at the end of the stream.
The response contains an optional offset at which the append
happened. No offset information will be returned for appends to
a default stream.
Responses are received in the same order in which requests are
sent. There will be one response for each successful inserted
request. Responses may optionally embed error information if the
originating AppendRequest was not successfully processed.
The specifics of when successfully appended data is made visible
to the table are governed by the type of stream:
- For COMMITTED streams (which includes the default stream),
data is visible immediately upon successful append.
- For BUFFERED streams, data is made visible via a subsequent
``FlushRows`` rpc which advances a cursor to a newer offset
in the stream.
- For PENDING streams, data is not made visible until the
stream itself is finalized (via the ``FinalizeWriteStream``
rpc), and the stream is explicitly committed via the
``BatchCommitWriteStreams`` rpc.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bigquery_storage_v1
def sample_append_rows():
# Create a client
client = bigquery_storage_v1.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1.AppendRowsRequest(
write_stream="write_stream_value",
)
# This method expects an iterator which contains
# 'bigquery_storage_v1.AppendRowsRequest' objects
# Here we create a generator that yields a single `request` for
# demonstrative purposes.
requests = [request]
def request_generator():
for request in requests:
yield request
# Make the request
stream = client.append_rows(requests=request_generator())
# Handle the response
for response in stream:
print(response)
Args:
requests (Iterator[google.cloud.bigquery_storage_v1.types.AppendRowsRequest]):
The request object iterator. Request message for ``AppendRows``.
Because AppendRows is a bidirectional streaming RPC,
certain parts of the AppendRowsRequest need only be
specified for the first request before switching table
destinations. You can also switch table destinations
within the same connection for the default stream.
The size of a single AppendRowsRequest must be less than
10 MB in size. Requests larger than this return an
error, typically ``INVALID_ARGUMENT``.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
Iterable[google.cloud.bigquery_storage_v1.types.AppendRowsResponse]:
Response message for AppendRows.
"""
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.append_rows]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (gapic_v1.routing_header.to_grpc_metadata(()),)
# Send the request.
response = rpc(
requests,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_write_stream(
self,
request: Optional[Union[storage.GetWriteStreamRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> stream.WriteStream:
r"""Gets information about a write stream.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bigquery_storage_v1
def sample_get_write_stream():
# Create a client
client = bigquery_storage_v1.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1.GetWriteStreamRequest(
name="name_value",
)
# Make the request
response = client.get_write_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1.types.GetWriteStreamRequest, dict]):
The request object. Request message for ``GetWriteStreamRequest``.
name (str):
Required. Name of the stream to get, in the form of
``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1.types.WriteStream:
Information about a single stream
that gets data inside the storage
system.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a storage.GetWriteStreamRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, storage.GetWriteStreamRequest):
request = storage.GetWriteStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_write_stream]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def finalize_write_stream(
self,
request: Optional[Union[storage.FinalizeWriteStreamRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> storage.FinalizeWriteStreamResponse:
r"""Finalize a write stream so that no new data can be appended to
the stream. Finalize is not supported on the '_default' stream.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bigquery_storage_v1
def sample_finalize_write_stream():
# Create a client
client = bigquery_storage_v1.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1.FinalizeWriteStreamRequest(
name="name_value",
)
# Make the request
response = client.finalize_write_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1.types.FinalizeWriteStreamRequest, dict]):
The request object. Request message for invoking ``FinalizeWriteStream``.
name (str):
Required. Name of the stream to finalize, in the form of
``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1.types.FinalizeWriteStreamResponse:
Response message for FinalizeWriteStream.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a storage.FinalizeWriteStreamRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, storage.FinalizeWriteStreamRequest):
request = storage.FinalizeWriteStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.finalize_write_stream]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def batch_commit_write_streams(
self,
request: Optional[Union[storage.BatchCommitWriteStreamsRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> storage.BatchCommitWriteStreamsResponse:
r"""Atomically commits a group of ``PENDING`` streams that belong to
the same ``parent`` table.
Streams must be finalized before commit and cannot be committed
multiple times. Once a stream is committed, data in the stream
becomes available for read operations.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bigquery_storage_v1
def sample_batch_commit_write_streams():
# Create a client
client = bigquery_storage_v1.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1.BatchCommitWriteStreamsRequest(
parent="parent_value",
write_streams=['write_streams_value1', 'write_streams_value2'],
)
# Make the request
response = client.batch_commit_write_streams(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1.types.BatchCommitWriteStreamsRequest, dict]):
The request object. Request message for ``BatchCommitWriteStreams``.
parent (str):
Required. Parent table that all the streams should
belong to, in the form of
``projects/{project}/datasets/{dataset}/tables/{table}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1.types.BatchCommitWriteStreamsResponse:
Response message for BatchCommitWriteStreams.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a storage.BatchCommitWriteStreamsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, storage.BatchCommitWriteStreamsRequest):
request = storage.BatchCommitWriteStreamsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.batch_commit_write_streams
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def flush_rows(
self,
request: Optional[Union[storage.FlushRowsRequest, dict]] = None,
*,
write_stream: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> storage.FlushRowsResponse:
r"""Flushes rows to a BUFFERED stream.
If users are appending rows to BUFFERED stream, flush operation
is required in order for the rows to become available for
reading. A Flush operation flushes up to any previously flushed
offset in a BUFFERED stream, to the offset specified in the
request.
Flush is not supported on the \_default stream, since it is not
BUFFERED.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import bigquery_storage_v1
def sample_flush_rows():
# Create a client
client = bigquery_storage_v1.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1.FlushRowsRequest(
write_stream="write_stream_value",
)
# Make the request
response = client.flush_rows(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1.types.FlushRowsRequest, dict]):
The request object. Request message for ``FlushRows``.
write_stream (str):
Required. The stream that is the
target of the flush operation.
This corresponds to the ``write_stream`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1.types.FlushRowsResponse:
                Response message for FlushRows.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([write_stream])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a storage.FlushRowsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, storage.FlushRowsRequest):
request = storage.FlushRowsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if write_stream is not None:
request.write_stream = write_stream
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.flush_rows]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("write_stream", request.write_stream),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def __enter__(self) -> "BigQueryWriteClient":
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=package_version.__version__
)
__all__ = ("BigQueryWriteClient",)
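

# A minimal sketch (not part of the generated client) of the PENDING-stream
# write lifecycle documented above: create a stream, append rows, finalize
# it, then commit. The table path is a placeholder, and building the actual
# AppendRowsRequest payload (schema-specific proto rows) is omitted.
#
#     from google.cloud import bigquery_storage_v1
#     from google.cloud.bigquery_storage_v1 import types
#
#     client = bigquery_storage_v1.BigQueryWriteClient()
#     parent = "projects/my-project/datasets/my_dataset/tables/my_table"
#
#     stream = client.create_write_stream(
#         parent=parent,
#         write_stream=types.WriteStream(type_=types.WriteStream.Type.PENDING),
#     )
#     # ... send AppendRowsRequest messages via client.append_rows() ...
#     client.finalize_write_stream(name=stream.name)
#     client.batch_commit_write_streams(
#         request=types.BatchCommitWriteStreamsRequest(
#             parent=parent, write_streams=[stream.name]
#         )
#     )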
| [
"[email protected]"
] | |
4a2b0afd6a771da8a5184cdbf223111f516daaa0 | 4e8876d7b29cf9fb05849da77553b8a7e3783bdc | /src/plugins/processing/tools/dataobjects.py | 4b3e244e3fb0bcdc47fed71680ef34b08476f4c3 | [] | no_license | hydrology-tep/hep-qgis-plugin-lite | 48477f504b6fc1a9a9446c7c7f5666f4b2ccfee7 | 781cbaa1b3e9331de6741dd44a22322048ab176c | refs/heads/master | 2021-03-27T17:01:18.284421 | 2018-06-27T12:09:58 | 2018-06-27T12:09:58 | 70,825,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,292 | py | # -*- coding: utf-8 -*-
"""
***************************************************************************
dataobject.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import re
from qgis.core import QGis, QgsProject, QgsVectorFileWriter, QgsMapLayer, QgsRasterLayer, \
QgsVectorLayer, QgsMapLayerRegistry, QgsCoordinateReferenceSystem
from qgis.gui import QgsSublayersDialog
from qgis.PyQt.QtCore import QSettings
from qgis.utils import iface
from processing.core.ProcessingConfig import ProcessingConfig
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools.system import getTempFilenameInTempFolder, getTempFilename, isWindows
ALL_TYPES = [-1]
_loadedLayers = {}
def resetLoadedLayers():
global _loadedLayers
_loadedLayers = {}
def getSupportedOutputVectorLayerExtensions():
formats = QgsVectorFileWriter.supportedFiltersAndFormats()
exts = ['shp'] # shp is the default, should be the first
for extension in formats.keys():
extension = unicode(extension)
extension = extension[extension.find('*.') + 2:]
extension = extension[:extension.find(' ')]
if extension.lower() != 'shp':
exts.append(extension)
return exts
def getSupportedOutputRasterLayerExtensions():
allexts = ['tif']
for exts in GdalUtils.getSupportedRasters().values():
for ext in exts:
if ext not in allexts:
allexts.append(ext)
return allexts
def getSupportedOutputTableExtensions():
exts = ['csv']
return exts
def getRasterLayers(sorting=True):
layers = QgsProject.instance().layerTreeRoot().findLayers()
raster = [lay.layer() for lay in layers if lay.layer() is not None and canUseRasterLayer(lay.layer())]
if sorting:
return sorted(raster, key=lambda layer: layer.name().lower())
else:
return raster
def getVectorLayers(shapetype=[-1], sorting=True):
layers = QgsProject.instance().layerTreeRoot().findLayers()
vector = [lay.layer() for lay in layers if canUseVectorLayer(lay.layer(), shapetype)]
if sorting:
return sorted(vector, key=lambda layer: layer.name().lower())
else:
return vector
def canUseVectorLayer(layer, shapetype):
if layer.type() == QgsMapLayer.VectorLayer and layer.dataProvider().name() != "grass":
if (layer.hasGeometryType() and
(shapetype == ALL_TYPES or layer.geometryType() in shapetype)):
return True
return False
def canUseRasterLayer(layer):
if layer.type() == QgsMapLayer.RasterLayer:
if layer.providerType() == 'gdal': # only gdal file-based layers
return True
return False
def getAllLayers():
layers = []
layers += getRasterLayers()
layers += getVectorLayers()
return sorted(layers, key=lambda layer: layer.name().lower())
def getTables(sorting=True):
layers = QgsProject.instance().layerTreeRoot().findLayers()
tables = []
for layer in layers:
mapLayer = layer.layer()
if mapLayer.type() == QgsMapLayer.VectorLayer:
tables.append(mapLayer)
if sorting:
return sorted(tables, key=lambda table: table.name().lower())
else:
return tables
def extent(layers):
first = True
for layer in layers:
        if not isinstance(layer, (QgsRasterLayer, QgsVectorLayer)):
layer = getObjectFromUri(layer)
if layer is None:
continue
if first:
xmin = layer.extent().xMinimum()
xmax = layer.extent().xMaximum()
ymin = layer.extent().yMinimum()
ymax = layer.extent().yMaximum()
else:
xmin = min(xmin, layer.extent().xMinimum())
xmax = max(xmax, layer.extent().xMaximum())
ymin = min(ymin, layer.extent().yMinimum())
ymax = max(ymax, layer.extent().yMaximum())
first = False
if first:
return '0,0,0,0'
else:
return unicode(xmin) + ',' + unicode(xmax) + ',' + unicode(ymin) + ',' + unicode(ymax)
def loadList(layers):
for layer in layers:
load(layer)
def load(fileName, name=None, crs=None, style=None):
"""Loads a layer/table into the current project, given its file.
"""
if fileName is None:
return
prjSetting = None
settings = QSettings()
if crs is not None:
prjSetting = settings.value('/Projections/defaultBehaviour')
settings.setValue('/Projections/defaultBehaviour', '')
if name is None:
name = os.path.split(fileName)[1]
qgslayer = QgsVectorLayer(fileName, name, 'ogr')
if qgslayer.isValid():
if crs is not None and qgslayer.crs() is None:
qgslayer.setCrs(crs, False)
if style is None:
if qgslayer.geometryType() == QGis.Point:
style = ProcessingConfig.getSetting(ProcessingConfig.VECTOR_POINT_STYLE)
elif qgslayer.geometryType() == QGis.Line:
style = ProcessingConfig.getSetting(ProcessingConfig.VECTOR_LINE_STYLE)
else:
style = ProcessingConfig.getSetting(ProcessingConfig.VECTOR_POLYGON_STYLE)
qgslayer.loadNamedStyle(style)
QgsMapLayerRegistry.instance().addMapLayers([qgslayer])
else:
qgslayer = QgsRasterLayer(fileName, name)
if qgslayer.isValid():
if crs is not None and qgslayer.crs() is None:
qgslayer.setCrs(crs, False)
if style is None:
style = ProcessingConfig.getSetting(ProcessingConfig.RASTER_STYLE)
qgslayer.loadNamedStyle(style)
QgsMapLayerRegistry.instance().addMapLayers([qgslayer])
iface.legendInterface().refreshLayerSymbology(qgslayer)
else:
if prjSetting:
settings.setValue('/Projections/defaultBehaviour', prjSetting)
raise RuntimeError('Could not load layer: ' + unicode(fileName)
+ '\nCheck the processing framework log to look for errors')
if prjSetting:
settings.setValue('/Projections/defaultBehaviour', prjSetting)
return qgslayer
def getObjectFromName(name):
layers = getAllLayers()
for layer in layers:
if layer.name() == name:
return layer
def getObject(uriorname):
ret = getObjectFromName(uriorname)
if ret is None:
ret = getObjectFromUri(uriorname)
return ret
def normalizeLayerSource(source):
if isWindows():
source = source.replace('\\', '/')
source = source.replace('"', "'")
return source
def getObjectFromUri(uri, forceLoad=True):
"""Returns an object (layer/table) given a source definition.
if forceLoad is true, it tries to load it if it is not currently open
Otherwise, it will return the object only if it is loaded in QGIS.
"""
if uri is None:
return None
if uri in _loadedLayers:
return _loadedLayers[uri]
layers = getRasterLayers()
for layer in layers:
if normalizeLayerSource(layer.source()) == normalizeLayerSource(uri):
return layer
layers = getVectorLayers()
for layer in layers:
if normalizeLayerSource(layer.source()) == normalizeLayerSource(uri):
return layer
tables = getTables()
for table in tables:
if normalizeLayerSource(table.source()) == normalizeLayerSource(uri):
return table
if forceLoad:
settings = QSettings()
prjSetting = settings.value('/Projections/defaultBehaviour')
settings.setValue('/Projections/defaultBehaviour', '')
# If is not opened, we open it
name = os.path.basename(uri)
for provider in ['ogr', 'postgres', 'spatialite', 'virtual']:
layer = QgsVectorLayer(uri, name, provider)
if layer.isValid():
if prjSetting:
settings.setValue('/Projections/defaultBehaviour', prjSetting)
_loadedLayers[normalizeLayerSource(layer.source())] = layer
return layer
layer = QgsRasterLayer(uri, name)
if layer.isValid():
if prjSetting:
settings.setValue('/Projections/defaultBehaviour', prjSetting)
_loadedLayers[normalizeLayerSource(layer.source())] = layer
return layer
if prjSetting:
settings.setValue('/Projections/defaultBehaviour', prjSetting)
else:
return None
def exportVectorLayer(layer, supported=None):
"""Takes a QgsVectorLayer and returns the filename to refer to it,
which allows external apps which support only file-based layers to
use it. It performs the necessary export in case the input layer
    is not in a standard format suitable for most applications, is
    a remote or db-based (non-file based) one, or if there is a
selection and it should be used, exporting just the selected
features.
Currently, the output is restricted to shapefiles, so anything
    that is not in a shapefile will get exported. It also exports to
    a new file if the original one contains non-ASCII characters.
"""
supported = supported or ["shp"]
settings = QSettings()
systemEncoding = settings.value('/UI/encoding', 'System')
output = getTempFilename('shp')
provider = layer.dataProvider()
useSelection = ProcessingConfig.getSetting(ProcessingConfig.USE_SELECTED)
if useSelection and layer.selectedFeatureCount() != 0:
writer = QgsVectorFileWriter(output, systemEncoding,
layer.pendingFields(),
provider.geometryType(), layer.crs())
selection = layer.selectedFeatures()
for feat in selection:
writer.addFeature(feat)
del writer
return output
else:
isASCII = True
try:
unicode(layer.source()).decode('ascii')
except UnicodeEncodeError:
isASCII = False
ext = os.path.splitext(layer.source())[-1].lower()[1:]
if ext not in supported or not isASCII:
writer = QgsVectorFileWriter(
output, systemEncoding,
layer.pendingFields(), provider.geometryType(),
layer.crs()
)
for feat in layer.getFeatures():
writer.addFeature(feat)
del writer
return output
else:
return unicode(layer.source())
def exportRasterLayer(layer):
"""Takes a QgsRasterLayer and returns the filename to refer to it,
which allows external apps which support only file-based layers to
use it. It performs the necessary export in case the input layer
is not in a standard format suitable for most applications, it is
a remote one or db-based (non-file based) one.
    Currently, the output is restricted to GeoTIFF. Formats already
    supported by GDAL are not exported, since it is assumed that the
    external app uses GDAL to read the layer.
"""
# TODO: Do the conversion here
return unicode(layer.source())
def exportTable(table):
"""Takes a QgsVectorLayer and returns the filename to refer to its
attributes table, which allows external apps which support only
file-based layers to use it.
It performs the necessary export in case the input layer is not in
    a standard format suitable for most applications, or it is a remote
    or db-based (non-file based) one.
    Currently, the output is restricted to DBF. It also exports to a new
    file if the original one contains non-ASCII characters.
"""
settings = QSettings()
systemEncoding = settings.value('/UI/encoding', 'System')
output = getTempFilename()
provider = table.dataProvider()
isASCII = True
try:
unicode(table.source()).decode('ascii')
except UnicodeEncodeError:
isASCII = False
isDbf = unicode(table.source()).endswith('dbf') \
or unicode(table.source()).endswith('shp')
if not isDbf or not isASCII:
writer = QgsVectorFileWriter(output, systemEncoding,
provider.fields(), QGis.WKBNoGeometry,
QgsCoordinateReferenceSystem('4326'))
for feat in table.getFeatures():
writer.addFeature(feat)
del writer
return output + '.dbf'
else:
filename = unicode(table.source())
if unicode(table.source()).endswith('shp'):
return filename[:-3] + 'dbf'
else:
return filename
def getRasterSublayer(path, param):
layer = QgsRasterLayer(path)
try:
# If the layer is a raster layer and has multiple sublayers, let the user chose one.
# Based on QgisApp::askUserForGDALSublayers
if layer and param.showSublayersDialog and layer.dataProvider().name() == "gdal" and len(layer.subLayers()) > 1:
layers = []
subLayerNum = 0
# simplify raster sublayer name
for subLayer in layer.subLayers():
# if netcdf/hdf use all text after filename
if bool(re.match('netcdf', subLayer, re.I)) or bool(re.match('hdf', subLayer, re.I)):
subLayer = subLayer.split(path)[1]
subLayer = subLayer[1:]
else:
                    # remove driver name and file name (str.replace returns
                    # a new string, so the result must be reassigned)
                    subLayer = subLayer.replace(subLayer.split(":")[0], "")
                    subLayer = subLayer.replace(path, "")
# remove any : or " left over
if subLayer.startswith(":"):
subLayer = subLayer[1:]
if subLayer.startswith("\""):
subLayer = subLayer[1:]
if subLayer.endswith(":"):
subLayer = subLayer[:-1]
if subLayer.endswith("\""):
subLayer = subLayer[:-1]
layers.append(unicode(subLayerNum) + "|" + subLayer)
subLayerNum = subLayerNum + 1
# Use QgsSublayersDialog
# Would be good if QgsSublayersDialog had an option to allow only one sublayer to be selected
chooseSublayersDialog = QgsSublayersDialog(QgsSublayersDialog.Gdal, "gdal")
chooseSublayersDialog.populateLayerTable(layers, "|")
if chooseSublayersDialog.exec_():
return layer.subLayers()[chooseSublayersDialog.selectionIndexes()[0]]
else:
# If user pressed cancel then just return the input path
return path
else:
# If the sublayers selection dialog is not to be shown then just return the input path
return path
except:
# If the layer is not a raster layer, then just return the input path
return path
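

# A minimal usage sketch (not part of the original module; only meaningful
# inside a running QGIS session with the Processing plugin loaded). The
# paths below are placeholders.
#
#     from processing.tools import dataobjects
#
#     layer = dataobjects.getObject('/tmp/parcels.shp')  # by name or URI
#     if layer is not None:
#         print(layer.name(), dataobjects.extent([layer]))
#     dataobjects.load('/tmp/result.shp', name='result')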
| [
"[email protected]"
] | |
346dab3b4f521560c406a72391886bc26d742ca6 | febb7a4b889c2f40637e2b688eb770cf0809226f | /fython/test/importpec_fort/fort_slice_import_test.py | f267833fb682e1cdab2266b4ad94ed42a1be4a25 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | nicolasessisbreton/fython | 68253552c626640b5efc2a7cea9384c8e0425c08 | 988f5a94cee8b16b0000501a22239195c73424a1 | refs/heads/master | 2021-01-10T07:10:06.793158 | 2017-08-25T17:27:05 | 2017-08-25T17:27:05 | 50,076,320 | 48 | 3 | null | 2016-08-21T17:16:12 | 2016-01-21T02:30:31 | Python | UTF-8 | Python | false | false | 351 | py | s="""
.a.f90
module a
integer :: x_a = 10
integer :: y_a = 20
end module
.b.fy
import .a(
x_a,
y_a = y,
)
int x = 1
print 'in b {:x} {:x_a} {:y}'
"""
from fython.test import *
shell('rm -rf a/ a.* b.* c.*')
writer(s)
w = load('.b', force=1, release=1, verbose=0, run_main=0)
# print(open(w.module.url.fortran_path, 'r').read())
| [
"[email protected]"
] | |
f40d8834fbc04247849b75f9d35c610536b825fc | af7466d6abfcce9e02efe91abe1875fbcf8d04aa | /tests/test_handlers.py | 08958e97ccff36b29be6c13a70d7d8ae978fcf34 | [] | no_license | NYPL/sfr-oclc-catalog-lookup | eb1472d1a6cab85734b4c0ac6648de846e5b00fb | 4bf3bde518211870d6c20cde840c57bd83c1816c | refs/heads/development | 2020-04-15T07:45:59.184860 | 2020-02-05T21:22:32 | 2020-02-05T21:22:32 | 164,501,003 | 1 | 1 | null | 2020-02-05T21:22:34 | 2019-01-07T21:43:14 | Python | UTF-8 | Python | false | false | 2,023 | py | import json
import unittest
from unittest.mock import patch, mock_open, call
# Set this variable here as it gets checked at loadtime
import os
os.environ['OUTPUT_REGION'] = 'us-test-1'
from service import handler
from helpers.errorHelpers import NoRecordsReceived, OCLCError, DataError
from lib.outPutManager import OutputManager
class TestHandler(unittest.TestCase):
@patch('service.fetchData', return_value='oclcResponse')
@patch.object(OutputManager, 'formatResponse', return_value='outObject')
def test_handler_clean(self, mockResponse, mockFetch):
testRec = {
'queryStringParameters': {
'identifier': '000000000',
'type': 'oclc'
}
}
resp = handler(testRec, None)
self.assertEqual(resp, 'outObject')
mockFetch.assert_called_once_with('000000000', 'oclc')
mockResponse.assert_called_once_with(200, 'oclcResponse')
@patch.object(OutputManager, 'formatResponse', return_value='outObject')
def test_handler_error_bad_parameters(self, mockResponse):
testRec = {
'queryStringParameters': {
'identifier': '000000000',
}
}
resp = handler(testRec, None)
self.assertEqual(resp, 'outObject')
mockResponse.assert_called_once_with(
400,
{'message': 'GET query must include identifier and type'}
)
@patch('service.fetchData', side_effect=OCLCError('Test Error'))
@patch.object(OutputManager, 'formatResponse', return_value='outObject')
def test_handler_internal_error(self, mockResponse, mockFetch):
testRec = {
'queryStringParameters': {
'identifier': '000000000',
'type': 'oclc'
}
}
resp = handler(testRec, None)
self.assertEqual(resp, 'outObject')
mockFetch.assert_called_once_with('000000000', 'oclc')
mockResponse.assert_called_once_with(500, {'message': 'Test Error'})
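

# Hypothetical convenience entry point (not in the original module): lets this
# file be run directly with `python test_handlers.py` as well as via a runner.
if __name__ == '__main__':
    unittest.main()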
| [
"[email protected]"
] | |
4f89c0d7604b00adf91883ce24a5ba05046c75d8 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/powerplatform/enterprise_policy.py | c047284e41d10afb065e0322ff9338fa8b28705c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 16,325 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['EnterprisePolicyArgs', 'EnterprisePolicy']
@pulumi.input_type
class EnterprisePolicyArgs:
def __init__(__self__, *,
kind: pulumi.Input[Union[str, 'EnterprisePolicyKind']],
resource_group_name: pulumi.Input[str],
encryption: Optional[pulumi.Input['PropertiesEncryptionArgs']] = None,
enterprise_policy_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['EnterprisePolicyIdentityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
lockbox: Optional[pulumi.Input['PropertiesLockboxArgs']] = None,
network_injection: Optional[pulumi.Input['PropertiesNetworkInjectionArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a EnterprisePolicy resource.
:param pulumi.Input[Union[str, 'EnterprisePolicyKind']] kind: The kind (type) of Enterprise Policy.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input['PropertiesEncryptionArgs'] encryption: The encryption settings for a configuration store.
:param pulumi.Input[str] enterprise_policy_name: Name of the EnterprisePolicy.
:param pulumi.Input['EnterprisePolicyIdentityArgs'] identity: The identity of the EnterprisePolicy.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input['PropertiesLockboxArgs'] lockbox: Settings concerning lockbox.
:param pulumi.Input['PropertiesNetworkInjectionArgs'] network_injection: Settings concerning network injection.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if encryption is not None:
pulumi.set(__self__, "encryption", encryption)
if enterprise_policy_name is not None:
pulumi.set(__self__, "enterprise_policy_name", enterprise_policy_name)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if lockbox is not None:
pulumi.set(__self__, "lockbox", lockbox)
if network_injection is not None:
pulumi.set(__self__, "network_injection", network_injection)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[Union[str, 'EnterprisePolicyKind']]:
"""
The kind (type) of Enterprise Policy.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[Union[str, 'EnterprisePolicyKind']]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def encryption(self) -> Optional[pulumi.Input['PropertiesEncryptionArgs']]:
"""
The encryption settings for a configuration store.
"""
return pulumi.get(self, "encryption")
@encryption.setter
def encryption(self, value: Optional[pulumi.Input['PropertiesEncryptionArgs']]):
pulumi.set(self, "encryption", value)
@property
@pulumi.getter(name="enterprisePolicyName")
def enterprise_policy_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the EnterprisePolicy.
"""
return pulumi.get(self, "enterprise_policy_name")
@enterprise_policy_name.setter
def enterprise_policy_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "enterprise_policy_name", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['EnterprisePolicyIdentityArgs']]:
"""
The identity of the EnterprisePolicy.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['EnterprisePolicyIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def lockbox(self) -> Optional[pulumi.Input['PropertiesLockboxArgs']]:
"""
Settings concerning lockbox.
"""
return pulumi.get(self, "lockbox")
@lockbox.setter
def lockbox(self, value: Optional[pulumi.Input['PropertiesLockboxArgs']]):
pulumi.set(self, "lockbox", value)
@property
@pulumi.getter(name="networkInjection")
def network_injection(self) -> Optional[pulumi.Input['PropertiesNetworkInjectionArgs']]:
"""
Settings concerning network injection.
"""
return pulumi.get(self, "network_injection")
@network_injection.setter
def network_injection(self, value: Optional[pulumi.Input['PropertiesNetworkInjectionArgs']]):
pulumi.set(self, "network_injection", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class EnterprisePolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
encryption: Optional[pulumi.Input[pulumi.InputType['PropertiesEncryptionArgs']]] = None,
enterprise_policy_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['EnterprisePolicyIdentityArgs']]] = None,
kind: Optional[pulumi.Input[Union[str, 'EnterprisePolicyKind']]] = None,
location: Optional[pulumi.Input[str]] = None,
lockbox: Optional[pulumi.Input[pulumi.InputType['PropertiesLockboxArgs']]] = None,
network_injection: Optional[pulumi.Input[pulumi.InputType['PropertiesNetworkInjectionArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Definition of the EnterprisePolicy.
API Version: 2020-10-30-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['PropertiesEncryptionArgs']] encryption: The encryption settings for a configuration store.
:param pulumi.Input[str] enterprise_policy_name: Name of the EnterprisePolicy.
:param pulumi.Input[pulumi.InputType['EnterprisePolicyIdentityArgs']] identity: The identity of the EnterprisePolicy.
:param pulumi.Input[Union[str, 'EnterprisePolicyKind']] kind: The kind (type) of Enterprise Policy.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[pulumi.InputType['PropertiesLockboxArgs']] lockbox: Settings concerning lockbox.
:param pulumi.Input[pulumi.InputType['PropertiesNetworkInjectionArgs']] network_injection: Settings concerning network injection.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: EnterprisePolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Definition of the EnterprisePolicy.
API Version: 2020-10-30-preview.
:param str resource_name: The name of the resource.
:param EnterprisePolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(EnterprisePolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
encryption: Optional[pulumi.Input[pulumi.InputType['PropertiesEncryptionArgs']]] = None,
enterprise_policy_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['EnterprisePolicyIdentityArgs']]] = None,
kind: Optional[pulumi.Input[Union[str, 'EnterprisePolicyKind']]] = None,
location: Optional[pulumi.Input[str]] = None,
lockbox: Optional[pulumi.Input[pulumi.InputType['PropertiesLockboxArgs']]] = None,
network_injection: Optional[pulumi.Input[pulumi.InputType['PropertiesNetworkInjectionArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = EnterprisePolicyArgs.__new__(EnterprisePolicyArgs)
__props__.__dict__["encryption"] = encryption
__props__.__dict__["enterprise_policy_name"] = enterprise_policy_name
__props__.__dict__["identity"] = identity
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__.__dict__["kind"] = kind
__props__.__dict__["location"] = location
__props__.__dict__["lockbox"] = lockbox
__props__.__dict__["network_injection"] = network_injection
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:powerplatform:EnterprisePolicy"), pulumi.Alias(type_="azure-native:powerplatform/v20201030preview:EnterprisePolicy"), pulumi.Alias(type_="azure-nextgen:powerplatform/v20201030preview:EnterprisePolicy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(EnterprisePolicy, __self__).__init__(
'azure-native:powerplatform:EnterprisePolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'EnterprisePolicy':
"""
Get an existing EnterprisePolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = EnterprisePolicyArgs.__new__(EnterprisePolicyArgs)
__props__.__dict__["encryption"] = None
__props__.__dict__["identity"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["location"] = None
__props__.__dict__["lockbox"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_injection"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return EnterprisePolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def encryption(self) -> pulumi.Output[Optional['outputs.PropertiesResponseEncryption']]:
"""
The encryption settings for a configuration store.
"""
return pulumi.get(self, "encryption")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.EnterprisePolicyIdentityResponse']]:
"""
The identity of the EnterprisePolicy.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
The kind (type) of Enterprise Policy.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def lockbox(self) -> pulumi.Output[Optional['outputs.PropertiesResponseLockbox']]:
"""
Settings concerning lockbox.
"""
return pulumi.get(self, "lockbox")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInjection")
def network_injection(self) -> pulumi.Output[Optional['outputs.PropertiesResponseNetworkInjection']]:
"""
Settings concerning network injection.
"""
return pulumi.get(self, "network_injection")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
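

# A minimal usage sketch (not part of the generated SDK): declaring an
# EnterprisePolicy from a Pulumi program. The resource group, kind and
# location values are placeholders, and running it requires a configured
# Pulumi project plus Azure credentials.
#
#     import pulumi
#     import pulumi_azure_native as azure_native
#
#     policy = azure_native.powerplatform.EnterprisePolicy(
#         "examplePolicy",
#         resource_group_name="example-rg",
#         kind="Lockbox",
#         location="westus",
#     )
#     pulumi.export("policyName", policy.name)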
| [
"[email protected]"
] | |
9b2c0ade2bac3fb04dd982ba529d79dd7992559d | 5db0fab37c2b8a618d85d3b60fab9f806c416474 | /src/python/pants/backend/codegen/export_codegen_goal_test.py | 98f758e8e1525846c7e650945edeebd056882484 | [
"Apache-2.0"
] | permissive | pantsbuild/pants | 4988d1ac5474ec95f94ce2218aeb759401e4b011 | 98cbda8545f0d58c586ed2daa76fefd729d5e0d5 | refs/heads/main | 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 | Apache-2.0 | 2023-09-14T19:33:33 | 2012-12-17T17:39:04 | Python | UTF-8 | Python | false | false | 3,804 | py | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pathlib import Path
import pytest
from pants.backend.codegen.export_codegen_goal import ExportCodegen
from pants.backend.codegen.export_codegen_goal import rules as write_codegen_rules
from pants.core.target_types import FileSourceField, ResourceSourceField
from pants.core.util_rules import distdir
from pants.engine.fs import CreateDigest, FileContent, Snapshot
from pants.engine.rules import Get, rule
from pants.engine.target import (
GeneratedSources,
GenerateSourcesRequest,
MultipleSourcesField,
SingleSourceField,
Target,
)
from pants.engine.unions import UnionRule
from pants.testutil.rule_runner import RuleRunner
class Gen1Sources(MultipleSourcesField):
pass
class Gen2Sources(SingleSourceField):
pass
class Gen1Target(Target):
alias = "gen1"
core_fields = (Gen1Sources,)
class Gen2Target(Target):
alias = "gen2"
core_fields = (Gen2Sources,)
class Gen1Request(GenerateSourcesRequest):
input = Gen1Sources
output = FileSourceField
class Gen2Request(GenerateSourcesRequest):
input = Gen2Sources
output = ResourceSourceField
class GenNoExportRequest(GenerateSourcesRequest):
"""The presence of this generator is simply to verify that is not used when running the export-
codegen goal."""
input = Gen1Sources
output = Gen2Sources
exportable = False
class Gen1DuplicatedRequest(GenerateSourcesRequest):
input = Gen1Sources
output = ResourceSourceField
@rule
async def gen1(_: Gen1Request) -> GeneratedSources:
result = await Get(Snapshot, CreateDigest([FileContent("assets/README.md", b"Hello!")]))
return GeneratedSources(result)
@rule
async def gen2(_: Gen2Request) -> GeneratedSources:
result = await Get(Snapshot, CreateDigest([FileContent("src/haskell/app.hs", b"10 * 4")]))
return GeneratedSources(result)
@rule
async def gen_no_export(_: GenNoExportRequest) -> GeneratedSources:
assert False, "Should not ever get here as `GenNoExportRequest.exportable==False`"
@rule
async def gen1_duplicated(_: Gen1DuplicatedRequest) -> GeneratedSources:
result = await Get(Snapshot, CreateDigest([FileContent("assets/DUPLICATED.md", b"Hello!")]))
return GeneratedSources(result)
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*write_codegen_rules(),
gen1,
gen2,
gen_no_export,
gen1_duplicated,
UnionRule(GenerateSourcesRequest, Gen1Request),
UnionRule(GenerateSourcesRequest, Gen2Request),
UnionRule(GenerateSourcesRequest, GenNoExportRequest),
UnionRule(GenerateSourcesRequest, Gen1DuplicatedRequest),
*distdir.rules(),
],
target_types=[Gen1Target, Gen2Target],
)
def test_no_codegen_targets(rule_runner: RuleRunner, caplog) -> None:
result = rule_runner.run_goal_rule(ExportCodegen)
assert result.exit_code == 0
assert len(caplog.records) == 1
assert "No codegen files/targets matched. All codegen target types: gen1, gen2" in caplog.text
def test_export_codegen(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{"BUILD": "gen1(name='gen1')\ngen2(name='gen2', source='foo.ext')\n", "foo.ext": ""}
)
result = rule_runner.run_goal_rule(ExportCodegen, args=["::"])
assert result.exit_code == 0
parent_dir = Path(rule_runner.build_root, "dist", "codegen")
assert (parent_dir / "assets" / "README.md").read_text() == "Hello!"
assert (parent_dir / "assets" / "DUPLICATED.md").read_text() == "Hello!"
assert (parent_dir / "src" / "haskell" / "app.hs").read_text() == "10 * 4"
| [
"[email protected]"
] | |
133365e71eb1d8396eba4ae1e26a95f054b6856d | bd5e611a2d177b3f3ca58965b53e8ce0e6d2000b | /assignments/A5/A5Part3.py | 1ae4028afbeec415b763cf726b263acd06b4652a | [] | no_license | jiemojiemo/audio_signal_processing_assignments | 680f921ad8984d1869f10fab0eae183ef19cb808 | 0d26ada375c3a54fe9dda3e490880168a17769a2 | refs/heads/master | 2020-04-04T14:07:36.649933 | 2018-11-09T13:50:26 | 2018-11-09T13:50:26 | 155,987,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,403 | py | import numpy as np
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../software/models/'))
import stft
import utilFunctions as UF
import sineModel as SM
import matplotlib.pyplot as plt
"""
A5-Part-3: Tracking sinusoids of different amplitudes
Perform a "good" sinusoidal analysis of a signal with two sinusoidal components of different amplitudes
by specifying the parameters 'window type' and 'peak picking threshold' in the function mainlobeTracker()
below. The function should return the parameters used, true and estimated tracks of frequency components,
and their associated time stamps.
We will consider a signal that has two sinusoidal components with a very large difference in their
amplitude. We will use a synthetically generated signal with frequency components 440 Hz and 602 Hz,
s = sin(2*pi*440*t) + 2e-3*sin(2*pi*602*t). As you see the amplitude difference is large. You will
use the sound sines-440-602-hRange.wav. Listen to the sound and use sms-tools GUI or sonic visualizer
to see its spectrogram. Notice the difference in the amplitudes of its components.
You will not write any additional code in this question, but modify the parameters of the code to obtain
the best possible results. There are three functions we have written for you. Please go through each
function and understand it, but do not modify any of it.
1. mainlobeTracker(): This is the main function. Uses sineModel.py for sinusoidal analysis of the input
sound. It takes an input audio file and uses the function sineModel.sineModelAnal(), tracks the mainlobes
of the two sinusoids to obtain the two frequency tracks (fTrackEst) in the signal. It also computes the
estimation error (meanErr) in frequency using the true frequency tracks obtained using genTrueFreqTracks().
mainlobeTracker() calls the following two functions:
2. genTimeStamps(): Generates the time stamps at which the sinusoid frequencies are estimated (one
value per audio frame)
3. genTrueFreqTracks(): For the input sound sines-440-602-hRange.wav, the function generates the true
frequency values, so that we can compare the true and the estimated frequency values.
We will use sinusoidal Model to analyse this sound and extract the two components. We will use the
sineModel.sineModelAnal() function for analysis. The code for analysis is already provided below with
some parameters we have fixed. For analysis, we will use a window length (M) of 2047 samples, an FFT
size (N) of 4096 samples and hop size (H) of 128 samples. For sine tracking, we set the minimum sine
duration (minSineDur) to 0.02 seconds, freqDevOffset to 10 Hz and freqDevSlope to its default value of
0.001. Since we need only two frequency component estimates at every frame, we set maxnSines = 2.
Choose the parameters window and the peak picking threshold (t) such that the mean estimation error of
each frequency components is less than 2 Hz. There is a range of values of M and t for which this is
true and all of those values will be considered correct answers. You can plot the estimated and true
frequency tracks to visualize the accuracy of estimation. The output is the set of parameters you used:
window, t, the time stamps, estimated and the true frequency tracks. Note that choosing the wrong window
might lead to tracking of one of the sidelobes of the high amplitude sinusoid instead of the mainlobe of
the low amplitude sinusoid.
We have written the function mainlobeTracker() and you have to edit the window and t values. For the window, choose
one of 'boxcar', 'hanning', 'hamming', 'blackman', or 'blackmanharris'. t is specified in negative dB. These two
parameters are marked as XX and you can edit the values as needed.
As an example, choosing window = 'boxcar', t = -80.0, the mean estimation error is [0.142, 129.462] Hz.
"""
def mainlobeTracker(inputFile = '../../sounds/sines-440-602-hRange.wav'):
"""
Input:
inputFile (string): wav file including the path
Output:
window (string): The window type used for analysis
t (float) = peak picking threshold (negative dB)
tStamps (numpy array) = A Kx1 numpy array of time stamps at which the frequency components were estimated
fTrackEst = A Kx2 numpy array of estimated frequency values, one row per time frame, one column per component
fTrackTrue = A Kx2 numpy array of true frequency values, one row per time frame, one column per component
"""
# Analysis parameters: Modify values of the parameters marked XX
window = 'blackmanharris' # Window type
t = -80 # threshold (negative dB)
### Go through the code below and understand it, do not modify anything ###
M = 2047 # Window size
N = 4096 # FFT Size
H = 128 # Hop size in samples
maxnSines = 2
minSineDur = 0.02
freqDevOffset = 10
freqDevSlope = 0.001
# read input sound
fs, x = UF.wavread(inputFile)
w = get_window(window, M) # Compute analysis window
tStamps = genTimeStamps(x.size, M, fs, H) # Generate the tStamps to return
# analyze the sound with the sinusoidal model
fTrackEst, mTrackEst, pTrackEst = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
fTrackTrue = genTrueFreqTracks(tStamps) # Generate the true frequency tracks
tailF = 20
# Compute mean estimation error. 20 frames at the beginning and end not used to compute error
meanErr = np.mean(np.abs(fTrackTrue[tailF:-tailF,:] - fTrackEst[tailF:-tailF,:]),axis=0)
print ("Mean estimation error = " + str(meanErr) + ' Hz') # Print the error to terminal
# Plot the estimated and true frequency tracks
mX, pX = stft.stftAnal(x, w, N, H)
maxplotfreq = 900.0
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(tStamps, binFreq, np.transpose(mX[:,:int(N*maxplotfreq/fs+1)]), cmap='hot_r')
plt.plot(tStamps,fTrackTrue, 'o-', color = 'c', linewidth=3.0)
plt.plot(tStamps,fTrackEst, color = 'y', linewidth=2.0)
plt.legend(('True f1', 'True f2', 'Estimated f1', 'Estimated f2'))
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.autoscale(tight=True)
return window, float(t), tStamps, fTrackEst, fTrackTrue # Output returned
### Do not modify this function
def genTimeStamps(xlen, M, fs, H):
# Generates the timeStamps as needed for output
hM1 = int(np.floor((M+1)/2))
hM2 = int(np.floor(M/2))
xlen = xlen + 2*hM2
pin = hM1
pend = xlen - hM1
tStamps = np.arange(pin,pend,H)/float(fs)
return tStamps
### Do not modify this function
def genTrueFreqTracks(tStamps):
# Generates the true frequency values to compute estimation error
# Specifically to sines-440-602-hRange.wav
fTrack = np.zeros((len(tStamps),2))
fTrack[:,0] = np.transpose(440*np.ones((len(tStamps),1)))
fTrack[:,1] = np.transpose(602*np.ones((len(tStamps),1)))
return fTrack
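

if __name__ == '__main__':
    # Hypothetical driver (not part of the original assignment stub): run the
    # tracker on the default sms-tools sound and display the plot created
    # inside mainlobeTracker(). Assumes the default relative sound path exists.
    window, t, tStamps, fTrackEst, fTrackTrue = mainlobeTracker()
    plt.show()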
| [
"[email protected]"
] | |
60cc252d5efc1565cc179eb8a6d0416f4faf6e6c | 422e3fb2aeaa9853ba6ac809bc3d0f1ee507ae27 | /funcionario/urls.py | 20fd28ee4e17eb598a1defa4a92c7f3292785a4c | [] | no_license | redcliver/oldRodao | e279343de24952b7b86df50ff07f34be83881db6 | 0acc4440f9b7f92599fe4ef7a1d1af5bed52a81b | refs/heads/master | 2020-03-07T19:12:14.066837 | 2018-07-25T20:15:50 | 2018-07-25T20:15:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.func),
url(r'^busca', views.busca),
url(r'^editar', views.editar),
] | [
"[email protected]"
] | |
76f610cdb180210a08cc572c6227d53a33bab208 | fcde32709c62b8ee86da459bb7c8eee52c848118 | /爬虫1905/day07/京东.py | 564987878e9830e8db1ae8ffd2b55bd530e805a6 | [] | no_license | klaus2015/py_base | 6b92d362c3d7dc0e09205a037f4d580381dac94d | ec32c731c1c2f6a0dab87f1d167397e4fa86b8de | refs/heads/master | 2022-07-28T15:49:30.383648 | 2020-05-11T15:31:43 | 2020-05-11T15:31:43 | 261,777,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | from selenium import webdriver
import time
class JDSpider:
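    """Selenium spider that searches JD.com for a keyword and scrapes the
    name, price, comment count and shop of every listing, page by page."""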
def __init__(self):
self.url = 'https://www.jd.com/'
self.browser = webdriver.Chrome()
def get_html(self):
so = '//*[@id="key"]'
button = '//*[@id="search"]/div/div[2]/button'
self.browser.get(self.url)
        self.browser.find_element_by_xpath(so).send_keys('爬虫书')  # search keyword: "web-scraping books"
self.browser.find_element_by_xpath(button).click()
time.sleep(4)
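    # Note: the fixed time.sleep() pauses in this class are simple but
    # brittle. An explicit wait is usually more robust - illustrative
    # sketch only, assuming the result list keeps the 'J_goodsList' id:
    #   from selenium.webdriver.common.by import By
    #   from selenium.webdriver.support.ui import WebDriverWait
    #   from selenium.webdriver.support import expected_conditions as EC
    #   WebDriverWait(self.browser, 10).until(
    #       EC.presence_of_element_located((By.ID, 'J_goodsList')))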
def parse_html(self):
li_list = self.browser.find_elements_by_xpath('//*[@id="J_goodsList"]/ul/li')
for li in li_list:
item = {}
item['price'] = li.find_element_by_xpath('.//div[@class="p-price"]').text.strip()
item['name'] = li.find_element_by_xpath('.//div[@class="p-name"]/a/em').text.strip()
item['comment'] = li.find_element_by_xpath('.//div[@class="p-commit"]/strong').text.strip()
item['market'] = li.find_element_by_xpath('.//div[@class="p-shopnum"]').text.strip()
print(item)
def main(self):
self.get_html()
while True:
self.parse_html()
if self.browser.page_source.find('pn-next disabled') == -1:
                # Not the last page: locate and click the next-page button
self.browser.find_element_by_class_name('pn-next').click()
time.sleep(3)
else:
break
if __name__ == '__main__':
j = JDSpider()
j.main()
| [
"[email protected]"
] | |
e145f42d9f464a5eefd91e30bdb2bcfe77fba0d7 | caaf1b0754db1e676c37a6f1e58f19183754e654 | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2023_01_01/operations/_private_link_resources_operations.py | ee2978f9ad1fbe77a9f34143f1f9ed6b203ac70f | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | rdomenzain/azure-sdk-for-python | 45dfb39121a0abda048c22e7309733a56259f525 | 58984255aeb904346b6958c5ba742749a2cc7d1b | refs/heads/master | 2023-07-07T06:53:12.967120 | 2023-07-04T16:27:37 | 2023-07-04T16:27:37 | 258,050,134 | 0 | 0 | MIT | 2020-04-23T00:12:14 | 2020-04-23T00:12:13 | null | UTF-8 | Python | false | false | 6,733 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class PrivateLinkResourcesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2023_01_01.ContainerServiceClient`'s
:attr:`private_link_resources` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def list(
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> _models.PrivateLinkResourcesListResult:
"""Gets a list of private link resources in the specified managed cluster.
To learn more about private clusters, see:
https://docs.microsoft.com/azure/aks/private-clusters.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourcesListResult or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2023_01_01.models.PrivateLinkResourcesListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-01"))
cls: ClsType[_models.PrivateLinkResourcesListResult] = kwargs.pop("cls", None)
request = build_list_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateLinkResourcesListResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources"
}
| [
"[email protected]"
] | |
836f58f4da328567c3d68ec5c6b81c8bb68ba7b9 | 7393ac624a6d6fda7d427ed58d6100a3e0fc8f53 | /workflows/launcher/launch_gruneisen_lammps_gan.py | b74df62c40fe577e7a1c3398e66569e85c688bb2 | [
"MIT"
] | permissive | abelcarreras/aiida_extensions | 9f460efe81bbf1817ccbd630ba253fbb2b074f3f | cce3fe537d041fdae87c5a60ce433e6de3fc30cf | refs/heads/master | 2020-12-30T14:20:16.919507 | 2017-10-12T10:19:29 | 2017-10-12T10:19:29 | 91,312,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,873 | py | from aiida import load_dbenv
load_dbenv()
from aiida.orm import Code, DataFactory, load_node, WorkflowFactory
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
import numpy as np
# GaN [-37000 bar <-> 23000 bar]
cell = [[ 3.1900000572, 0, 0],
[-1.5950000286, 2.762621076, 0],
[ 0.0, 0, 5.1890001297]]
scaled_positions=[(0.6666669, 0.3333334, 0.0000000),
(0.3333331, 0.6666663, 0.5000000),
(0.6666669, 0.3333334, 0.3750000),
(0.3333331, 0.6666663, 0.8750000)]
symbols=['Ga', 'Ga', 'N', 'N']
structure = StructureData(cell=cell)
positions = np.dot(scaled_positions, cell)  # fractional -> Cartesian, one row per atom
for position, symbol in zip(positions, symbols):
    structure.append_atom(position=position.tolist(), symbols=symbol)
structure.store()
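# Sanity note: Cartesian coordinates are the fractional (scaled) coordinates
# times the row-vector cell matrix, r_cart = s @ cell; each row of the
# positions array above is one atom's Cartesian position.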
lammps_machine = {
'num_machines': 1,
'parallel_env': 'mpi*',
'tot_num_mpiprocs': 16}
parameters_opt = {'relaxation': 'tri', # iso/aniso/tri
# 'pressure': 0.0, # In Gruneisen workflow this is ignored. Pressure is set in workflow arguments
'vmax': 0.000001, # Angstrom^3
'energy_tolerance': 1.0e-25, # eV
'force_tolerance': 1.0e-25, # eV angstrom
'max_evaluations': 1000000,
'max_iterations': 500000}
# Cluster information
machine_dict = {
'num_machines': 1,
'parallel_env':'mpi*',
'tot_num_mpiprocs' : 16}
# Phonopy input parameters
phonopy_parameters = {'supercell': [[3, 0, 0],
[0, 3, 0],
[0, 0, 3]],
'primitive': [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]],
'distance': 0.01,
'mesh': [40, 40, 40],
'symmetry_precision': 1e-5}
# GaN Tersoff
tersoff_gan = {'Ga Ga Ga': '1.0 0.007874 1.846 1.918000 0.75000 -0.301300 1.0 1.0 1.44970 410.132 2.87 0.15 1.60916 535.199',
'N N N' : '1.0 0.766120 0.000 0.178493 0.20172 -0.045238 1.0 1.0 2.38426 423.769 2.20 0.20 3.55779 1044.77',
'Ga Ga N' : '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 0.0 0.00000 0.00000 2.90 0.20 0.00000 0.00000',
'Ga N N' : '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 1.0 2.63906 3864.27 2.90 0.20 2.93516 6136.44',
'N Ga Ga': '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 1.0 2.63906 3864.27 2.90 0.20 2.93516 6136.44',
'N Ga N ': '1.0 0.766120 0.000 0.178493 0.20172 -0.045238 1.0 0.0 0.00000 0.00000 2.20 0.20 0.00000 0.00000',
'N N Ga': '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 0.0 0.00000 0.00000 2.90 0.20 0.00000 0.00000',
'Ga N Ga': '1.0 0.007874 1.846 1.918000 0.75000 -0.301300 1.0 0.0 0.00000 0.00000 2.87 0.15 0.00000 0.00000'}
potential ={'pair_style': 'tersoff',
'data': tersoff_gan}
# Collect workflow input data
wf_parameters = {
'structure': structure,
'phonopy_input': {'parameters': phonopy_parameters},
'input_force': {'code': 'lammps_force@boston',
'potential': potential,
'resources': lammps_machine},
'input_optimize': {'code': 'lammps_optimize@boston',
'potential': potential,
'parameters': parameters_opt,
'resources': lammps_machine},
}
#Submit workflow
WorkflowGruneisen = WorkflowFactory('wf_gruneisen_pressure')
wf = WorkflowGruneisen(params=wf_parameters, pre_optimize=False) # pressure in kb
wf.label = 'Gruneisen GaN'
wf.start()
print ('pk: {}'.format(wf.pk))
| [
"[email protected]"
] | |
5642ee169b46ea3d6ea0401b70298635c483407b | 304033f60097c489cbc60aab639be45ccdbef1a5 | /algorithms/boj/backtracking/15654.py | 8c2f51d67360895663b0b2f353ca4a026687c41b | [] | no_license | pgw928/TIL | 3d0c47c07bd1f5c73826daf8579a2b0e3f93cb95 | 765906f1e6eecad4ad8ec9bf704041433d7eb304 | refs/heads/master | 2023-06-29T05:46:30.039815 | 2021-08-10T17:38:11 | 2021-08-10T17:38:11 | 288,923,095 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | import sys
n, m = map(int, sys.stdin.readline().split())
nums = sorted(map(int, sys.stdin.readline().split()))
check = { i:None for i in range(1, m+1)}
def sol(count):
    # All m slots are filled: print the completed sequence.
    if count == m:
        print(' '.join(map(str, check.values())))
        return
    for j in nums:
        if is_promising(check, j):
            check[count+1] = j
            sol(count+1)
            check[count+1] = None
def is_promising(check, j):
i = 1
while check[i]!=None and i<m:
if check[i]==j:
return False
i += 1
return True
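# The solver prints every length-m sequence of distinct chosen numbers in
# lexicographic order (nums is sorted first). is_promising() is an O(m)
# membership scan, so the total work grows roughly like m * n**m.
# Illustrative example: n=3, m=1 with numbers "4 5 2" prints 2, 4 and 5,
# one per line.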
sol(0) | [
"[email protected]"
] | |
42e0ee2ee493ebde7c93afaf2deefaac986dbdec | ac5d55e43eb2f1fb8c47d5d2a68336eda181d222 | /DynamicProgramming/97. Interleaving String.py | 0e401e739d87b2005763a77deaff14da8c93d611 | [] | no_license | tinkle1129/Leetcode_Solution | 7a68b86faa37a3a8019626e947d86582549374b3 | 1520e1e9bb0c428797a3e5234e5b328110472c20 | refs/heads/master | 2021-01-11T22:06:45.260616 | 2018-05-28T03:10:50 | 2018-05-28T03:10:50 | 78,925,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | # - * - coding:utf8 - * - -
###########################################
# Author: Tinkle
# E-mail: [email protected]
# Name: Interleaving String.py
# Creation Time: 2017/7/29
###########################################
'''
Given s1, s2, s3, find whether s3 is formed by the interleaving of s1 and s2.
For example,
Given:
s1 = "aabcc",
s2 = "dbbca",
When s3 = "aadbbcbcac", return true.
When s3 = "aadbbbaccc", return false.
'''
class Solution(object):
def isInterleave(self, s1, s2, s3):
"""
:type s1: str
:type s2: str
:type s3: str
:rtype: bool
"""
if len(s1)+len(s2)!=len(s3): return False
dp=[[False for i in range(len(s2)+1)] for j in range(len(s1)+1)]
dp[0][0]=True
for i in range(1,len(s1)+1):
dp[i][0] = dp[i-1][0] and s3[i-1]==s1[i-1]
for i in range(1,len(s2)+1):
dp[0][i] = dp[0][i-1] and s3[i-1]==s2[i-1]
for i in range(1,len(s1)+1):
for j in range(1,len(s2)+1):
dp[i][j] = (dp[i-1][j] and s1[i-1]==s3[i+j-1]) or (dp[i][j-1] and s2[j-1]==s3[i+j-1])
return dp[len(s1)][len(s2)]
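# dp[i][j] records whether s3[:i+j] can be formed by interleaving s1[:i] and
# s2[:j]; the last character of s3[:i+j] must come from either s1[i-1] or
# s2[j-1], which is exactly the recurrence in the double loop above.
# Time and space are both O(len(s1) * len(s2)).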
s1 = 'aabcc'
s2 = 'dbbca'
s3 = 'aadbbcbcac'
S = Solution()
print(S.isInterleave(s1, s2, s3)) | [
"[email protected]"
] | |
f3bd4f44ced66abe1c50595c93e708d92e8a4c3d | d8fa0ed226e6dbc0f607961c8b711362942b120a | /maskrcnn_benchmark/layers/dcn/__init__.py | 5c6f9b3fefa0fdd9e778240fade7936f39573569 | [] | no_license | ltnghia/video-maskrcnn | 70d003f038f82156ec9a8dca4ce1b8ea1190792c | b0bc8eb8b43a8b45335625525eba620b389ba591 | refs/heads/master | 2021-06-19T11:13:29.058747 | 2021-04-01T02:19:07 | 2021-04-01T02:19:07 | 199,971,172 | 8 | 4 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | #
# Copied From [mmdetection](https://github.com/open-mmlab/mmdetection/tree/master/mmdet/ops/dcn)
# | [
"[email protected]"
] | |
1befb1e6f1472bf9f42e6013cb12d0bdc26e42e5 | c92f43835821d8df2b93dfd781f890e56891f849 | /Python3/8. String to Integer (atoi).py | 1cee13b16a12801e2e6ea04e8dba1f0ee1846ad4 | [] | no_license | iamdoublewei/Leetcode | f4ae87ed8c31537098790842a72cafa5747d8588 | e36f343aab109b051a9c3a96956c50b5580c7c15 | refs/heads/master | 2022-11-06T01:31:56.181800 | 2022-11-04T20:07:35 | 2022-11-04T20:07:35 | 71,944,123 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,819 | py | '''
Implement atoi which converts a string to an integer.
The function first discards as many whitespace characters as necessary until the first non-whitespace character is found. Then, starting from this character, takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value.
The string can contain additional characters after those that form the integral number, which are ignored and have no effect on the behavior of this function.
If the first sequence of non-whitespace characters in str is not a valid integral number, or if no such sequence exists because either str is empty or it contains only whitespace characters, no conversion is performed.
If no valid conversion could be performed, a zero value is returned.
Note:
Only the space character ' ' is considered as whitespace character.
Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [-2^31, 2^31 - 1]. If the numerical value is out of the range of representable values, INT_MAX (2^31 - 1) or INT_MIN (-2^31) is returned.
Example 1:
Input: "42"
Output: 42
Example 2:
Input: " -42"
Output: -42
Explanation: The first non-whitespace character is '-', which is the minus sign.
Then take as many numerical digits as possible, which gets 42.
Example 3:
Input: "4193 with words"
Output: 4193
Explanation: Conversion stops at digit '3' as the next character is not a numerical digit.
Example 4:
Input: "words and 987"
Output: 0
Explanation: The first non-whitespace character is 'w', which is not a numerical
digit or a +/- sign. Therefore no valid conversion could be performed.
Example 5:
Input: "-91283472332"
Output: -2147483648
Explanation: The number "-91283472332" is out of the range of a 32-bit signed integer.
Therefore INT_MIN (-2^31) is returned.
'''
class Solution:
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
str = str.strip()
if len(str) == 0: return 0
res = 0
sign = 0
for i, v in enumerate(str):
if not sign:
if v == '+':
sign = 1
elif v == '-':
sign = -1
elif v.isdigit():
sign = 1
res = res * 10 + int(v)
else:
return 0
continue
if sign:
if v.isdigit():
res = res * 10 + int(v)
else:
break
if sign * res >= 2147483647:
return 2147483647
elif sign * res <= -2147483648:
return -2147483648
else:
return sign * res
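# Quick sanity checks using the examples from the problem statement above
# (a local harness for illustration only, not part of LeetCode's judge):
if __name__ == '__main__':
    s = Solution()
    assert s.myAtoi("42") == 42
    assert s.myAtoi("   -42") == -42
    assert s.myAtoi("4193 with words") == 4193
    assert s.myAtoi("words and 987") == 0
    assert s.myAtoi("-91283472332") == -2147483648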
| [
"[email protected]"
] | |
e57044e23794af6adb39ae2d9931be5168ad2322 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/sympy/2017/8/test_numbers.py | a76459908eb0f939477bb65b03f7114c552993ea | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 57,354 | py | import decimal
from sympy import (Rational, Symbol, Float, I, sqrt, oo, nan, pi, E, Integer,
S, factorial, Catalan, EulerGamma, GoldenRatio, cos, exp,
Number, zoo, log, Mul, Pow, Tuple, latex, Gt, Lt, Ge, Le,
AlgebraicNumber, simplify, sin, fibonacci, RealField)
from sympy.core.compatibility import long
from sympy.core.power import integer_nthroot, isqrt
from sympy.core.logic import fuzzy_not
from sympy.core.numbers import (igcd, ilcm, igcdex, seterr, _intcache,
igcd2, igcd_lehmer, mpf_norm, comp, mod_inverse)
from sympy.core.mod import Mod
from sympy.utilities.decorator import conserve_mpmath_dps
from sympy.utilities.iterables import permutations
from sympy.utilities.pytest import XFAIL, raises
from mpmath import mpf
import mpmath
t = Symbol('t', real=False)
def same_and_same_prec(a, b):
# stricter matching for Floats
return a == b and a._prec == b._prec
def test_integers_cache():
python_int = 2**65 + 3175259
while python_int in _intcache or hash(python_int) in _intcache:
python_int += 1
sympy_int = Integer(python_int)
assert python_int in _intcache
assert hash(python_int) not in _intcache
sympy_int_int = Integer(sympy_int)
assert python_int in _intcache
assert hash(python_int) not in _intcache
sympy_hash_int = Integer(hash(python_int))
assert python_int in _intcache
assert hash(python_int) in _intcache
def test_seterr():
seterr(divide=True)
raises(ValueError, lambda: S.Zero/S.Zero)
seterr(divide=False)
assert S.Zero / S.Zero == S.NaN
def test_mod():
x = Rational(1, 2)
y = Rational(3, 4)
z = Rational(5, 18043)
assert x % x == 0
assert x % y == 1/S(2)
assert x % z == 3/S(36086)
assert y % x == 1/S(4)
assert y % y == 0
assert y % z == 9/S(72172)
assert z % x == 5/S(18043)
assert z % y == 5/S(18043)
assert z % z == 0
a = Float(2.6)
assert (a % .2) == 0
assert (a % 2).round(15) == 0.6
assert (a % 0.5).round(15) == 0.1
p = Symbol('p', infinite=True)
assert zoo % 0 == nan
assert oo % oo == nan
assert zoo % oo == nan
assert 5 % oo == nan
assert p % 5 == nan
# In these two tests, if the precision of m does
# not match the precision of the ans, then it is
# likely that the change made now gives an answer
# with degraded accuracy.
r = Rational(500, 41)
f = Float('.36', 3)
m = r % f
ans = Float(r % Rational(f), 3)
assert m == ans and m._prec == ans._prec
f = Float('8.36', 3)
m = f % r
ans = Float(Rational(f) % r, 3)
assert m == ans and m._prec == ans._prec
s = S.Zero
assert s % float(1) == S.Zero
# No rounding required since these numbers can be represented
# exactly.
assert Rational(3, 4) % Float(1.1) == 0.75
assert Float(1.5) % Rational(5, 4) == 0.25
assert Rational(5, 4).__rmod__(Float('1.5')) == 0.25
assert Float('1.5').__rmod__(Float('2.75')) == Float('1.25')
assert 2.75 % Float('1.5') == Float('1.25')
a = Integer(7)
b = Integer(4)
assert type(a % b) == Integer
assert a % b == Integer(3)
assert Integer(1) % Rational(2, 3) == Rational(1, 3)
assert Rational(7, 5) % Integer(1) == Rational(2, 5)
assert Integer(2) % 1.5 == 0.5
assert Integer(3).__rmod__(Integer(10)) == Integer(1)
assert Integer(10) % 4 == Integer(2)
assert 15 % Integer(4) == Integer(3)
h = Symbol('h')
m = h ** 2 % h
k = h ** -2 % h
l = Symbol('l', integer=True)
p = Symbol('p', integer=True, positive=True)
q = Symbol('q', integer=True, negative=True)
assert m == h * (h % 1)
assert k == Mod(h ** -2, h, evaluate=False)
assert Mod(l ** p, l) == 0
assert Mod(l ** 2, l) == 0
assert (l ** q % l) == Mod(l ** q, l, evaluate=False)
assert (l ** -2 % l) == Mod(l ** -2, l, evaluate=False)
def test_divmod():
assert divmod(S(12), S(8)) == Tuple(1, 4)
assert divmod(-S(12), S(8)) == Tuple(-2, 4)
assert divmod(S(0), S(1)) == Tuple(0, 0)
raises(ZeroDivisionError, lambda: divmod(S(0), S(0)))
raises(ZeroDivisionError, lambda: divmod(S(1), S(0)))
assert divmod(S(12), 8) == Tuple(1, 4)
assert divmod(12, S(8)) == Tuple(1, 4)
assert divmod(S("2"), S("3/2")) == Tuple(S("1"), S("1/2"))
assert divmod(S("3/2"), S("2")) == Tuple(S("0"), S("3/2"))
assert divmod(S("2"), S("3.5")) == Tuple(S("0"), S("2"))
assert divmod(S("3.5"), S("2")) == Tuple(S("1"), S("1.5"))
assert divmod(S("2"), S("1/3")) == Tuple(S("6"), S("0"))
assert divmod(S("1/3"), S("2")) == Tuple(S("0"), S("1/3"))
assert divmod(S("2"), S("0.1")) == Tuple(S("20"), S("0"))
assert divmod(S("0.1"), S("2")) == Tuple(S("0"), S("0.1"))
assert divmod(S("2"), 2) == Tuple(S("1"), S("0"))
assert divmod(2, S("2")) == Tuple(S("1"), S("0"))
assert divmod(S("2"), 1.5) == Tuple(S("1"), S("0.5"))
assert divmod(1.5, S("2")) == Tuple(S("0"), S("1.5"))
assert divmod(0.3, S("2")) == Tuple(S("0"), S("0.3"))
assert divmod(S("3/2"), S("3.5")) == Tuple(S("0"), S("3/2"))
assert divmod(S("3.5"), S("3/2")) == Tuple(S("2"), S("0.5"))
assert divmod(S("3/2"), S("1/3")) == Tuple(S("4"), Float("1/6"))
assert divmod(S("1/3"), S("3/2")) == Tuple(S("0"), S("1/3"))
assert divmod(S("3/2"), S("0.1")) == Tuple(S("15"), S("0"))
assert divmod(S("0.1"), S("3/2")) == Tuple(S("0"), S("0.1"))
assert divmod(S("3/2"), 2) == Tuple(S("0"), S("3/2"))
assert divmod(2, S("3/2")) == Tuple(S("1"), S("0.5"))
assert divmod(S("3/2"), 1.5) == Tuple(S("1"), S("0"))
assert divmod(1.5, S("3/2")) == Tuple(S("1"), S("0"))
assert divmod(S("3/2"), 0.3) == Tuple(S("5"), S("0"))
assert divmod(0.3, S("3/2")) == Tuple(S("0"), S("0.3"))
assert divmod(S("1/3"), S("3.5")) == Tuple(S("0"), S("1/3"))
assert divmod(S("3.5"), S("0.1")) == Tuple(S("35"), S("0"))
assert divmod(S("0.1"), S("3.5")) == Tuple(S("0"), S("0.1"))
assert divmod(S("3.5"), 2) == Tuple(S("1"), S("1.5"))
assert divmod(2, S("3.5")) == Tuple(S("0"), S("2"))
assert divmod(S("3.5"), 1.5) == Tuple(S("2"), S("0.5"))
assert divmod(1.5, S("3.5")) == Tuple(S("0"), S("1.5"))
assert divmod(0.3, S("3.5")) == Tuple(S("0"), S("0.3"))
assert divmod(S("0.1"), S("1/3")) == Tuple(S("0"), S("0.1"))
assert divmod(S("1/3"), 2) == Tuple(S("0"), S("1/3"))
assert divmod(2, S("1/3")) == Tuple(S("6"), S("0"))
assert divmod(S("1/3"), 1.5) == Tuple(S("0"), S("1/3"))
assert divmod(0.3, S("1/3")) == Tuple(S("0"), S("0.3"))
assert divmod(S("0.1"), 2) == Tuple(S("0"), S("0.1"))
assert divmod(2, S("0.1")) == Tuple(S("20"), S("0"))
assert divmod(S("0.1"), 1.5) == Tuple(S("0"), S("0.1"))
assert divmod(1.5, S("0.1")) == Tuple(S("15"), S("0"))
assert divmod(S("0.1"), 0.3) == Tuple(S("0"), S("0.1"))
assert str(divmod(S("2"), 0.3)) == '(6, 0.2)'
assert str(divmod(S("3.5"), S("1/3"))) == '(10, 0.166666666666667)'
assert str(divmod(S("3.5"), 0.3)) == '(11, 0.2)'
assert str(divmod(S("1/3"), S("0.1"))) == '(3, 0.0333333333333333)'
assert str(divmod(1.5, S("1/3"))) == '(4, 0.166666666666667)'
assert str(divmod(S("1/3"), 0.3)) == '(1, 0.0333333333333333)'
assert str(divmod(0.3, S("0.1"))) == '(2, 0.1)'
assert divmod(-3, S(2)) == (-2, 1)
assert divmod(S(-3), S(2)) == (-2, 1)
assert divmod(S(-3), 2) == (-2, 1)
def test_igcd():
assert igcd(0, 0) == 0
assert igcd(0, 1) == 1
assert igcd(1, 0) == 1
assert igcd(0, 7) == 7
assert igcd(7, 0) == 7
assert igcd(7, 1) == 1
assert igcd(1, 7) == 1
assert igcd(-1, 0) == 1
assert igcd(0, -1) == 1
assert igcd(-1, -1) == 1
assert igcd(-1, 7) == 1
assert igcd(7, -1) == 1
assert igcd(8, 2) == 2
assert igcd(4, 8) == 4
assert igcd(8, 16) == 8
assert igcd(7, -3) == 1
assert igcd(-7, 3) == 1
assert igcd(-7, -3) == 1
assert igcd(*[10, 20, 30]) == 10
raises(TypeError, lambda: igcd())
raises(TypeError, lambda: igcd(2))
raises(ValueError, lambda: igcd(0, None))
raises(ValueError, lambda: igcd(1, 2.2))
for args in permutations((45.1, 1, 30)):
raises(ValueError, lambda: igcd(*args))
for args in permutations((1, 2, None)):
raises(ValueError, lambda: igcd(*args))
def test_igcd_lehmer():
a, b = fibonacci(10001), fibonacci(10000)
# len(str(a)) == 2090
# small divisors, long Euclidean sequence
assert igcd_lehmer(a, b) == 1
c = fibonacci(100)
assert igcd_lehmer(a*c, b*c) == c
# big divisor
assert igcd_lehmer(a, 10**1000) == 1
def test_igcd2():
# short loop
assert igcd2(2**100 - 1, 2**99 - 1) == 1
# Lehmer's algorithm
a, b = int(fibonacci(10001)), int(fibonacci(10000))
assert igcd2(a, b) == 1
def test_ilcm():
assert ilcm(0, 0) == 0
assert ilcm(1, 0) == 0
assert ilcm(0, 1) == 0
assert ilcm(1, 1) == 1
assert ilcm(2, 1) == 2
assert ilcm(8, 2) == 8
assert ilcm(8, 6) == 24
assert ilcm(8, 7) == 56
assert ilcm(*[10, 20, 30]) == 60
raises(ValueError, lambda: ilcm(8.1, 7))
raises(ValueError, lambda: ilcm(8, 7.1))
def test_igcdex():
assert igcdex(2, 3) == (-1, 1, 1)
assert igcdex(10, 12) == (-1, 1, 2)
assert igcdex(100, 2004) == (-20, 1, 4)
def _strictly_equal(a, b):
return (a.p, a.q, type(a.p), type(a.q)) == \
(b.p, b.q, type(b.p), type(b.q))
def _test_rational_new(cls):
"""
Tests that are common between Integer and Rational.
"""
assert cls(0) is S.Zero
assert cls(1) is S.One
assert cls(-1) is S.NegativeOne
# These look odd, but are similar to int():
assert cls('1') is S.One
assert cls(u'-1') is S.NegativeOne
i = Integer(10)
assert _strictly_equal(i, cls('10'))
assert _strictly_equal(i, cls(u'10'))
assert _strictly_equal(i, cls(long(10)))
assert _strictly_equal(i, cls(i))
raises(TypeError, lambda: cls(Symbol('x')))
def test_Integer_new():
"""
Test for Integer constructor
"""
_test_rational_new(Integer)
assert _strictly_equal(Integer(0.9), S.Zero)
assert _strictly_equal(Integer(10.5), Integer(10))
raises(ValueError, lambda: Integer("10.5"))
assert Integer(Rational('1.' + '9'*20)) == 1
def test_Rational_new():
""""
Test for Rational constructor
"""
_test_rational_new(Rational)
n1 = Rational(1, 2)
assert n1 == Rational(Integer(1), 2)
assert n1 == Rational(Integer(1), Integer(2))
assert n1 == Rational(1, Integer(2))
assert n1 == Rational(Rational(1, 2))
assert 1 == Rational(n1, n1)
assert Rational(3, 2) == Rational(Rational(1, 2), Rational(1, 3))
assert Rational(3, 1) == Rational(1, Rational(1, 3))
n3_4 = Rational(3, 4)
assert Rational('3/4') == n3_4
assert -Rational('-3/4') == n3_4
assert Rational('.76').limit_denominator(4) == n3_4
assert Rational(19, 25).limit_denominator(4) == n3_4
assert Rational('19/25').limit_denominator(4) == n3_4
assert Rational(1.0, 3) == Rational(1, 3)
assert Rational(1, 3.0) == Rational(1, 3)
assert Rational(Float(0.5)) == Rational(1, 2)
assert Rational('1e2/1e-2') == Rational(10000)
assert Rational(-1, 0) == S.ComplexInfinity
assert Rational(1, 0) == S.ComplexInfinity
# Make sure Rational doesn't lose precision on Floats
assert Rational(pi.evalf(100)).evalf(100) == pi.evalf(100)
raises(TypeError, lambda: Rational('3**3'))
raises(TypeError, lambda: Rational('1/2 + 2/3'))
# handle fractions.Fraction instances
try:
import fractions
assert Rational(fractions.Fraction(1, 2)) == Rational(1, 2)
except ImportError:
pass
def test_Number_new():
""""
Test for Number constructor
"""
# Expected behavior on numbers and strings
assert Number(1) is S.One
assert Number(2).__class__ is Integer
assert Number(-622).__class__ is Integer
assert Number(5, 3).__class__ is Rational
assert Number(5.3).__class__ is Float
assert Number('1') is S.One
assert Number('2').__class__ is Integer
assert Number('-622').__class__ is Integer
assert Number('5/3').__class__ is Rational
assert Number('5.3').__class__ is Float
raises(ValueError, lambda: Number('cos'))
raises(TypeError, lambda: Number(cos))
a = Rational(3, 5)
assert Number(a) is a # Check idempotence on Numbers
def test_Rational_cmp():
n1 = Rational(1, 4)
n2 = Rational(1, 3)
n3 = Rational(2, 4)
n4 = Rational(2, -4)
n5 = Rational(0)
n6 = Rational(1)
n7 = Rational(3)
n8 = Rational(-3)
assert n8 < n5
assert n5 < n6
assert n6 < n7
assert n8 < n7
assert n7 > n8
assert (n1 + 1)**n2 < 2
assert ((n1 + n6)/n7) < 1
assert n4 < n3
assert n2 < n3
assert n1 < n2
assert n3 > n1
assert not n3 < n1
assert not (Rational(-1) > 0)
assert Rational(-1) < 0
raises(TypeError, lambda: n1 < S.NaN)
raises(TypeError, lambda: n1 <= S.NaN)
raises(TypeError, lambda: n1 > S.NaN)
raises(TypeError, lambda: n1 >= S.NaN)
def test_Float():
def eq(a, b):
t = Float("1.0E-15")
return (-t < a - b < t)
a = Float(2) ** Float(3)
assert eq(a.evalf(), Float(8))
assert eq((pi ** -1).evalf(), Float("0.31830988618379067"))
a = Float(2) ** Float(4)
assert eq(a.evalf(), Float(16))
assert (S(.3) == S(.5)) is False
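    # Floats can also be constructed from an mpf-style tuple
    # (sign, mantissa, exponent, bit count); the mantissa may be a hex
    # string, a hex int or a decimal int, and the three forms below all
    # encode the same 53-bit value for 1.2.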
x_str = Float((0, '13333333333333', -52, 53))
x2_str = Float((0, '26666666666666', -53, 53))
x_hex = Float((0, long(0x13333333333333), -52, 53))
x_dec = Float((0, 5404319552844595, -52, 53))
assert x_str == x_hex == x_dec == Float(1.2)
    # This loses a binary digit of precision, so it isn't equal to the above,
# but check that it normalizes correctly
x2_hex = Float((0, long(0x13333333333333)*2, -53, 53))
assert x2_hex._mpf_ == (0, 5404319552844595, -52, 52)
# XXX: Should this test also hold?
# assert x2_hex._prec == 52
# x2_str and 1.2 are superficially the same
assert str(x2_str) == str(Float(1.2))
# but are different at the mpf level
assert Float(1.2)._mpf_ == (0, long(5404319552844595), -52, 53)
assert x2_str._mpf_ == (0, long(10808639105689190), -53, 53)
assert Float((0, long(0), -123, -1)) == Float('nan')
assert Float((0, long(0), -456, -2)) == Float('inf') == Float('+inf')
assert Float((1, long(0), -789, -3)) == Float('-inf')
raises(ValueError, lambda: Float((0, 7, 1, 3), ''))
assert Float('+inf').is_finite is False
assert Float('+inf').is_negative is False
assert Float('+inf').is_positive is True
assert Float('+inf').is_infinite is True
assert Float('+inf').is_zero is False
assert Float('-inf').is_finite is False
assert Float('-inf').is_negative is True
assert Float('-inf').is_positive is False
assert Float('-inf').is_infinite is True
assert Float('-inf').is_zero is False
assert Float('0.0').is_finite is True
assert Float('0.0').is_negative is False
assert Float('0.0').is_positive is False
assert Float('0.0').is_infinite is False
assert Float('0.0').is_zero is True
# rationality properties
assert Float(1).is_rational is None
assert Float(1).is_irrational is None
assert sqrt(2).n(15).is_rational is None
assert sqrt(2).n(15).is_irrational is None
# do not automatically evalf
def teq(a):
assert (a.evalf() == a) is False
assert (a.evalf() != a) is True
assert (a == a.evalf()) is False
assert (a != a.evalf()) is True
teq(pi)
teq(2*pi)
teq(cos(0.1, evaluate=False))
# long integer
i = 12345678901234567890
assert same_and_same_prec(Float(12, ''), Float('12', ''))
assert same_and_same_prec(Float(Integer(i), ''), Float(i, ''))
assert same_and_same_prec(Float(i, ''), Float(str(i), 20))
assert same_and_same_prec(Float(str(i)), Float(i, ''))
assert same_and_same_prec(Float(i), Float(i, ''))
# inexact floats (repeating binary = denom not multiple of 2)
# cannot have precision greater than 15
assert Float(.125, 22) == .125
assert Float(2.0, 22) == 2
assert float(Float('.12500000000000001', '')) == .125
raises(ValueError, lambda: Float(.12500000000000001, ''))
# allow spaces
    assert Float('123 456.123 456') == Float('123456.123456')
    assert Integer('123 456') == Integer('123456')
    assert Rational('123 456.123 456') == Rational('123456.123456')
assert Float(' .3e2') == Float('0.3e2')
# allow auto precision detection
assert Float('.1', '') == Float(.1, 1)
assert Float('.125', '') == Float(.125, 3)
assert Float('.100', '') == Float(.1, 3)
assert Float('2.0', '') == Float('2', 2)
raises(ValueError, lambda: Float("12.3d-4", ""))
raises(ValueError, lambda: Float(12.3, ""))
raises(ValueError, lambda: Float('.'))
raises(ValueError, lambda: Float('-.'))
zero = Float('0.0')
assert Float('-0') == zero
assert Float('.0') == zero
assert Float('-.0') == zero
assert Float('-0.0') == zero
assert Float(0.0) == zero
assert Float(0) == zero
assert Float(0, '') == Float('0', '')
assert Float(1) == Float(1.0)
assert Float(S.Zero) == zero
assert Float(S.One) == Float(1.0)
assert Float(decimal.Decimal('0.1'), 3) == Float('.1', 3)
assert Float(decimal.Decimal('nan')) == S.NaN
assert Float(decimal.Decimal('Infinity')) == S.Infinity
assert Float(decimal.Decimal('-Infinity')) == S.NegativeInfinity
assert '{0:.3f}'.format(Float(4.236622)) == '4.237'
assert '{0:.35f}'.format(Float(pi.n(40), 40)) == \
'3.14159265358979323846264338327950288'
assert Float(oo) == Float('+inf')
assert Float(-oo) == Float('-inf')
# unicode
assert Float(u'0.73908513321516064100000000') == \
Float('0.73908513321516064100000000')
assert Float(u'0.73908513321516064100000000', 28) == \
Float('0.73908513321516064100000000', 28)
# binary precision
# Decimal value 0.1 cannot be expressed precisely as a base 2 fraction
a = Float(S(1)/10, dps=15)
b = Float(S(1)/10, dps=16)
p = Float(S(1)/10, precision=53)
q = Float(S(1)/10, precision=54)
assert a._mpf_ == p._mpf_
assert not a._mpf_ == q._mpf_
assert not b._mpf_ == q._mpf_
# Precision specifying errors
raises(ValueError, lambda: Float("1.23", dps=3, precision=10))
raises(ValueError, lambda: Float("1.23", dps="", precision=10))
raises(ValueError, lambda: Float("1.23", dps=3, precision=""))
raises(ValueError, lambda: Float("1.23", dps="", precision=""))
@conserve_mpmath_dps
def test_float_mpf():
import mpmath
mpmath.mp.dps = 100
mp_pi = mpmath.pi()
assert Float(mp_pi, 100) == Float(mp_pi._mpf_, 100) == pi.evalf(100)
mpmath.mp.dps = 15
assert Float(mp_pi, 100) == Float(mp_pi._mpf_, 100) == pi.evalf(100)
def test_Float_RealElement():
repi = RealField(dps=100)(pi.evalf(100))
# We still have to pass the precision because Float doesn't know what
# RealElement is, but make sure it keeps full precision from the result.
assert Float(repi, 100) == pi.evalf(100)
def test_Float_default_to_highprec_from_str():
s = str(pi.evalf(128))
assert same_and_same_prec(Float(s), Float(s, ''))
def test_Float_eval():
a = Float(3.2)
assert (a**2).is_Float
def test_Float_issue_2107():
a = Float(0.1, 10)
b = Float("0.1", 10)
assert a - a == 0
assert a + (-a) == 0
assert S.Zero + a - a == 0
assert S.Zero + a + (-a) == 0
assert b - b == 0
assert b + (-b) == 0
assert S.Zero + b - b == 0
assert S.Zero + b + (-b) == 0
def test_Infinity():
assert oo != 1
assert 1*oo == oo
assert 1 != oo
assert oo != -oo
assert oo != Symbol("x")**3
assert oo + 1 == oo
assert 2 + oo == oo
assert 3*oo + 2 == oo
assert S.Half**oo == 0
assert S.Half**(-oo) == oo
assert -oo*3 == -oo
assert oo + oo == oo
assert -oo + oo*(-5) == -oo
assert 1/oo == 0
assert 1/(-oo) == 0
assert 8/oo == 0
assert oo % 2 == nan
assert 2 % oo == nan
assert oo/oo == nan
assert oo/-oo == nan
assert -oo/oo == nan
assert -oo/-oo == nan
assert oo - oo == nan
assert oo - -oo == oo
assert -oo - oo == -oo
assert -oo - -oo == nan
assert oo + -oo == nan
assert -oo + oo == nan
assert oo + oo == oo
assert -oo + oo == nan
assert oo + -oo == nan
assert -oo + -oo == -oo
assert oo*oo == oo
assert -oo*oo == -oo
assert oo*-oo == -oo
assert -oo*-oo == oo
assert oo/0 == oo
assert -oo/0 == -oo
assert 0/oo == 0
assert 0/-oo == 0
assert oo*0 == nan
assert -oo*0 == nan
assert 0*oo == nan
assert 0*-oo == nan
assert oo + 0 == oo
assert -oo + 0 == -oo
assert 0 + oo == oo
assert 0 + -oo == -oo
assert oo - 0 == oo
assert -oo - 0 == -oo
assert 0 - oo == -oo
assert 0 - -oo == oo
assert oo/2 == oo
assert -oo/2 == -oo
assert oo/-2 == -oo
assert -oo/-2 == oo
assert oo*2 == oo
assert -oo*2 == -oo
assert oo*-2 == -oo
assert 2/oo == 0
assert 2/-oo == 0
assert -2/oo == 0
assert -2/-oo == 0
assert 2*oo == oo
assert 2*-oo == -oo
assert -2*oo == -oo
assert -2*-oo == oo
assert 2 + oo == oo
assert 2 - oo == -oo
assert -2 + oo == oo
assert -2 - oo == -oo
assert 2 + -oo == -oo
assert 2 - -oo == oo
assert -2 + -oo == -oo
assert -2 - -oo == oo
assert S(2) + oo == oo
assert S(2) - oo == -oo
assert oo/I == -oo*I
assert -oo/I == oo*I
assert oo*float(1) == Float('inf') and (oo*float(1)).is_Float
assert -oo*float(1) == Float('-inf') and (-oo*float(1)).is_Float
assert oo/float(1) == Float('inf') and (oo/float(1)).is_Float
assert -oo/float(1) == Float('-inf') and (-oo/float(1)).is_Float
assert oo*float(-1) == Float('-inf') and (oo*float(-1)).is_Float
assert -oo*float(-1) == Float('inf') and (-oo*float(-1)).is_Float
assert oo/float(-1) == Float('-inf') and (oo/float(-1)).is_Float
assert -oo/float(-1) == Float('inf') and (-oo/float(-1)).is_Float
assert oo + float(1) == Float('inf') and (oo + float(1)).is_Float
assert -oo + float(1) == Float('-inf') and (-oo + float(1)).is_Float
assert oo - float(1) == Float('inf') and (oo - float(1)).is_Float
assert -oo - float(1) == Float('-inf') and (-oo - float(1)).is_Float
assert float(1)*oo == Float('inf') and (float(1)*oo).is_Float
assert float(1)*-oo == Float('-inf') and (float(1)*-oo).is_Float
assert float(1)/oo == 0
assert float(1)/-oo == 0
assert float(-1)*oo == Float('-inf') and (float(-1)*oo).is_Float
assert float(-1)*-oo == Float('inf') and (float(-1)*-oo).is_Float
assert float(-1)/oo == 0
assert float(-1)/-oo == 0
assert float(1) + oo == Float('inf')
assert float(1) + -oo == Float('-inf')
assert float(1) - oo == Float('-inf')
assert float(1) - -oo == Float('inf')
assert Float('nan') == nan
assert nan*1.0 == nan
assert -1.0*nan == nan
assert nan*oo == nan
assert nan*-oo == nan
assert nan/oo == nan
assert nan/-oo == nan
assert nan + oo == nan
assert nan + -oo == nan
assert nan - oo == nan
assert nan - -oo == nan
assert -oo * S.Zero == nan
assert oo*nan == nan
assert -oo*nan == nan
assert oo/nan == nan
assert -oo/nan == nan
assert oo + nan == nan
assert -oo + nan == nan
assert oo - nan == nan
assert -oo - nan == nan
assert S.Zero * oo == nan
assert oo.is_Rational is False
assert isinstance(oo, Rational) is False
assert S.One/oo == 0
assert -S.One/oo == 0
assert S.One/-oo == 0
assert -S.One/-oo == 0
assert S.One*oo == oo
assert -S.One*oo == -oo
assert S.One*-oo == -oo
assert -S.One*-oo == oo
assert S.One/nan == nan
assert S.One - -oo == oo
assert S.One + nan == nan
assert S.One - nan == nan
assert nan - S.One == nan
assert nan/S.One == nan
assert -oo - S.One == -oo
def test_Infinity_2():
x = Symbol('x')
assert oo*x != oo
assert oo*(pi - 1) == oo
assert oo*(1 - pi) == -oo
assert (-oo)*x != -oo
assert (-oo)*(pi - 1) == -oo
assert (-oo)*(1 - pi) == oo
assert (-1)**S.NaN is S.NaN
assert oo - Float('inf') is S.NaN
assert oo + Float('-inf') is S.NaN
assert oo*0 is S.NaN
assert oo/Float('inf') is S.NaN
assert oo/Float('-inf') is S.NaN
assert oo**S.NaN is S.NaN
assert -oo + Float('inf') is S.NaN
assert -oo - Float('-inf') is S.NaN
assert -oo*S.NaN is S.NaN
assert -oo*0 is S.NaN
assert -oo/Float('inf') is S.NaN
assert -oo/Float('-inf') is S.NaN
assert -oo/S.NaN is S.NaN
assert abs(-oo) == oo
assert all((-oo)**i is S.NaN for i in (oo, -oo, S.NaN))
assert (-oo)**3 == -oo
assert (-oo)**2 == oo
assert abs(S.ComplexInfinity) == oo
def test_Mul_Infinity_Zero():
assert 0*Float('inf') == nan
assert 0*Float('-inf') == nan
assert 0*Float('inf') == nan
assert 0*Float('-inf') == nan
assert Float('inf')*0 == nan
assert Float('-inf')*0 == nan
assert Float('inf')*0 == nan
assert Float('-inf')*0 == nan
assert Float(0)*Float('inf') == nan
assert Float(0)*Float('-inf') == nan
assert Float(0)*Float('inf') == nan
assert Float(0)*Float('-inf') == nan
assert Float('inf')*Float(0) == nan
assert Float('-inf')*Float(0) == nan
assert Float('inf')*Float(0) == nan
assert Float('-inf')*Float(0) == nan
def test_Div_By_Zero():
assert 1/S(0) == zoo
assert 1/Float(0) == Float('inf')
assert 0/S(0) == nan
assert 0/Float(0) == nan
assert S(0)/0 == nan
assert Float(0)/0 == nan
assert -1/S(0) == zoo
assert -1/Float(0) == Float('-inf')
def test_Infinity_inequations():
assert oo > pi
assert not (oo < pi)
assert exp(-3) < oo
assert Float('+inf') > pi
assert not (Float('+inf') < pi)
assert exp(-3) < Float('+inf')
raises(TypeError, lambda: oo < I)
raises(TypeError, lambda: oo <= I)
raises(TypeError, lambda: oo > I)
raises(TypeError, lambda: oo >= I)
raises(TypeError, lambda: -oo < I)
raises(TypeError, lambda: -oo <= I)
raises(TypeError, lambda: -oo > I)
raises(TypeError, lambda: -oo >= I)
raises(TypeError, lambda: I < oo)
raises(TypeError, lambda: I <= oo)
raises(TypeError, lambda: I > oo)
raises(TypeError, lambda: I >= oo)
raises(TypeError, lambda: I < -oo)
raises(TypeError, lambda: I <= -oo)
raises(TypeError, lambda: I > -oo)
raises(TypeError, lambda: I >= -oo)
assert oo > -oo and oo >= -oo
assert (oo < -oo) == False and (oo <= -oo) == False
assert -oo < oo and -oo <= oo
assert (-oo > oo) == False and (-oo >= oo) == False
assert (oo < oo) == False # issue 7775
assert (oo > oo) == False
assert (-oo > -oo) == False and (-oo < -oo) == False
assert oo >= oo and oo <= oo and -oo >= -oo and -oo <= -oo
assert (-oo < -Float('inf')) == False
assert (oo > Float('inf')) == False
assert -oo >= -Float('inf')
assert oo <= Float('inf')
x = Symbol('x')
b = Symbol('b', finite=True, real=True)
assert (x < oo) == Lt(x, oo) # issue 7775
assert b < oo and b > -oo and b <= oo and b >= -oo
assert oo > b and oo >= b and (oo < b) == False and (oo <= b) == False
assert (-oo > b) == False and (-oo >= b) == False and -oo < b and -oo <= b
assert (oo < x) == Lt(oo, x) and (oo > x) == Gt(oo, x)
assert (oo <= x) == Le(oo, x) and (oo >= x) == Ge(oo, x)
assert (-oo < x) == Lt(-oo, x) and (-oo > x) == Gt(-oo, x)
assert (-oo <= x) == Le(-oo, x) and (-oo >= x) == Ge(-oo, x)
def test_NaN():
assert nan == nan
assert nan != 1
assert 1*nan == nan
assert 1 != nan
assert nan == -nan
assert oo != Symbol("x")**3
assert nan + 1 == nan
assert 2 + nan == nan
assert 3*nan + 2 == nan
assert -nan*3 == nan
assert nan + nan == nan
assert -nan + nan*(-5) == nan
assert 1/nan == nan
assert 1/(-nan) == nan
assert 8/nan == nan
raises(TypeError, lambda: nan > 0)
raises(TypeError, lambda: nan < 0)
raises(TypeError, lambda: nan >= 0)
raises(TypeError, lambda: nan <= 0)
raises(TypeError, lambda: 0 < nan)
raises(TypeError, lambda: 0 > nan)
raises(TypeError, lambda: 0 <= nan)
raises(TypeError, lambda: 0 >= nan)
assert S.One + nan == nan
assert S.One - nan == nan
assert S.One*nan == nan
assert S.One/nan == nan
assert nan - S.One == nan
assert nan*S.One == nan
assert nan + S.One == nan
assert nan/S.One == nan
assert nan**0 == 1 # as per IEEE 754
assert 1**nan == nan # IEEE 754 is not the best choice for symbolic work
# test Pow._eval_power's handling of NaN
assert Pow(nan, 0, evaluate=False)**2 == 1
def test_special_numbers():
assert isinstance(S.NaN, Number) is True
assert isinstance(S.Infinity, Number) is True
assert isinstance(S.NegativeInfinity, Number) is True
assert S.NaN.is_number is True
assert S.Infinity.is_number is True
assert S.NegativeInfinity.is_number is True
assert S.ComplexInfinity.is_number is True
assert isinstance(S.NaN, Rational) is False
assert isinstance(S.Infinity, Rational) is False
assert isinstance(S.NegativeInfinity, Rational) is False
assert S.NaN.is_rational is not True
assert S.Infinity.is_rational is not True
assert S.NegativeInfinity.is_rational is not True
def test_powers():
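    # integer_nthroot(x, n) returns (r, exact) with r = floor(x**(1/n))
    # computed exactly, and exact True iff r**n == x.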
assert integer_nthroot(1, 2) == (1, True)
assert integer_nthroot(1, 5) == (1, True)
assert integer_nthroot(2, 1) == (2, True)
assert integer_nthroot(2, 2) == (1, False)
assert integer_nthroot(2, 5) == (1, False)
assert integer_nthroot(4, 2) == (2, True)
assert integer_nthroot(123**25, 25) == (123, True)
assert integer_nthroot(123**25 + 1, 25) == (123, False)
assert integer_nthroot(123**25 - 1, 25) == (122, False)
assert integer_nthroot(1, 1) == (1, True)
assert integer_nthroot(0, 1) == (0, True)
assert integer_nthroot(0, 3) == (0, True)
assert integer_nthroot(10000, 1) == (10000, True)
assert integer_nthroot(4, 2) == (2, True)
assert integer_nthroot(16, 2) == (4, True)
assert integer_nthroot(26, 2) == (5, False)
assert integer_nthroot(1234567**7, 7) == (1234567, True)
assert integer_nthroot(1234567**7 + 1, 7) == (1234567, False)
assert integer_nthroot(1234567**7 - 1, 7) == (1234566, False)
b = 25**1000
assert integer_nthroot(b, 1000) == (25, True)
assert integer_nthroot(b + 1, 1000) == (25, False)
assert integer_nthroot(b - 1, 1000) == (24, False)
c = 10**400
c2 = c**2
assert integer_nthroot(c2, 2) == (c, True)
assert integer_nthroot(c2 + 1, 2) == (c, False)
assert integer_nthroot(c2 - 1, 2) == (c - 1, False)
assert integer_nthroot(2, 10**10) == (1, False)
p, r = integer_nthroot(int(factorial(10000)), 100)
assert p % (10**10) == 5322420655
assert not r
# Test that this is fast
assert integer_nthroot(2, 10**10) == (1, False)
# output should be int if possible
assert type(integer_nthroot(2**61, 2)[0]) is int
def test_integer_nthroot_overflow():
assert integer_nthroot(10**(50*50), 50) == (10**50, True)
assert integer_nthroot(10**100000, 10000) == (10**10, True)
def test_isqrt():
from math import sqrt as _sqrt
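    # Beyond roughly 2**52, a C double no longer carries enough precision
    # for math.sqrt to give the exact integer square root, which is what
    # the limit below demonstrates.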
limit = 17984395633462800708566937239551
assert int(_sqrt(limit)) == integer_nthroot(limit, 2)[0]
assert int(_sqrt(limit + 1)) != integer_nthroot(limit + 1, 2)[0]
assert isqrt(limit + 1) == integer_nthroot(limit + 1, 2)[0]
assert isqrt(limit + 1 + S.Half) == integer_nthroot(limit + 1, 2)[0]
def test_powers_Integer():
"""Test Integer._eval_power"""
# check infinity
assert S(1) ** S.Infinity == S.NaN
assert S(-1)** S.Infinity == S.NaN
assert S(2) ** S.Infinity == S.Infinity
assert S(-2)** S.Infinity == S.Infinity + S.Infinity * S.ImaginaryUnit
assert S(0) ** S.Infinity == 0
    # check NaN
assert S(1) ** S.NaN == S.NaN
assert S(-1) ** S.NaN == S.NaN
# check for exact roots
assert S(-1) ** Rational(6, 5) == - (-1)**(S(1)/5)
assert sqrt(S(4)) == 2
assert sqrt(S(-4)) == I * 2
assert S(16) ** Rational(1, 4) == 2
assert S(-16) ** Rational(1, 4) == 2 * (-1)**Rational(1, 4)
assert S(9) ** Rational(3, 2) == 27
assert S(-9) ** Rational(3, 2) == -27*I
assert S(27) ** Rational(2, 3) == 9
assert S(-27) ** Rational(2, 3) == 9 * (S(-1) ** Rational(2, 3))
assert (-2) ** Rational(-2, 1) == Rational(1, 4)
# not exact roots
assert sqrt(-3) == I*sqrt(3)
assert (3) ** (S(3)/2) == 3 * sqrt(3)
assert (-3) ** (S(3)/2) == - 3 * sqrt(-3)
assert (-3) ** (S(5)/2) == 9 * I * sqrt(3)
assert (-3) ** (S(7)/2) == - I * 27 * sqrt(3)
assert (2) ** (S(3)/2) == 2 * sqrt(2)
assert (2) ** (S(-3)/2) == sqrt(2) / 4
assert (81) ** (S(2)/3) == 9 * (S(3) ** (S(2)/3))
assert (-81) ** (S(2)/3) == 9 * (S(-3) ** (S(2)/3))
assert (-3) ** Rational(-7, 3) == \
-(-1)**Rational(2, 3)*3**Rational(2, 3)/27
assert (-3) ** Rational(-2, 3) == \
-(-1)**Rational(1, 3)*3**Rational(1, 3)/3
# join roots
assert sqrt(6) + sqrt(24) == 3*sqrt(6)
assert sqrt(2) * sqrt(3) == sqrt(6)
    # separate symbols & constants
x = Symbol("x")
assert sqrt(49 * x) == 7 * sqrt(x)
assert sqrt((3 - sqrt(pi)) ** 2) == 3 - sqrt(pi)
# check that it is fast for big numbers
assert (2**64 + 1) ** Rational(4, 3)
assert (2**64 + 1) ** Rational(17, 25)
# negative rational power and negative base
assert (-3) ** Rational(-7, 3) == \
-(-1)**Rational(2, 3)*3**Rational(2, 3)/27
assert (-3) ** Rational(-2, 3) == \
-(-1)**Rational(1, 3)*3**Rational(1, 3)/3
assert S(1234).factors() == {617: 1, 2: 1}
assert Rational(2*3, 3*5*7).factors() == {2: 1, 5: -1, 7: -1}
# test that eval_power factors numbers bigger than
# the current limit in factor_trial_division (2**15)
from sympy import nextprime
n = nextprime(2**15)
assert sqrt(n**2) == n
assert sqrt(n**3) == n*sqrt(n)
assert sqrt(4*n) == 2*sqrt(n)
# check that factors of base with powers sharing gcd with power are removed
assert (2**4*3)**Rational(1, 6) == 2**Rational(2, 3)*3**Rational(1, 6)
assert (2**4*3)**Rational(5, 6) == 8*2**Rational(1, 3)*3**Rational(5, 6)
    # check that bases sharing a gcd are extracted
assert 2**Rational(1, 3)*3**Rational(1, 4)*6**Rational(1, 5) == \
2**Rational(8, 15)*3**Rational(9, 20)
assert sqrt(8)*24**Rational(1, 3)*6**Rational(1, 5) == \
4*2**Rational(7, 10)*3**Rational(8, 15)
assert sqrt(8)*(-24)**Rational(1, 3)*(-6)**Rational(1, 5) == \
4*(-3)**Rational(8, 15)*2**Rational(7, 10)
assert 2**Rational(1, 3)*2**Rational(8, 9) == 2*2**Rational(2, 9)
assert 2**Rational(2, 3)*6**Rational(1, 3) == 2*3**Rational(1, 3)
assert 2**Rational(2, 3)*6**Rational(8, 9) == \
2*2**Rational(5, 9)*3**Rational(8, 9)
assert (-2)**Rational(2, S(3))*(-4)**Rational(1, S(3)) == -2*2**Rational(1, 3)
assert 3*Pow(3, 2, evaluate=False) == 3**3
assert 3*Pow(3, -1/S(3), evaluate=False) == 3**(2/S(3))
assert (-2)**(1/S(3))*(-3)**(1/S(4))*(-5)**(5/S(6)) == \
-(-1)**Rational(5, 12)*2**Rational(1, 3)*3**Rational(1, 4) * \
5**Rational(5, 6)
assert Integer(-2)**Symbol('', even=True) == \
Integer(2)**Symbol('', even=True)
assert (-1)**Float(.5) == 1.0*I
def test_powers_Rational():
"""Test Rational._eval_power"""
# check infinity
assert Rational(1, 2) ** S.Infinity == 0
assert Rational(3, 2) ** S.Infinity == S.Infinity
assert Rational(-1, 2) ** S.Infinity == 0
assert Rational(-3, 2) ** S.Infinity == \
S.Infinity + S.Infinity * S.ImaginaryUnit
    # check NaN
assert Rational(3, 4) ** S.NaN == S.NaN
assert Rational(-2, 3) ** S.NaN == S.NaN
# exact roots on numerator
assert sqrt(Rational(4, 3)) == 2 * sqrt(3) / 3
assert Rational(4, 3) ** Rational(3, 2) == 8 * sqrt(3) / 9
assert sqrt(Rational(-4, 3)) == I * 2 * sqrt(3) / 3
assert Rational(-4, 3) ** Rational(3, 2) == - I * 8 * sqrt(3) / 9
assert Rational(27, 2) ** Rational(1, 3) == 3 * (2 ** Rational(2, 3)) / 2
assert Rational(5**3, 8**3) ** Rational(4, 3) == Rational(5**4, 8**4)
# exact root on denominator
assert sqrt(Rational(1, 4)) == Rational(1, 2)
assert sqrt(Rational(1, -4)) == I * Rational(1, 2)
assert sqrt(Rational(3, 4)) == sqrt(3) / 2
assert sqrt(Rational(3, -4)) == I * sqrt(3) / 2
assert Rational(5, 27) ** Rational(1, 3) == (5 ** Rational(1, 3)) / 3
# not exact roots
assert sqrt(Rational(1, 2)) == sqrt(2) / 2
assert sqrt(Rational(-4, 7)) == I * sqrt(Rational(4, 7))
assert Rational(-3, 2)**Rational(-7, 3) == \
-4*(-1)**Rational(2, 3)*2**Rational(1, 3)*3**Rational(2, 3)/27
assert Rational(-3, 2)**Rational(-2, 3) == \
-(-1)**Rational(1, 3)*2**Rational(2, 3)*3**Rational(1, 3)/3
# negative integer power and negative rational base
assert Rational(-2, 3) ** Rational(-2, 1) == Rational(9, 4)
a = Rational(1, 10)
assert a**Float(a, 2) == Float(a, 2)**Float(a, 2)
assert Rational(-2, 3)**Symbol('', even=True) == \
Rational(2, 3)**Symbol('', even=True)
def test_powers_Float():
assert str((S('-1/10')**S('3/10')).n()) == str(Float(-.1)**(.3))
def test_abs1():
assert Rational(1, 6) != Rational(-1, 6)
assert abs(Rational(1, 6)) == abs(Rational(-1, 6))
def test_accept_int():
assert Float(4) == 4
def test_dont_accept_str():
assert Float("0.2") != "0.2"
assert not (Float("0.2") == "0.2")
def test_int():
a = Rational(5)
assert int(a) == 5
a = Rational(9, 10)
assert int(a) == int(-a) == 0
assert 1/(-1)**Rational(2, 3) == -(-1)**Rational(1, 3)
assert int(pi) == 3
assert int(E) == 2
assert int(GoldenRatio) == 1
# issue 10368
a = S(32442016954)/78058255275
assert type(int(a)) is type(int(-a)) is int
def test_long():
a = Rational(5)
assert long(a) == 5
a = Rational(9, 10)
assert long(a) == long(-a) == 0
a = Integer(2**100)
assert long(a) == a
assert long(pi) == 3
assert long(E) == 2
assert long(GoldenRatio) == 1
def test_real_bug():
x = Symbol("x")
assert str(2.0*x*x) in ["(2.0*x)*x", "2.0*x**2", "2.00000000000000*x**2"]
assert str(2.1*x*x) != "(2.0*x)*x"
def test_bug_sqrt():
assert ((sqrt(Rational(2)) + 1)*(sqrt(Rational(2)) - 1)).expand() == 1
def test_pi_Pi():
"Test that pi (instance) is imported, but Pi (class) is not"
from sympy import pi
with raises(ImportError):
from sympy import Pi
def test_no_len():
# there should be no len for numbers
raises(TypeError, lambda: len(Rational(2)))
raises(TypeError, lambda: len(Rational(2, 3)))
raises(TypeError, lambda: len(Integer(2)))
def test_issue_3321():
assert sqrt(Rational(1, 5)) == sqrt(Rational(1, 5))
assert 5 * sqrt(Rational(1, 5)) == sqrt(5)
def test_issue_3692():
assert ((-1)**Rational(1, 6)).expand(complex=True) == I/2 + sqrt(3)/2
assert ((-5)**Rational(1, 6)).expand(complex=True) == \
5**Rational(1, 6)*I/2 + 5**Rational(1, 6)*sqrt(3)/2
assert ((-64)**Rational(1, 6)).expand(complex=True) == I + sqrt(3)
def test_issue_3423():
x = Symbol("x")
assert sqrt(x - 1).as_base_exp() == (x - 1, S.Half)
assert sqrt(x - 1) != I*sqrt(1 - x)
def test_issue_3449():
x = Symbol("x")
assert sqrt(x - 1).subs(x, 5) == 2
def test_Integer_factors():
def F(i):
return Integer(i).factors()
assert F(1) == {}
assert F(2) == {2: 1}
assert F(3) == {3: 1}
assert F(4) == {2: 2}
assert F(5) == {5: 1}
assert F(6) == {2: 1, 3: 1}
assert F(7) == {7: 1}
assert F(8) == {2: 3}
assert F(9) == {3: 2}
assert F(10) == {2: 1, 5: 1}
assert F(11) == {11: 1}
assert F(12) == {2: 2, 3: 1}
assert F(13) == {13: 1}
assert F(14) == {2: 1, 7: 1}
assert F(15) == {3: 1, 5: 1}
assert F(16) == {2: 4}
assert F(17) == {17: 1}
assert F(18) == {2: 1, 3: 2}
assert F(19) == {19: 1}
assert F(20) == {2: 2, 5: 1}
assert F(21) == {3: 1, 7: 1}
assert F(22) == {2: 1, 11: 1}
assert F(23) == {23: 1}
assert F(24) == {2: 3, 3: 1}
assert F(25) == {5: 2}
assert F(26) == {2: 1, 13: 1}
assert F(27) == {3: 3}
assert F(28) == {2: 2, 7: 1}
assert F(29) == {29: 1}
assert F(30) == {2: 1, 3: 1, 5: 1}
assert F(31) == {31: 1}
assert F(32) == {2: 5}
assert F(33) == {3: 1, 11: 1}
assert F(34) == {2: 1, 17: 1}
assert F(35) == {5: 1, 7: 1}
assert F(36) == {2: 2, 3: 2}
assert F(37) == {37: 1}
assert F(38) == {2: 1, 19: 1}
assert F(39) == {3: 1, 13: 1}
assert F(40) == {2: 3, 5: 1}
assert F(41) == {41: 1}
assert F(42) == {2: 1, 3: 1, 7: 1}
assert F(43) == {43: 1}
assert F(44) == {2: 2, 11: 1}
assert F(45) == {3: 2, 5: 1}
assert F(46) == {2: 1, 23: 1}
assert F(47) == {47: 1}
assert F(48) == {2: 4, 3: 1}
assert F(49) == {7: 2}
assert F(50) == {2: 1, 5: 2}
assert F(51) == {3: 1, 17: 1}
def test_Rational_factors():
def F(p, q, visual=None):
return Rational(p, q).factors(visual=visual)
assert F(2, 3) == {2: 1, 3: -1}
assert F(2, 9) == {2: 1, 3: -2}
assert F(2, 15) == {2: 1, 3: -1, 5: -1}
assert F(6, 10) == {3: 1, 5: -1}
def test_issue_4107():
assert pi*(E + 10) + pi*(-E - 10) != 0
assert pi*(E + 10**10) + pi*(-E - 10**10) != 0
assert pi*(E + 10**20) + pi*(-E - 10**20) != 0
assert pi*(E + 10**80) + pi*(-E - 10**80) != 0
assert (pi*(E + 10) + pi*(-E - 10)).expand() == 0
assert (pi*(E + 10**10) + pi*(-E - 10**10)).expand() == 0
assert (pi*(E + 10**20) + pi*(-E - 10**20)).expand() == 0
assert (pi*(E + 10**80) + pi*(-E - 10**80)).expand() == 0
def test_IntegerInteger():
a = Integer(4)
b = Integer(a)
assert a == b
def test_Rational_gcd_lcm_cofactors():
assert Integer(4).gcd(2) == Integer(2)
assert Integer(4).lcm(2) == Integer(4)
assert Integer(4).gcd(Integer(2)) == Integer(2)
assert Integer(4).lcm(Integer(2)) == Integer(4)
assert Integer(4).gcd(3) == Integer(1)
assert Integer(4).lcm(3) == Integer(12)
assert Integer(4).gcd(Integer(3)) == Integer(1)
assert Integer(4).lcm(Integer(3)) == Integer(12)
assert Rational(4, 3).gcd(2) == Rational(2, 3)
assert Rational(4, 3).lcm(2) == Integer(4)
assert Rational(4, 3).gcd(Integer(2)) == Rational(2, 3)
assert Rational(4, 3).lcm(Integer(2)) == Integer(4)
assert Integer(4).gcd(Rational(2, 9)) == Rational(2, 9)
assert Integer(4).lcm(Rational(2, 9)) == Integer(4)
assert Rational(4, 3).gcd(Rational(2, 9)) == Rational(2, 9)
assert Rational(4, 3).lcm(Rational(2, 9)) == Rational(4, 3)
assert Rational(4, 5).gcd(Rational(2, 9)) == Rational(2, 45)
assert Rational(4, 5).lcm(Rational(2, 9)) == Integer(4)
assert Integer(4).cofactors(2) == (Integer(2), Integer(2), Integer(1))
assert Integer(4).cofactors(Integer(2)) == \
(Integer(2), Integer(2), Integer(1))
assert Integer(4).gcd(Float(2.0)) == S.One
assert Integer(4).lcm(Float(2.0)) == Float(8.0)
assert Integer(4).cofactors(Float(2.0)) == (S.One, Integer(4), Float(2.0))
assert Rational(1, 2).gcd(Float(2.0)) == S.One
assert Rational(1, 2).lcm(Float(2.0)) == Float(1.0)
assert Rational(1, 2).cofactors(Float(2.0)) == \
(S.One, Rational(1, 2), Float(2.0))
def test_Float_gcd_lcm_cofactors():
assert Float(2.0).gcd(Integer(4)) == S.One
assert Float(2.0).lcm(Integer(4)) == Float(8.0)
assert Float(2.0).cofactors(Integer(4)) == (S.One, Float(2.0), Integer(4))
assert Float(2.0).gcd(Rational(1, 2)) == S.One
assert Float(2.0).lcm(Rational(1, 2)) == Float(1.0)
assert Float(2.0).cofactors(Rational(1, 2)) == \
(S.One, Float(2.0), Rational(1, 2))
def test_issue_4611():
assert abs(pi._evalf(50) - 3.14159265358979) < 1e-10
assert abs(E._evalf(50) - 2.71828182845905) < 1e-10
assert abs(Catalan._evalf(50) - 0.915965594177219) < 1e-10
assert abs(EulerGamma._evalf(50) - 0.577215664901533) < 1e-10
assert abs(GoldenRatio._evalf(50) - 1.61803398874989) < 1e-10
x = Symbol("x")
assert (pi + x).evalf() == pi.evalf() + x
assert (E + x).evalf() == E.evalf() + x
assert (Catalan + x).evalf() == Catalan.evalf() + x
assert (EulerGamma + x).evalf() == EulerGamma.evalf() + x
assert (GoldenRatio + x).evalf() == GoldenRatio.evalf() + x
@conserve_mpmath_dps
def test_conversion_to_mpmath():
assert mpmath.mpmathify(Integer(1)) == mpmath.mpf(1)
assert mpmath.mpmathify(Rational(1, 2)) == mpmath.mpf(0.5)
assert mpmath.mpmathify(Float('1.23', 15)) == mpmath.mpf('1.23')
assert mpmath.mpmathify(I) == mpmath.mpc(1j)
assert mpmath.mpmathify(1 + 2*I) == mpmath.mpc(1 + 2j)
assert mpmath.mpmathify(1.0 + 2*I) == mpmath.mpc(1 + 2j)
assert mpmath.mpmathify(1 + 2.0*I) == mpmath.mpc(1 + 2j)
assert mpmath.mpmathify(1.0 + 2.0*I) == mpmath.mpc(1 + 2j)
assert mpmath.mpmathify(Rational(1, 2) + Rational(1, 2)*I) == mpmath.mpc(0.5 + 0.5j)
assert mpmath.mpmathify(2*I) == mpmath.mpc(2j)
assert mpmath.mpmathify(2.0*I) == mpmath.mpc(2j)
assert mpmath.mpmathify(Rational(1, 2)*I) == mpmath.mpc(0.5j)
mpmath.mp.dps = 100
assert mpmath.mpmathify(pi.evalf(100) + pi.evalf(100)*I) == mpmath.pi + mpmath.pi*mpmath.j
assert mpmath.mpmathify(pi.evalf(100)*I) == mpmath.pi*mpmath.j
def test_relational():
# real
x = S(.1)
assert (x != cos) is True
assert (x == cos) is False
# rational
x = Rational(1, 3)
assert (x != cos) is True
assert (x == cos) is False
# integer defers to rational so these tests are omitted
# number symbol
x = pi
assert (x != cos) is True
assert (x == cos) is False
def test_Integer_as_index():
assert 'hello'[Integer(2):] == 'llo'
def test_Rational_int():
assert int( Rational(7, 5)) == 1
assert int( Rational(1, 2)) == 0
assert int(-Rational(1, 2)) == 0
assert int(-Rational(7, 5)) == -1
def test_zoo():
b = Symbol('b', finite=True)
nz = Symbol('nz', nonzero=True)
p = Symbol('p', positive=True)
n = Symbol('n', negative=True)
im = Symbol('i', imaginary=True)
c = Symbol('c', complex=True)
pb = Symbol('pb', positive=True, finite=True)
nb = Symbol('nb', negative=True, finite=True)
imb = Symbol('ib', imaginary=True, finite=True)
for i in [I, S.Infinity, S.NegativeInfinity, S.Zero, S.One, S.Pi, S.Half, S(3), log(3),
b, nz, p, n, im, pb, nb, imb, c]:
if i.is_finite and (i.is_real or i.is_imaginary):
assert i + zoo is zoo
assert i - zoo is zoo
assert zoo + i is zoo
assert zoo - i is zoo
elif i.is_finite is not False:
assert (i + zoo).is_Add
assert (i - zoo).is_Add
assert (zoo + i).is_Add
assert (zoo - i).is_Add
else:
assert (i + zoo) is S.NaN
assert (i - zoo) is S.NaN
assert (zoo + i) is S.NaN
assert (zoo - i) is S.NaN
if fuzzy_not(i.is_zero) and (i.is_real or i.is_imaginary):
assert i*zoo is zoo
assert zoo*i is zoo
elif i.is_zero:
assert i*zoo is S.NaN
assert zoo*i is S.NaN
else:
assert (i*zoo).is_Mul
assert (zoo*i).is_Mul
if fuzzy_not((1/i).is_zero) and (i.is_real or i.is_imaginary):
assert zoo/i is zoo
elif (1/i).is_zero:
assert zoo/i is S.NaN
elif i.is_zero:
assert zoo/i is zoo
else:
assert (zoo/i).is_Mul
assert (I*oo).is_Mul # allow directed infinity
assert zoo + zoo is S.NaN
assert zoo * zoo is zoo
assert zoo - zoo is S.NaN
assert zoo/zoo is S.NaN
assert zoo**zoo is S.NaN
assert zoo**0 is S.One
assert zoo**2 is zoo
assert 1/zoo is S.Zero
assert Mul.flatten([S(-1), oo, S(0)]) == ([S.NaN], [], None)
def test_issue_4122():
x = Symbol('x', nonpositive=True)
assert (oo + x).is_Add
x = Symbol('x', finite=True)
assert (oo + x).is_Add # x could be imaginary
x = Symbol('x', nonnegative=True)
assert oo + x == oo
x = Symbol('x', finite=True, real=True)
assert oo + x == oo
    # similarly for negative infinity
x = Symbol('x', nonnegative=True)
assert (-oo + x).is_Add
x = Symbol('x', finite=True)
assert (-oo + x).is_Add
x = Symbol('x', nonpositive=True)
assert -oo + x == -oo
x = Symbol('x', finite=True, real=True)
assert -oo + x == -oo
def test_GoldenRatio_expand():
assert GoldenRatio.expand(func=True) == S.Half + sqrt(5)/2
def test_as_content_primitive():
assert S.Zero.as_content_primitive() == (1, 0)
assert S.Half.as_content_primitive() == (S.Half, 1)
assert (-S.Half).as_content_primitive() == (S.Half, -1)
assert S(3).as_content_primitive() == (3, 1)
assert S(3.1).as_content_primitive() == (1, 3.1)
def test_hashing_sympy_integers():
# Test for issue 5072
assert set([Integer(3)]) == set([int(3)])
assert hash(Integer(4)) == hash(int(4))
def test_issue_4172():
assert int((E**100).round()) == \
26881171418161354484126255515800135873611119
assert int((pi**100).round()) == \
51878483143196131920862615246303013562686760680406
assert int((Rational(1)/EulerGamma**100).round()) == \
734833795660954410469466
@XFAIL
def test_mpmath_issues():
from mpmath.libmp.libmpf import _normalize
import mpmath.libmp as mlib
rnd = mlib.round_nearest
mpf = (0, long(0), -123, -1, 53, rnd) # nan
assert _normalize(mpf, 53) != (0, long(0), 0, 0)
mpf = (0, long(0), -456, -2, 53, rnd) # +inf
assert _normalize(mpf, 53) != (0, long(0), 0, 0)
mpf = (1, long(0), -789, -3, 53, rnd) # -inf
assert _normalize(mpf, 53) != (0, long(0), 0, 0)
from mpmath.libmp.libmpf import fnan
assert mlib.mpf_eq(fnan, fnan)
def test_Catalan_EulerGamma_prec():
n = GoldenRatio
f = Float(n.n(), 5)
assert f._mpf_ == (0, long(212079), -17, 18)
assert f._prec == 20
assert n._as_mpf_val(20) == f._mpf_
n = EulerGamma
f = Float(n.n(), 5)
assert f._mpf_ == (0, long(302627), -19, 19)
assert f._prec == 20
assert n._as_mpf_val(20) == f._mpf_
def test_Float_eq():
assert Float(.12, 3) != Float(.12, 4)
assert Float(.12, 3) == .12
assert 0.12 == Float(.12, 3)
assert Float('.12', 22) != .12
def test_int_NumberSymbols():
assert [int(i) for i in [pi, EulerGamma, E, GoldenRatio, Catalan]] == \
[3, 0, 2, 1, 0]
def test_issue_6640():
from mpmath.libmp.libmpf import finf, fninf
# fnan is not included because Float no longer returns fnan,
# but otherwise, the same sort of test could apply
assert Float(finf).is_zero is False
assert Float(fninf).is_zero is False
assert bool(Float(0)) is False
def test_issue_6349():
assert Float('23.e3', '')._prec == 10
assert Float('23e3', '')._prec == 20
assert Float('23000', '')._prec == 20
assert Float('-23000', '')._prec == 20
def test_mpf_norm():
assert mpf_norm((1, 0, 1, 0), 10) == mpf('0')._mpf_
assert Float._new((1, 0, 1, 0), 10)._mpf_ == mpf('0')._mpf_
def test_latex():
assert latex(pi) == r"\pi"
assert latex(E) == r"e"
assert latex(GoldenRatio) == r"\phi"
assert latex(EulerGamma) == r"\gamma"
assert latex(oo) == r"\infty"
assert latex(-oo) == r"-\infty"
assert latex(zoo) == r"\tilde{\infty}"
assert latex(nan) == r"\mathrm{NaN}"
assert latex(I) == r"i"
def test_issue_7742():
assert -oo % 1 == nan
def test_simplify_AlgebraicNumber():
A = AlgebraicNumber
e = 3**(S(1)/6)*(3 + (135 + 78*sqrt(3))**(S(2)/3))/(45 + 26*sqrt(3))**(S(1)/3)
assert simplify(A(e)) == A(12) # wester test_C20
e = (41 + 29*sqrt(2))**(S(1)/5)
assert simplify(A(e)) == A(1 + sqrt(2)) # wester test_C21
e = (3 + 4*I)**(Rational(3, 2))
assert simplify(A(e)) == A(2 + 11*I) # issue 4401
def test_Float_idempotence():
x = Float('1.23', '')
y = Float(x)
z = Float(x, 15)
assert same_and_same_prec(y, x)
assert not same_and_same_prec(z, x)
x = Float(10**20)
y = Float(x)
z = Float(x, 15)
assert same_and_same_prec(y, x)
assert not same_and_same_prec(z, x)
def test_comp():
# sqrt(2) = 1.414213 5623730950...
a = sqrt(2).n(7)
assert comp(a, 1.41421346) is False
assert comp(a, 1.41421347)
assert comp(a, 1.41421366)
assert comp(a, 1.41421367) is False
assert comp(sqrt(2).n(2), '1.4')
assert comp(sqrt(2).n(2), Float(1.4, 2), '')
raises(ValueError, lambda: comp(sqrt(2).n(2), 1.4, ''))
assert comp(sqrt(2).n(2), Float(1.4, 3), '') is False
def test_issue_9491():
assert oo**zoo == nan
def test_issue_10063():
assert 2**Float(3) == Float(8)
def test_issue_10020():
assert oo**I is S.NaN
assert oo**(1 + I) is S.ComplexInfinity
assert oo**(-1 + I) is S.Zero
assert (-oo)**I is S.NaN
assert (-oo)**(-1 + I) is S.Zero
assert oo**t == Pow(oo, t, evaluate=False)
assert (-oo)**t == Pow(-oo, t, evaluate=False)
def test_invert_numbers():
assert S(2).invert(5) == 3
assert S(2).invert(S(5)/2) == S.Half
assert S(2).invert(5.) == 3
assert S(2).invert(S(5)) == 3
assert S(2.).invert(5) == 3
assert S(sqrt(2)).invert(5) == 1/sqrt(2)
assert S(sqrt(2)).invert(sqrt(3)) == 1/sqrt(2)
def test_mod_inverse():
assert mod_inverse(3, 11) == 4
assert mod_inverse(5, 11) == 9
assert mod_inverse(21124921, 521512) == 7713
assert mod_inverse(124215421, 5125) == 2981
assert mod_inverse(214, 12515) == 1579
assert mod_inverse(5823991, 3299) == 1442
assert mod_inverse(123, 44) == 39
assert mod_inverse(2, 5) == 3
assert mod_inverse(-2, 5) == -3
x = Symbol('x')
assert S(2).invert(x) == S.Half
raises(TypeError, lambda: mod_inverse(2, x))
raises(ValueError, lambda: mod_inverse(2, S.Half))
raises(ValueError, lambda: mod_inverse(2, cos(1)**2 + sin(1)**2))
def test_golden_ratio_rewrite_as_sqrt():
assert GoldenRatio.rewrite(sqrt) == S.Half + sqrt(5)*S.Half
def test_comparisons_with_unknown_type():
class Foo(object):
"""
Class that is unaware of Basic, and relies on both classes returning
the NotImplemented singleton for equivalence to evaluate to False.
"""
ni, nf, nr = Integer(3), Float(1.0), Rational(1, 3)
foo = Foo()
for n in ni, nf, nr, oo, -oo, zoo, nan:
assert n != foo
assert foo != n
assert not n == foo
assert not foo == n
raises(TypeError, lambda: n < foo)
raises(TypeError, lambda: foo > n)
raises(TypeError, lambda: n > foo)
raises(TypeError, lambda: foo < n)
raises(TypeError, lambda: n <= foo)
raises(TypeError, lambda: foo >= n)
raises(TypeError, lambda: n >= foo)
raises(TypeError, lambda: foo <= n)
class Bar(object):
"""
Class that considers itself equal to any instance of Number except
infinities and nans, and relies on sympy types returning the
NotImplemented singleton for symmetric equality relations.
"""
def __eq__(self, other):
if other in (oo, -oo, zoo, nan):
return False
if isinstance(other, Number):
return True
return NotImplemented
def __ne__(self, other):
return not self == other
bar = Bar()
for n in ni, nf, nr:
assert n == bar
assert bar == n
assert not n != bar
assert not bar != n
for n in oo, -oo, zoo, nan:
assert n != bar
assert bar != n
assert not n == bar
assert not bar == n
for n in ni, nf, nr, oo, -oo, zoo, nan:
raises(TypeError, lambda: n < bar)
raises(TypeError, lambda: bar > n)
raises(TypeError, lambda: n > bar)
raises(TypeError, lambda: bar < n)
raises(TypeError, lambda: n <= bar)
raises(TypeError, lambda: bar >= n)
raises(TypeError, lambda: n >= bar)
raises(TypeError, lambda: bar <= n)
def test_NumberSymbol_comparison():
rpi = Rational('905502432259640373/288230376151711744')
fpi = Float(float(pi))
assert (rpi == pi) == (pi == rpi)
assert (rpi != pi) == (pi != rpi)
assert (rpi < pi) == (pi > rpi)
assert (rpi <= pi) == (pi >= rpi)
assert (rpi > pi) == (pi < rpi)
assert (rpi >= pi) == (pi <= rpi)
assert (fpi == pi) == (pi == fpi)
assert (fpi != pi) == (pi != fpi)
assert (fpi < pi) == (pi > fpi)
assert (fpi <= pi) == (pi >= fpi)
assert (fpi > pi) == (pi < fpi)
assert (fpi >= pi) == (pi <= fpi)
| [
"[email protected]"
] | |
58bb679db3a7eb38d6e8e0ecd200d684801d97e7 | 81d635211686b1bc87af5892bd9e0fb95cc2ddb8 | /adwords api/googleads-python-lib-master/examples/dfp/v201511/user_team_association_service/create_user_team_associations.py | e98d6a7766208d4ef56d9b401e19d87e3a2da24d | [
"Apache-2.0"
] | permissive | analyticsbot/Python-Code---Part-2 | de2f0581258b6c8b8808b4ef2884fe7e323876f0 | 12bdcfdef4472bcedc77ae61707c25a4a09cba8a | refs/heads/master | 2021-06-04T05:10:33.185766 | 2016-08-31T13:45:45 | 2016-08-31T13:45:45 | 66,679,512 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,304 | py | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a user to a team by creating an association between them.
To determine which teams exist, run get_all_teams.py. To determine which
users exist, run get_all_users.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
TEAM_ID = 'INSERT_TEAM_ID_HERE'
USER_IDS = ['INSERT_USER_IDS_TO_ASSOCIATE_TO_TEAM_HERE']
def main(client, team_id, user_ids):
# Initialize appropriate service.
user_team_association_service = client.GetService(
'UserTeamAssociationService', version='v201511')
user_team_associations = []
for user_id in user_ids:
user_team_associations.append(
{
'teamId': team_id,
'userId': user_id
})
# Create the user team association on the server.
user_team_associations = (
user_team_association_service.createUserTeamAssociations(
user_team_associations))
# Display results.
if user_team_associations:
for user_team_association in user_team_associations:
print ('A user team association between user with ID \'%s\' and team with'
           ' ID \'%s\' was created.' % (user_team_association['userId'],
user_team_association['teamId']))
else:
print 'No user team associations created.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, TEAM_ID, USER_IDS)
| [
"[email protected]"
] | |
d9ac40a91577793d0c1810e9bab0ba59b898beac | b6e5d86b1212103c41fed600c937afab6b19e438 | /setup.py | 2ff8a57dbf9db9e47d19b46a2cb9a77a8fe039aa | [
"MIT"
] | permissive | 4064w007/gamma_index | 42e59554f84c8af389679705cd94b7017f10e141 | 887d3d83ab8779fc9f4ec73090ad032edec7ea73 | refs/heads/master | 2020-04-09T16:59:02.745028 | 2017-07-22T06:34:18 | 2017-07-22T06:34:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | #!/usr/bin/env python
from setuptools import setup, find_packages, Extension
import itertools
gamma_c = Extension('gamma', sources = ['gamma_index/gamma.c'])
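# Build note (standard setuptools usage, not specific to this project):
#   python setup.py build_ext --inplace   # compile the C extension in place
#   pip install .                         # or install the whole package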
options = dict(
name='gamma_index',
version='0.1.3',
packages=find_packages(),
license='MIT',
include_package_data = True,
description='gamma_index - calculation of gamma index on multi-dimensional distributions',
long_description=open('README.rst').read(),
author='Jan Pipek',
author_email='[email protected]',
url='https://github.com/janpipek/gamma_index',
install_requires = ['numpy'],
ext_modules = [gamma_c]
)
setup(**options)
| [
"[email protected]"
] | |
841412bbb38497468d17c4c073b67c60d60b2d67 | 48aa5cc42d4af35470a4ba0545dd55a0812986c7 | /mth5/groups/reports.py | 96261296b15466d3312fe39f881f1daee7142de8 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | kujaku11/mth5 | 4fb6e156bd93c13b558d69b866025a29abe8bae7 | ce814702c7116f5f0034f5d43a1392f61a2c3cd5 | refs/heads/master | 2023-08-03T17:27:37.074071 | 2023-04-18T00:22:27 | 2023-04-18T00:22:27 | 283,883,448 | 16 | 2 | MIT | 2023-09-12T23:44:45 | 2020-07-30T21:49:56 | Python | UTF-8 | Python | false | false | 1,607 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 17:03:53 2020
:copyright:
Jared Peacock ([email protected])
:license: MIT
"""
# =============================================================================
# Imports
# =============================================================================
import numpy as np
import h5py
from mth5.groups.base import BaseGroup
# =============================================================================
# Reports Group
# =============================================================================
class ReportsGroup(BaseGroup):
"""
Not sure how to handle this yet
"""
def __init__(self, group, **kwargs):
super().__init__(group, **kwargs)
# summary of reports
self._defaults_summary_attrs = {
"name": "summary",
"max_shape": (1000,),
"dtype": np.dtype(
[
("name", "S5"),
("type", "S32"),
("summary", "S200"),
("hdf5_reference", h5py.ref_dtype),
]
),
}
def add_report(self, report_name, report_metadata=None, report_data=None):
"""
:param report_name: DESCRIPTION
:type report_name: TYPE
:param report_metadata: DESCRIPTION, defaults to None
:type report_metadata: TYPE, optional
:param report_data: DESCRIPTION, defaults to None
:type report_data: TYPE, optional
:return: DESCRIPTION
:rtype: TYPE
"""
self.logger.error("Not Implemented yet")
| [
"[email protected]"
] | |
71df871980ae414b88f015be80bace9bc42fcd93 | 51b630da92fe715af6d0b8d156c3492faa93a9c2 | /task.py | 26b8a23956729efd3ebde74fd98833c67773e71e | [
"MIT"
] | permissive | rezer0dai/rewheeler | 54dc12941e65b291388875197491c248468f8c8a | 548e5784999657199f1bc51d5a8b94d12cb27fce | refs/heads/master | 2020-06-03T00:15:27.327197 | 2019-06-18T09:09:30 | 2019-06-18T09:09:30 | 191,357,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,758 | py | import numpy as np
import random
CLOSE_ENOUGH = 1.15
def extract_goal(state):
return state[-4-3:-1-3]
# https://github.com/Unity-Technologies/ml-agents/blob/master/UnitySDK/Assets/ML-Agents/Examples/Reacher/Scripts/ReacherAgent.cs
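# Descriptive comment (interpretation inferred from the slicing below, not
# from project docs): transform() reorders the raw observation so pendulumB's
# position comes first, and extract_goal() pulls the 3-d goal position from
# near the tail of the raw state vector.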
def transform(obs):
return np.hstack([
obs[3+4+3+3:3+4+3+3+3], #pendulumB position
obs[:3+4+3+3], # pendulumA info
obs[3+4+3+3+3:-4-3], # pundulumB rest of info
obs[-1-3:] #speed + hand position
])
def goal_distance(goal_a, goal_b):
# assert goal_a.shape == goal_b.shape
return np.linalg.norm(goal_a[:3] - goal_b[:3])
def fun_reward(s, n, goal, her): # 3D navigation
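    # Descriptive comment (inferred from the constants, not project docs):
    # per axis, -.01 when farther than 5*CLOSE_ENOUGH from the goal, +.01
    # inside 3*CLOSE_ENOUGH, and -.01 inside 1*CLOSE_ENOUGH; the `n` and
    # `her` arguments are currently unused in this reward variant.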
return (
-.01 * (5 * CLOSE_ENOUGH < np.abs(s[0] - goal[0])),
-.01 * (5 * CLOSE_ENOUGH < np.abs(s[1] - goal[1])),
-.01 * (5 * CLOSE_ENOUGH < np.abs(s[2] - goal[2])),
+.01 * (3 * CLOSE_ENOUGH > np.abs(s[0] - goal[0])),
+.01 * (3 * CLOSE_ENOUGH > np.abs(s[1] - goal[1])),
+.01 * (3 * CLOSE_ENOUGH > np.abs(s[2] - goal[2])),
-.01 * (1 * CLOSE_ENOUGH > np.abs(s[0] - goal[0])),
-.01 * (1 * CLOSE_ENOUGH > np.abs(s[1] - goal[1])),
-.01 * (1 * CLOSE_ENOUGH > np.abs(s[2] - goal[2])),
)
def goal_select(s, trajectory, gid):
return random.randint(0, len(trajectory)-1)
def update_goal_curyctor(n_step):
MAX_HER_STEP = 1
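    # HER-style relabeling sketch (interpretation inferred from the code
    # below, not from project docs): transitions flagged for update get a
    # past trajectory goal re-selected via goal_select, and their rewards
    # are recomputed with fun_reward(..., her=True).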
def update_goal(rewards, goals, states, n_goals, n_states, update, n_steps):
gid = 0
delta = 0
for i, (g, s, n_g, n, u, step) in enumerate(zip(goals, states, n_goals, n_states, update, n_steps)):
her_active = bool(sum(update[(i-MAX_HER_STEP) if MAX_HER_STEP < i else 0:i]))
if not her_active and u: # only here we do HER approach and setuping new goal
# last n-steps are by design *NOT selected* to replay anyway
gid = goal_select(s, goals[:-n_step-MAX_HER_STEP], 0)
delta = 0
if her_active or u:
if gid>=0 and gid+delta+n_step<len(goals) and i<len(states)-n_step: # actually HER goal was assigned
assert step is not None, "step is none ... {} {} {} {}".format(i, gid, delta, len(states))# 1 11 0 50
g, n_g = goals[gid+delta], goals[gid+delta+step]
delta += 1
yield (
fun_reward(s, n, g, True),
g, s,
n_g, n,
gid<0 or gid+delta+MAX_HER_STEP<len(goals)-n_step
)
return update_goal
# TEMPORARY IMPLEMENTATION ~ testing on Tennis environment from UnityML framework
class Task:
def __init__(self):
from unityagents import UnityEnvironment
self.ENV = UnityEnvironment(file_name='./reach/Reacher.x86_64')
# self.ENV = UnityEnvironment(file_name='./data/Tennis.x86_64')
self.BRAIN_NAME = self.ENV.brain_names[0]
self.random_cut = None
def reset(self, seed, learn_mode):
# einfo = self.ENV.reset(config={"goal_size":4.4 * CLOSE_ENOUGH, "goal_speed":.3})[self.BRAIN_NAME]
einfo = self.ENV.reset()[self.BRAIN_NAME]
self.random_cut = random.randint(0, len(einfo.vector_observations) - 1)
states = self._reflow(einfo.vector_observations)
self._decouple(states)
return self.states
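    # Descriptive comment (inferred from the code below): _reflow rotates the
    # per-agent rows by random_cut, and _deflow undoes that rotation before
    # actions are handed back to the environment.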
def _reflow(self, data):
return np.vstack([ data[self.random_cut:], data[:self.random_cut] ])
def _deflow(self, data):
return np.vstack([ data[-self.random_cut:], data[:-self.random_cut] ])
def _decouple(self, states):
self.goals, self.states = zip(
*[ (extract_goal(s), transform(s)) for s in states ])
self.goals = np.vstack(self.goals)
self.states = np.vstack(self.states)
def goal(self):
return self.goals#.reshape(len(self.goals), -1)
        #return np.zeros([20, 1])  # unreachable leftovers kept for reference
        #return np.zeros([2, 1])
def step(self, actions, learn_mode):
act_env = self._deflow(actions).reshape(-1)
einfo = self.ENV.step(act_env)[self.BRAIN_NAME]
states = self._reflow(einfo.vector_observations)
dones = self._reflow(np.asarray(einfo.local_done).reshape(len(states), -1))
rewards = self._reflow(np.asarray(einfo.rewards).reshape(len(states), -1))
goods = np.ones([len(rewards), 1])
self._decouple(states)
if not learn_mode:#True:#
return actions, self.states, rewards, dones, goods
rewards = np.vstack([
fun_reward(s, None, g, False) for i, (s, g) in enumerate(zip(self.states, self.goals))
])
return actions, self.states, rewards, dones, goods
def goal_met(self, rewards):
return rewards > 30.
        #return rewards > .5  # unreachable leftover kept for reference
| [
"[email protected]"
] | |
3e4394a0646eda47d6f9ce72abd3a4216e14eb9e | 9d24ead596fd7a4135b3ed2ed840a85ceef1cd56 | /COVIDPandasUppgift/balls/new.py | 07b66f9613e31b55fc20047c6eb620171a4789ec | [] | no_license | blarpet/PRO-AXEL-JOBSON | 656daaf2cceabc4916312fa43d3f5c050fdd513a | dd9a073cdc6e8ba0cd0237b0a25945c31528a53e | refs/heads/master | 2023-05-08T19:56:44.803866 | 2021-05-31T11:29:07 | 2021-05-31T11:29:07 | 290,454,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | import pandas as pd
import plotly.express as px
df = pd.read_csv("National_Total_Deaths_by_Age_Group.csv")
options = []
for age in df.Age_Group:
options.append(dict(label = age, value = age))
fig = px.bar(df,x = "Age_Group", y = "Total_Cases")
fig.write_html('first_figure.html', auto_open=True)
print(df.head())
#df_AG = df[df["Age_Group"] == "0-9"]
#df_AG = df_AG.transpose().iloc[1:]
#print(df_AG)
| [
"[email protected]"
] | |
722d4642986e50ffbe82058abcf1195bc73e7946 | 5f51d41baa66867c48694e633d8ac1c757b385af | /0x00-python_variable_annotations/6-sum_mixed_list.py | 95b9903a8bb6235692db2cb5b3bdc725c94ceaa4 | [] | no_license | JuanOlivares1/holbertonschool-web_back_end | cd7f53fbaffc837b5c569ce740542a0ef22d2363 | 43dee7a118424d8e0a12f4c2a7109f331ac73d5c | refs/heads/main | 2023-08-20T14:16:28.028464 | 2021-10-07T22:55:33 | 2021-10-07T22:55:33 | 387,531,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | #!/usr/bin/env python3
""" Module """
from typing import List, Union
def sum_mixed_list(mxd_lst: List[Union[int, float]]) -> float:
""" Returns the sum of all list's items """
sum: float = 0
for n in mxd_lst:
sum += n
return sum
| [
"[email protected]"
] | |
af89d27822a95aad81e261d0a5997807ad93618b | d115cf7a1b374d857f6b094d4b4ccd8e9b1ac189 | /tags/pyplusplus_dev_0.9.5/pyplusplus/module_creator/creator.py | 6b1699cf983a159a8044bc084e0af529bf9f74b8 | [
"BSL-1.0"
] | permissive | gatoatigrado/pyplusplusclone | 30af9065fb6ac3dcce527c79ed5151aade6a742f | a64dc9aeeb718b2f30bd6a5ff8dcd8bfb1cd2ede | refs/heads/master | 2016-09-05T23:32:08.595261 | 2010-05-16T10:53:45 | 2010-05-16T10:53:45 | 700,369 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 35,704 | py | # Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import types_database
import creators_wizard
import sort_algorithms
import dependencies_manager
import opaque_types_manager
import call_policies_resolver
from pygccxml import declarations
from pyplusplus import decl_wrappers
from pyplusplus import code_creators
from pyplusplus import code_repository
from pyplusplus import _logging_
ACCESS_TYPES = declarations.ACCESS_TYPES
VIRTUALITY_TYPES = declarations.VIRTUALITY_TYPES
#TODO: don't export functions that return a non-const pointer to fundamental types
#TODO: add print decl_wrapper.readme messages
#class Foo{
# union {
# struct {
# float r,g,b,a;
# };
# float val[4];
# };
# };
class creator_t( declarations.decl_visitor_t ):
"""Creating code creators.
This class takes a set of declarations as input and creates a code
creator tree that contains the Boost.Python C++ source code for the
final extension module. Each node in the code creators tree represents
a block of text (C++ source code).
Usage of this class: Create an instance and pass all relevant input
data to the constructor. Then call L{create()} to obtain the code
creator tree whose root node is a L{module_t<code_creators.module_t>}
object representing the source code for the entire extension module.
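    A minimal usage sketch (C{decls} and the module name are placeholders,
    not taken from this code base):
        mcreator = creator_t( decls, module_name='my_module' )
        extmodule = mcreator.create()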
"""
def __init__( self
, decls
, module_name
, boost_python_ns_name='bp'
, call_policies_resolver_=None
, types_db=None
, target_configuration=None
, enable_indexing_suite=True
, doc_extractor=None ):
"""Constructor.
@param decls: Declarations that should be exposed in the final module.
@param module_name: The name of the final module.
@param boost_python_ns_name: The alias for the boost::python namespace.
@param call_policies_resolver_: Callable that takes one declaration (calldef_t) as input and returns a call policy object which should be used for this declaration.
@param types_db: ...todo...
@param target_configuration: A target configuration object can be used to customize the generated source code to a particular compiler or a particular version of Boost.Python.
@param doc_extractor: callable, that takes as argument declaration reference and returns documentation string
        @param already_exposed_dbs: list of files/directories of other modules that this module depends on and whose code has already been generated
@type decls: list of declaration_t
@type module_name: str
@type boost_python_ns_name: str
@type call_policies_resolver_: callable
@type types_db: L{types_database_t<types_database.types_database_t>}
@type target_configuration: L{target_configuration_t<code_creators.target_configuration_t>}
@type doc_extractor: callable
@type already_exposed_dbs: list of strings
"""
declarations.decl_visitor_t.__init__(self)
self.logger = _logging_.loggers.module_builder
self.decl_logger = _logging_.loggers.declarations
self.__enable_indexing_suite = enable_indexing_suite
self.__target_configuration = target_configuration
if not self.__target_configuration:
self.__target_configuration = code_creators.target_configuration_t()
self.__call_policies_resolver = call_policies_resolver_
if not self.__call_policies_resolver:
self.__call_policies_resolver \
= call_policies_resolver.built_in_resolver_t(self.__target_configuration)
self.__types_db = types_db
if not self.__types_db:
self.__types_db = types_database.types_database_t()
self.__extmodule = code_creators.module_t( declarations.get_global_namespace(decls) )
if boost_python_ns_name:
bp_ns_alias = code_creators.namespace_alias_t( alias=boost_python_ns_name
, full_namespace_name='::boost::python' )
self.__extmodule.adopt_creator( bp_ns_alias )
self.__module_body = code_creators.module_body_t( name=module_name )
self.__extmodule.adopt_creator( self.__module_body )
self.__opaque_types_manager = opaque_types_manager.manager_t( self.__extmodule )
self.__dependencies_manager = dependencies_manager.manager_t(self.decl_logger)
prepared_decls = self._prepare_decls( decls, doc_extractor )
self.__decls = sort_algorithms.sort( prepared_decls )
self.curr_code_creator = self.__module_body
self.curr_decl = None
self.__array_1_registered = set() #(type.decl_string,size)
self.__free_operators = []
self.__exposed_free_fun_overloads = set()
def __print_readme( self, decl ):
readme = decl.readme()
if not readme:
return
if not decl.exportable:
reason = readme[0]
readme = readme[1:]
self.decl_logger.warn( "%s;%s" % ( decl, reason ) )
for msg in readme:
self.decl_logger.warn( "%s;%s" % ( decl, msg ) )
def _prepare_decls( self, decls, doc_extractor ):
to_be_exposed = []
for decl in declarations.make_flatten( decls ):
if decl.ignore:
continue
if isinstance( decl, declarations.namespace_t ):
continue
if not decl.exportable:
#leave only decls that user wants to export and that could be exported
self.__print_readme( decl )
continue
if decl.already_exposed:
                #check whether this is already exposed in another module
continue
if isinstance( decl.parent, declarations.namespace_t ):
#leave only declarations defined under namespace, but remove namespaces
to_be_exposed.append( decl )
            #Right now this functionality introduces a bug: declarations that
            #should not be exported are, for some reason, not marked as such.
            #I will need to find out why.
#if isinstance( decl, declarations.calldef_t ) and not isinstance( decl, declarations.destructor_t ):
#self.__types_db.update( decl )
#if None is decl.call_policies:
#decl.call_policies = self.__call_policies_resolver( decl )
#if isinstance( decl, declarations.variable_t ):
#self.__types_db.update( decl )
if doc_extractor:
decl.documentation = doc_extractor( decl )
self.__print_readme( decl )
return to_be_exposed
def _adopt_free_operator( self, operator ):
def adopt_operator_impl( operator, found_creators ):
creator = filter( lambda creator: isinstance( creator, code_creators.class_t )
, found_creators )
if len(creator) == 1:
creator = creator[0]
#I think I don't need this condition any more
if not find( lambda creator: isinstance( creator, code_creators.declaration_based_t )
and operator is creator.declaration
, creator.creators ):
#expose operator only once
self.__dependencies_manager.add_exported( operator )
creator.adopt_creator( code_creators.operator_t( operator=operator ) )
elif not creator:
pass
else:
assert not "Found %d class code creators" % len(creator)
find = code_creators.creator_finder.find_by_declaration
if isinstance( operator.parent, declarations.class_t ):
found = find( lambda decl: operator.parent is decl
, self.__extmodule.body.creators )
adopt_operator_impl( operator, found )
else:
#select all to be exposed declarations
included = filter( lambda decl: decl.ignore == False, operator.class_types )
if not included:
msg = 'Py++ bug found!' \
' For some reason Py++ decided to expose free operator "%s", when all class types related to the operator definition are excluded.' \
' Please report this bug. Thanks! '
raise RuntimeError( msg % str( operator ) )
found = find( lambda decl: included[0] is decl, self.__extmodule.body.creators )
adopt_operator_impl( operator, found )
def _is_registered_smart_pointer_creator( self, creator, db ):
for registered in db:
if not isinstance( creator, registered.__class__ ):
continue
elif registered.smart_ptr != creator.smart_ptr:
continue
elif isinstance( creator, code_creators.smart_pointer_registrator_t ):
if creator.declaration is registered.declaration:
return True
elif isinstance( creator, code_creators.smart_pointers_converter_t ):
if ( creator.source is registered.source ) \
and ( creator.target is registered.target ):
return True
else:
assert not "unknown instace of registrator: " % str( registered )
def _treat_smart_pointers( self ):
""" Go to all class creators and apply held_type and creator registrators
as needed.
"""
find_classes = code_creators.creator_finder.find_by_class_instance
class_creators = find_classes( what=code_creators.class_t
, where=self.__extmodule.body.creators
, recursive=True )
registrators_db = []
for creator in class_creators:
if None is creator.held_type:
if not creator.declaration.is_abstract:
creator.held_type = self.__types_db.create_holder( creator.declaration )
registrators = self.__types_db.create_registrators( creator )
for r in registrators:
if not self._is_registered_smart_pointer_creator( r, registrators_db ):
creator.adopt_creator(r)
registrators_db.append(r)
def _append_user_code( self ):
find_classes = code_creators.creator_finder.find_by_class_instance
class_creators = find_classes( what=code_creators.class_t
, where=self.__extmodule.body.creators
, recursive=True )
ctext_t = code_creators.custom_text_t
for cls_creator in class_creators:
cls_decl = cls_creator.declaration
#uc = user code
uc_creators = map( lambda uc: ctext_t( uc.text, uc.works_on_instance )
, cls_decl.registration_code )
cls_creator.adopt_creators( uc_creators )
uc_creators = map( lambda uc: ctext_t( uc.text ), cls_decl.wrapper_code )
if uc_creators:
cls_creator.wrapper.adopt_creators( uc_creators )
uc_creators = map( lambda uc: ctext_t( uc.text ), cls_decl.declaration_code )
insert_pos = self.__extmodule.creators.index( self.__module_body )
self.__extmodule.adopt_creators( uc_creators, insert_pos )
cls_creator.associated_decl_creators.extend( uc_creators )
def _treat_indexing_suite( self ):
def create_explanation(cls):
msg = '//WARNING: the next line of code will not compile, because "%s" does not have operator== !'
msg = msg % cls.indexing_suite.element_type.decl_string
return code_creators.custom_text_t( msg, False )
def create_cls_cc( cls ):
if isinstance( cls, declarations.class_t ):
return code_creators.class_t( class_inst=cls )
else:
return code_creators.class_declaration_t( class_inst=cls )
if not self.__types_db.used_containers:
return
creators = []
created_value_traits = set()
cmp_by_name = lambda cls1, cls2: cmp( cls1.decl_string, cls2.decl_string )
used_containers = list( self.__types_db.used_containers )
used_containers = filter( lambda cls: cls.indexing_suite.include_files
, used_containers )
used_containers.sort( cmp_by_name )
for cls in used_containers:
self.__print_readme( cls )
if cls.already_exposed:
continue
cls_creator = create_cls_cc( cls )
self.__dependencies_manager.add_exported( cls )
creators.append( cls_creator )
try:
element_type = cls.indexing_suite.element_type
            except Exception:
element_type = None
if isinstance( cls.indexing_suite, decl_wrappers.indexing_suite1_t ):
if not ( None is element_type ) \
and declarations.is_class( element_type ) \
and not declarations.has_public_equal( element_type ):
cls_creator.adopt_creator( create_explanation( cls ) )
cls_creator.adopt_creator( code_creators.indexing_suite1_t(cls) )
else:
class_traits = declarations.class_traits
if not ( None is element_type ) and class_traits.is_my_case( element_type ):
value_cls = class_traits.get_declaration( element_type )
if value_cls not in created_value_traits:
created_value_traits.add( value_cls )
element_type_cc = code_creators.value_traits_t( value_cls )
self.__extmodule.adopt_declaration_creator( element_type_cc )
cls_creator.adopt_creator( code_creators.indexing_suite2_t(cls) )
creators.reverse()
self.__module_body.adopt_creators( creators, 0 )
def create(self, decl_headers=None):
"""Create and return the module for the extension.
@param decl_headers: If None the headers for the wrapped decls are automatically found.
But you can pass a list of headers here to override that search.
@returns: Returns the root of the code creators tree
@rtype: L{module_t<code_creators.module_t>}
"""
# Invoke the appropriate visit_*() method on all decls
for decl in self.__decls:
self.curr_decl = decl
declarations.apply_visitor( self, decl )
for operator in self.__free_operators:
self._adopt_free_operator( operator )
self._treat_smart_pointers()
if self.__enable_indexing_suite:
self._treat_indexing_suite()
for creator in code_creators.make_flatten_generator( self.__extmodule ):
creator.target_configuration = self.__target_configuration
#last action.
self._append_user_code()
add_include = self.__extmodule.add_include
#add system headers
system_headers = self.__extmodule.get_system_headers( recursive=True, unique=True )
map( lambda header: add_include( header, user_defined=False, system=True )
, system_headers )
#add user defined header files
if decl_headers is None:
decl_headers = declarations.declaration_files( self.__decls )
map( lambda header: add_include( header, user_defined=False, system=False )
, decl_headers )
self.__dependencies_manager.inform_user()
return self.__extmodule
def visit_member_function( self ):
fwrapper = None
self.__types_db.update( self.curr_decl )
self.__dependencies_manager.add_exported( self.curr_decl )
if None is self.curr_decl.call_policies:
self.curr_decl.call_policies = self.__call_policies_resolver( self.curr_decl )
maker_cls, fwrapper_cls = creators_wizard.find_out_mem_fun_creator_classes( self.curr_decl )
maker = None
fwrapper = None
if fwrapper_cls:
fwrapper = fwrapper_cls( function=self.curr_decl )
if fwrapper_cls is code_creators.mem_fun_transformed_wrapper_t:
if self.curr_code_creator.wrapper:
class_wrapper = self.curr_code_creator.wrapper
class_wrapper.adopt_creator( fwrapper )
else:
self.__extmodule.adopt_declaration_creator( fwrapper )
self.curr_code_creator.associated_decl_creators.append(fwrapper)
else:
class_wrapper = self.curr_code_creator.wrapper
class_wrapper.adopt_creator( fwrapper )
if maker_cls:
if fwrapper:
maker = maker_cls( function=self.curr_decl, wrapper=fwrapper )
else:
maker = maker_cls( function=self.curr_decl )
self.curr_code_creator.adopt_creator( maker )
self.__opaque_types_manager.register_opaque( maker, self.curr_decl )
if self.curr_decl.has_static:
#static_method should be created only once.
found = filter( lambda creator: isinstance( creator, code_creators.static_method_t )
and creator.declaration.name == self.curr_decl.name
, self.curr_code_creator.creators )
if not found:
static_method = code_creators.static_method_t( function=self.curr_decl
, function_code_creator=maker )
self.curr_code_creator.adopt_creator( static_method )
def visit_constructor( self ):
self.__types_db.update( self.curr_decl )
self.__dependencies_manager.add_exported( self.curr_decl )
if self.curr_decl.allow_implicit_conversion:
maker = code_creators.casting_constructor_t( constructor=self.curr_decl )
self.__module_body.adopt_creator( maker )
cwrapper = None
if self.curr_decl.parent.is_wrapper_needed():
class_wrapper = self.curr_code_creator.wrapper
cwrapper = code_creators.constructor_wrapper_t( constructor=self.curr_decl )
class_wrapper.adopt_creator( cwrapper )
#TODO: FT for constructor
#~ if self.curr_decl.transformations:
#~ cwrapper = code_creators.constructor_transformed_wrapper_t( constructor=self.curr_decl )
#~ class_wrapper.adopt_creator( cwrapper )
#~ else:
#~ if self.curr_decl.transformations:
#~ cwrapper = code_creators.constructor_transformed_wrapper_t( constructor=self.curr_decl )
#~ class_wrapper.adopt_creator( cwrapper )
#~ self.__module_body.adopt_creator( cwrapper )
#~ self.curr_code_creator.associated_decl_creators.append( cwrapper )
#~ maker = None
#~ if self.curr_decl.transformations:
#~ maker = code_creators.constructor_transformed_t( constructor=self.curr_decl )
#~ else:
maker = code_creators.constructor_t( constructor=self.curr_decl, wrapper=cwrapper )
if None is self.curr_decl.call_policies:
self.curr_decl.call_policies = self.__call_policies_resolver( self.curr_decl )
self.curr_code_creator.adopt_creator( maker )
def visit_destructor( self ):
pass
def visit_member_operator( self ):
if self.curr_decl.symbol in ( '()', '[]', '=' ):
self.visit_member_function()
else:
self.__types_db.update( self.curr_decl )
maker = code_creators.operator_t( operator=self.curr_decl )
self.curr_code_creator.adopt_creator( maker )
self.__dependencies_manager.add_exported( self.curr_decl )
def visit_casting_operator( self ):
self.__dependencies_manager.add_exported( self.curr_decl )
if None is self.curr_decl.call_policies:
self.curr_decl.call_policies = self.__call_policies_resolver( self.curr_decl )
self.__types_db.update( self.curr_decl )
if not self.curr_decl.parent.is_abstract and not declarations.is_reference( self.curr_decl.return_type ):
maker = code_creators.casting_operator_t( operator=self.curr_decl )
self.__module_body.adopt_creator( maker )
self.__opaque_types_manager.register_opaque( maker, self.curr_decl )
#what to do if class is abstract
maker = code_creators.casting_member_operator_t( operator=self.curr_decl )
self.curr_code_creator.adopt_creator( maker )
self.__opaque_types_manager.register_opaque( maker, self.curr_decl )
def visit_free_function( self ):
if self.curr_decl in self.__exposed_free_fun_overloads:
return
elif self.curr_decl.use_overload_macro:
parent_decl = self.curr_decl.parent
names = set( map( lambda decl: decl.name
, parent_decl.free_functions( allow_empty=True, recursive=False ) ) )
for name in names:
overloads = parent_decl.free_functions( name, allow_empty=True, recursive=False )
overloads = filter( lambda decl: decl.ignore == False and decl.use_overload_macro, overloads )
if not overloads:
continue
else:
self.__exposed_free_fun_overloads.update( overloads )
for f in overloads:
self.__types_db.update( f )
self.__dependencies_manager.add_exported( f )
if None is f.call_policies:
f.call_policies = self.__call_policies_resolver( f )
overloads_cls_creator = code_creators.free_fun_overloads_class_t( overloads )
self.__extmodule.adopt_declaration_creator( overloads_cls_creator )
overloads_reg = code_creators.free_fun_overloads_t( overloads_cls_creator )
self.curr_code_creator.adopt_creator( overloads_reg )
overloads_reg.associated_decl_creators.append( overloads_cls_creator )
self.__opaque_types_manager.register_opaque( overloads_reg, overloads )
ctext_t = code_creators.custom_text_t
for f in overloads:
uc_creators = map( lambda uc: ctext_t( uc.text ), f.declaration_code )
insert_pos = self.__extmodule.creators.index( self.__module_body )
self.__extmodule.adopt_creators( uc_creators, insert_pos )
overloads_reg.associated_decl_creators.extend( uc_creators )
else:
self.__types_db.update( self.curr_decl )
self.__dependencies_manager.add_exported( self.curr_decl )
if None is self.curr_decl.call_policies:
self.curr_decl.call_policies = self.__call_policies_resolver( self.curr_decl )
maker = None
if self.curr_decl.transformations:
wrapper = code_creators.free_fun_transformed_wrapper_t( self.curr_decl )
self.__extmodule.adopt_declaration_creator( wrapper )
maker = code_creators.free_fun_transformed_t( self.curr_decl, wrapper )
maker.associated_decl_creators.append( wrapper )
else:
maker = code_creators.free_function_t( function=self.curr_decl )
self.curr_code_creator.adopt_creator( maker )
self.__opaque_types_manager.register_opaque( maker, self.curr_decl )
ctext_t = code_creators.custom_text_t
uc_creators = map( lambda uc: ctext_t( uc.text ), self.curr_decl.declaration_code )
insert_pos = self.__extmodule.creators.index( self.__module_body )
self.__extmodule.adopt_creators( uc_creators, insert_pos )
maker.associated_decl_creators.extend( uc_creators )
def visit_free_operator( self ):
self.__types_db.update( self.curr_decl )
self.__free_operators.append( self.curr_decl )
def visit_class_declaration(self ):
pass
def expose_overloaded_mem_fun_using_macro( self, cls, cls_creator ):
#returns set of exported member functions
exposed = set()
names = set( map( lambda decl: decl.name
, cls.member_functions( allow_empty=True, recursive=False ) ) )
for name in names:
overloads = cls.member_functions( name, allow_empty=True, recursive=False )
overloads = filter( lambda decl: decl.ignore == False and decl.use_overload_macro
, overloads )
if not overloads:
continue
else:
exposed.update( overloads )
for f in overloads:
self.__types_db.update( f )
self.__dependencies_manager.add_exported( f )
if None is f.call_policies:
f.call_policies = self.__call_policies_resolver( f )
overloads_cls_creator = code_creators.mem_fun_overloads_class_t( overloads )
self.__extmodule.adopt_declaration_creator( overloads_cls_creator )
overloads_reg = code_creators.mem_fun_overloads_t( overloads_cls_creator )
cls_creator.adopt_creator( overloads_reg )
overloads_reg.associated_decl_creators.append( overloads_cls_creator )
self.__opaque_types_manager.register_opaque( overloads_reg, overloads )
return exposed
def visit_class(self ):
self.__dependencies_manager.add_exported( self.curr_decl )
cls_decl = self.curr_decl
cls_parent_cc = self.curr_code_creator
exportable_members = self.curr_decl.get_exportable_members(sort_algorithms.sort)
wrapper = None
cls_cc = code_creators.class_t( class_inst=self.curr_decl )
if self.curr_decl.is_wrapper_needed():
wrapper = code_creators.class_wrapper_t( declaration=self.curr_decl
, class_creator=cls_cc )
cls_cc.wrapper = wrapper
cls_cc.associated_decl_creators.append( wrapper )
#insert wrapper before module body
if isinstance( self.curr_decl.parent, declarations.class_t ):
#we deal with internal class
self.curr_code_creator.wrapper.adopt_creator( wrapper )
else:
self.__extmodule.adopt_declaration_creator( wrapper )
#next constructors are not present in code, but compiler generated
#Boost.Python requiers them to be declared in the wrapper class
if '0.9' in self.curr_decl.compiler:
copy_constr = self.curr_decl.find_copy_constructor()
add_to_wrapper = False
if declarations.has_copy_constructor( self.curr_decl ):
#find out whether user or compiler defined it
if self.curr_decl.noncopyable:
add_to_wrapper = False
elif not copy_constr:
add_to_wrapper = True #compiler defined will not be exposed manually later
elif copy_constr.is_artificial:
add_to_wrapper = True #compiler defined will not be exposed manually later
if add_to_wrapper:
cccc = code_creators.copy_constructor_wrapper_t( class_=self.curr_decl)
wrapper.adopt_creator( cccc )
trivial_constr = self.curr_decl.find_trivial_constructor()
add_to_wrapper = False
if declarations.has_trivial_constructor( self.curr_decl ):
if not trivial_constr:
add_to_wrapper = True
elif trivial_constr.is_artificial:
add_to_wrapper = True
if add_to_wrapper:
tcons = code_creators.null_constructor_wrapper_t( class_=self.curr_decl )
wrapper.adopt_creator( tcons )
else:
if declarations.has_copy_constructor( self.curr_decl ):
copy_constr = self.curr_decl.find_copy_constructor()
if not self.curr_decl.noncopyable and copy_constr.is_artificial:
cccc = code_creators.copy_constructor_wrapper_t( class_=self.curr_decl)
wrapper.adopt_creator( cccc )
null_constr = self.curr_decl.find_trivial_constructor()
if null_constr and null_constr.is_artificial:
#this constructor is not going to be exposed
tcons = code_creators.null_constructor_wrapper_t( class_=self.curr_decl )
wrapper.adopt_creator( tcons )
exposed = self.expose_overloaded_mem_fun_using_macro( cls_decl, cls_cc )
cls_parent_cc.adopt_creator( cls_cc )
self.curr_code_creator = cls_cc
for decl in exportable_members:
if decl in exposed:
continue
self.curr_decl = decl
declarations.apply_visitor( self, decl )
for redefined_func in cls_decl.redefined_funcs():
if isinstance( redefined_func, declarations.operator_t ):
continue
self.curr_decl = redefined_func
declarations.apply_visitor( self, redefined_func )
        #all static_method_t creators should be moved to the end;
        #a better approach would be to move each one right after the last def of its function
static_methods = filter( lambda creator: isinstance( creator, code_creators.static_method_t )
, cls_cc.creators )
for static_method in static_methods:
cls_cc.remove_creator( static_method )
cls_cc.adopt_creator( static_method )
if cls_decl.exception_translation_code:
translator = code_creators.exception_translator_t( cls_decl )
self.__extmodule.adopt_declaration_creator( translator )
cls_cc.associated_decl_creators.append( translator )
translator_register \
= code_creators.exception_translator_register_t( cls_decl, translator )
cls_cc.adopt_creator( translator_register )
for property_def in cls_decl.properties:
cls_cc.adopt_creator( code_creators.property_t(property_def) )
self.curr_decl = cls_decl
self.curr_code_creator = cls_parent_cc
def visit_enumeration(self):
self.__dependencies_manager.add_exported( self.curr_decl )
maker = None
if self.curr_decl.name:
maker = code_creators.enum_t( enum=self.curr_decl )
else:
maker = code_creators.unnamed_enum_t( unnamed_enum=self.curr_decl )
self.curr_code_creator.adopt_creator( maker )
def visit_namespace(self):
pass
def visit_typedef(self):
pass
def _register_array_1( self, array_type ):
data = ( array_type.decl_string, declarations.array_size( array_type ) )
if data in self.__array_1_registered:
return False
else:
self.__array_1_registered.add( data )
return True
def visit_variable(self):
self.__types_db.update( self.curr_decl )
self.__dependencies_manager.add_exported( self.curr_decl )
if declarations.is_array( self.curr_decl.type ):
if self._register_array_1( self.curr_decl.type ):
array_1_registrator = code_creators.array_1_registrator_t( array_type=self.curr_decl.type )
self.curr_code_creator.adopt_creator( array_1_registrator )
if isinstance( self.curr_decl.parent, declarations.namespace_t ):
maker = None
wrapper = None
if declarations.is_array( self.curr_decl.type ):
wrapper = code_creators.array_gv_wrapper_t( variable=self.curr_decl )
maker = code_creators.array_gv_t( variable=self.curr_decl, wrapper=wrapper )
else:
maker = code_creators.global_variable_t( variable=self.curr_decl )
if wrapper:
self.__extmodule.adopt_declaration_creator( wrapper )
else:
maker = None
wrapper = None
if self.curr_decl.bits != None:
wrapper = code_creators.bit_field_wrapper_t( variable=self.curr_decl )
maker = code_creators.bit_field_t( variable=self.curr_decl, wrapper=wrapper )
elif declarations.is_array( self.curr_decl.type ):
wrapper = code_creators.array_mv_wrapper_t( variable=self.curr_decl )
maker = code_creators.array_mv_t( variable=self.curr_decl, wrapper=wrapper )
elif declarations.is_pointer( self.curr_decl.type ):
wrapper = code_creators.member_variable_wrapper_t( variable=self.curr_decl )
maker = code_creators.member_variable_t( variable=self.curr_decl, wrapper=wrapper )
elif declarations.is_reference( self.curr_decl.type ):
if None is self.curr_decl.getter_call_policies:
self.curr_decl.getter_call_policies = self.__call_policies_resolver( self.curr_decl, 'get' )
if None is self.curr_decl.setter_call_policies:
self.curr_decl.setter_call_policies = self.__call_policies_resolver( self.curr_decl, 'set' )
wrapper = code_creators.mem_var_ref_wrapper_t( variable=self.curr_decl )
maker = code_creators.mem_var_ref_t( variable=self.curr_decl, wrapper=wrapper )
self.__opaque_types_manager.register_opaque( maker, self.curr_decl )
else:
maker = code_creators.member_variable_t( variable=self.curr_decl )
if wrapper:
self.curr_code_creator.wrapper.adopt_creator( wrapper )
self.curr_code_creator.adopt_creator( maker )
| [
"roman_yakovenko@dc5859f9-2512-0410-ae5c-dd123cda1f76"
] | roman_yakovenko@dc5859f9-2512-0410-ae5c-dd123cda1f76 |
29a8e3fae3cc14190dd745fda358b6adbedd3235 | 28de04457e8ebcd1b34494db07bde8a3f25d8cf1 | /easy/relative_ranks_506.py | dba84689bb3b4ada3bd233ad1d4f741db9778e21 | [] | no_license | YangXinNewlife/LeetCode | 1df4218eef6b81db81bf2f0548d0a18bc9a5d672 | 20d3d0aa325d79c716acfc75daef32f8d4f9f1ad | refs/heads/master | 2023-08-16T23:18:29.776539 | 2023-08-15T15:53:30 | 2023-08-15T15:53:30 | 70,552,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # -*- coding:utf-8 -*-
__author__ = 'yangxin_ryan'
"""
Solutions:
The approach here is to sort the scores in descending order first; by default
the top three elements map to:
Gold Medal
Silver Medal
Bronze Medal
The remaining entries are replaced with their rank (index + 1) as a string.
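Illustrative example (LeetCode 506): nums = [5, 4, 3, 2, 1] gives
["Gold Medal", "Silver Medal", "Bronze Medal", "4", "5"].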
"""
from typing import List
class RelativeRanks(object):
def findRelativeRanks(self, nums: List[int]) -> List[str]:
pos = {n: i + 1 for i, n in enumerate(sorted(nums, reverse=True))}
def func(x):
if pos[x] == 1:
return "Gold Medal"
elif pos[x] == 2:
return "Silver Medal"
elif pos[x] == 3:
return "Bronze Medal"
else:
return str(pos[x])
        return list(map(func, nums))  # list() so the List[str] annotation holds
| [
"[email protected]"
] | |
48105e2c2eb93f33c63a7ae9e894b6c88b70494d | 16caebb320bb10499d3712bf0bdc07539a4d0007 | /objc/_ContactsUICore.py | dea39d4b0255acdcdd6be154fd08c31e13327e61 | [] | no_license | swosnick/Apple-Frameworks-Python | 876d30f308a7ac1471b98a9da2fabd22f30c0fa5 | 751510137e9fa35cc806543db4e4415861d4f252 | refs/heads/master | 2022-12-08T07:08:40.154553 | 2020-09-04T17:36:24 | 2020-09-04T17:36:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,948 | py | '''
Classes from the 'ContactsUICore' framework.
'''
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
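# Usage sketch (hypothetical; requires rubicon-objc and the framework to be
# loadable). Lookups resolve to None on failure, so guard before use:
#   if CNHandle is not None:
#       handle = CNHandle.alloc().init()  # alloc/init shown for illustration only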
_CNUILocalPhotoFuture = _Class('_CNUILocalPhotoFuture')
_CNUICachingLikenessRenderer = _Class('_CNUICachingLikenessRenderer')
CNMCProfileConnection = _Class('CNMCProfileConnection')
CNUIDefaultUserActionFetcher = _Class('CNUIDefaultUserActionFetcher')
CNUICoreFamilyMemberContactItem = _Class('CNUICoreFamilyMemberContactItem')
CNUIUserActionItemComparator = _Class('CNUIUserActionItemComparator')
CNUIUserActionRanking = _Class('CNUIUserActionRanking')
CNUIImageProvider = _Class('CNUIImageProvider')
CNUICoreProposedContactsFetchingDecorator = _Class('CNUICoreProposedContactsFetchingDecorator')
CNUIPlaceholderProviderFactory = _Class('CNUIPlaceholderProviderFactory')
_CNUIUserActionCurator = _Class('_CNUIUserActionCurator')
CNUIUserActionItemList = _Class('CNUIUserActionItemList')
CNUIPRLikenessPhotoProvider = _Class('CNUIPRLikenessPhotoProvider')
CNHandle = _Class('CNHandle')
CNUIPRLikenessProvider = _Class('CNUIPRLikenessProvider')
CNUICoreContactNoteValueFilter = _Class('CNUICoreContactNoteValueFilter')
CNUIAvatarLayoutManager = _Class('CNUIAvatarLayoutManager')
CNUIAvatarLayoutLayerItem = _Class('CNUIAvatarLayoutLayerItem')
CNUIAvatarLayoutItemConfiguration = _Class('CNUIAvatarLayoutItemConfiguration')
CNUICoreFamilyInfo = _Class('CNUICoreFamilyInfo')
CNUIIDSIDQueryControllerWrapper = _Class('CNUIIDSIDQueryControllerWrapper')
CNUICoreContactPhotoValueFilter = _Class('CNUICoreContactPhotoValueFilter')
CNUICoreContactManagementConsentInspector = _Class('CNUICoreContactManagementConsentInspector')
CNUICoreMainWhitelistedContactsController = _Class('CNUICoreMainWhitelistedContactsController')
CNUIApplicationLaunchOptions = _Class('CNUIApplicationLaunchOptions')
CNTUCallProvider = _Class('CNTUCallProvider')
CNUICoreEditAuthorizationCheck = _Class('CNUICoreEditAuthorizationCheck')
CNUICoreContactNicknameValueFilter = _Class('CNUICoreContactNicknameValueFilter')
CNUIUserActionTargetDiscovering = _Class('CNUIUserActionTargetDiscovering')
CNUIUserActionTargetDiscoveringReplaySubjectPair = _Class('CNUIUserActionTargetDiscoveringReplaySubjectPair')
CNUIPRLikenessLoadingPlaceholderProvider = _Class('CNUIPRLikenessLoadingPlaceholderProvider')
CNUIPRLikenessLoadingGroupPlaceholderProvider = _Class('CNUIPRLikenessLoadingGroupPlaceholderProvider')
CNUIUserActionDisambiguationModeler = _Class('CNUIUserActionDisambiguationModeler')
CNUIUserActionTargetDiscoveringReplaySubject = _Class('CNUIUserActionTargetDiscoveringReplaySubject')
CNFirstRawActionsModelReplaySubject = _Class('CNFirstRawActionsModelReplaySubject')
CNDiscoveredUserActionReplaySubject = _Class('CNDiscoveredUserActionReplaySubject')
_CNUIGravatarPhotoFuture = _Class('_CNUIGravatarPhotoFuture')
_CNUIUserActionUserActivityOpener = _Class('_CNUIUserActionUserActivityOpener')
CNUICoreContactTypeAssessor = _Class('CNUICoreContactTypeAssessor')
CNUICoreScreentimePasscodeInspector = _Class('CNUICoreScreentimePasscodeInspector')
CNUIContactPropertyRanker = _Class('CNUIContactPropertyRanker')
CNUICoreFamilyMemberContactsController = _Class('CNUICoreFamilyMemberContactsController')
CNUIUserActionWorkspaceURLOpener = _Class('CNUIUserActionWorkspaceURLOpener')
CNUIUserActionListModel = _Class('CNUIUserActionListModel')
CNTUCallProviderManagerDelegate = _Class('CNTUCallProviderManagerDelegate')
CNLSApplicationWorkspace = _Class('CNLSApplicationWorkspace')
CNUICoreFamilyElement = _Class('CNUICoreFamilyElement')
_CNUIUserActionImageProvider = _Class('_CNUIUserActionImageProvider')
CNUIPRLikenessPlaceholderProvider = _Class('CNUIPRLikenessPlaceholderProvider')
CNUIPRLikenessLookup = _Class('CNUIPRLikenessLookup')
CNTUCallProviderManager = _Class('CNTUCallProviderManager')
CNUIUserActionTargetDiscoveringObservableCancelationToken = _Class('CNUIUserActionTargetDiscoveringObservableCancelationToken')
CNCallProvidersChangedCancelationToken = _Class('CNCallProvidersChangedCancelationToken')
CNUICoreLogProvider = _Class('CNUICoreLogProvider')
CNUIInteractionDonor = _Class('CNUIInteractionDonor')
CNUICoreContactPropertyFilterBuilder = _Class('CNUICoreContactPropertyFilterBuilder')
CNUICoreContactStoreProductionFacade = _Class('CNUICoreContactStoreProductionFacade')
CNUICoreContactStoreTestFacade = _Class('CNUICoreContactStoreTestFacade')
CNUICoreContactRelationshipsFilter = _Class('CNUICoreContactRelationshipsFilter')
CNUIUserActionExtensionURLOpener = _Class('CNUIUserActionExtensionURLOpener')
CNUICoreFamilyInfoRetriever = _Class('CNUICoreFamilyInfoRetriever')
_CNUIDefaultUserActionRecorderEventFactory = _Class('_CNUIDefaultUserActionRecorderEventFactory')
CNUIDefaultUserActionRecorder = _Class('CNUIDefaultUserActionRecorder')
CNUIIDSContactPropertyResolver = _Class('CNUIIDSContactPropertyResolver')
CNUIIDSAvailabilityProvider = _Class('CNUIIDSAvailabilityProvider')
CNUIDSHandleAvailabilityPromise = _Class('CNUIDSHandleAvailabilityPromise')
CNUIDHandleAvailabilityFuture = _Class('CNUIDHandleAvailabilityFuture')
CNUIUserActionDiscoveringEnvironment = _Class('CNUIUserActionDiscoveringEnvironment')
CNUILikenessRenderer = _Class('CNUILikenessRenderer')
CNCapabilities = _Class('CNCapabilities')
CNUICoreContactEditingSession = _Class('CNUICoreContactEditingSession')
CNUICoreFamilyMemberContactsModel = _Class('CNUICoreFamilyMemberContactsModel')
CNUICoreFamilyMemberContactsStore = _Class('CNUICoreFamilyMemberContactsStore')
CNUICoreFamilyMemberWhitelistedContactsController = _Class('CNUICoreFamilyMemberWhitelistedContactsController')
CNUICoreContactPropertyValueFilterFactory = _Class('CNUICoreContactPropertyValueFilterFactory')
CNUIUserActivityManager = _Class('CNUIUserActivityManager')
CNUIRemotePhotoFutures = _Class('CNUIRemotePhotoFutures')
CNUIUserActionListDataSource = _Class('CNUIUserActionListDataSource')
CNUIUserActionDisambiguationViewDataSource = _Class('CNUIUserActionDisambiguationViewDataSource')
CNUIUserActionTargetDiscoveryCache = _Class('CNUIUserActionTargetDiscoveryCache')
CNUIDSHandleAvailabilityCache = _Class('CNUIDSHandleAvailabilityCache')
CNUIDSIMessageHandleAvailabilityCache = _Class('CNUIDSIMessageHandleAvailabilityCache')
CNUIDSFaceTimeHandleAvailabilityCache = _Class('CNUIDSFaceTimeHandleAvailabilityCache')
CNUIUserActionListModelCache = _Class('CNUIUserActionListModelCache')
CNUICoreContactsSyncProductionTrigger = _Class('CNUICoreContactsSyncProductionTrigger')
CNUIContactPropertyIDSHandle = _Class('CNUIContactPropertyIDSHandle')
CNUICoreRecentsManager = _Class('CNUICoreRecentsManager')
CNUIPRLikenessResolver = _Class('CNUIPRLikenessResolver')
CNUIPRLikenessResolverOptions = _Class('CNUIPRLikenessResolverOptions')
CNUICoreContactFetchRequestAccumulator = _Class('CNUICoreContactFetchRequestAccumulator')
CNUIMeContactComparisonStrategyUnified = _Class('CNUIMeContactComparisonStrategyUnified')
CNUIMeContactComparisonStrategyIdentifier = _Class('CNUIMeContactComparisonStrategyIdentifier')
CNUIMeContactMonitor = _Class('CNUIMeContactMonitor')
CNUICoreContactScratchpad = _Class('CNUICoreContactScratchpad')
CNUIPassKitWrapper = _Class('CNUIPassKitWrapper')
CNUICoreWhitelistedContactsControllerOptions = _Class('CNUICoreWhitelistedContactsControllerOptions')
CNUIUserActivityRestorer = _Class('CNUIUserActivityRestorer')
CNUISnowglobeUtilities = _Class('CNUISnowglobeUtilities')
CNUIRenderedLikenessCacheEntry = _Class('CNUIRenderedLikenessCacheEntry')
CNUIUserActionDisambiguationModelFinalizer = _Class('CNUIUserActionDisambiguationModelFinalizer')
CNUICoreFamilyMemberSaveRequestFactory = _Class('CNUICoreFamilyMemberSaveRequestFactory')
_CNUIDirectoryServicesPhotoFuture = _Class('_CNUIDirectoryServicesPhotoFuture')
CNUIDowntimeLogger = _Class('CNUIDowntimeLogger')
CNUICoreFamilyMemberContactsModelRetriever = _Class('CNUICoreFamilyMemberContactsModelRetriever')
CNUICoreContactEdit = _Class('CNUICoreContactEdit')
CNUICoreContactMatcher = _Class('CNUICoreContactMatcher')
CNUIUserActionItem = _Class('CNUIUserActionItem')
_CNUIUserActionDialRequestItem = _Class('_CNUIUserActionDialRequestItem')
_CNUIUserActionUserActivityItem = _Class('_CNUIUserActionUserActivityItem')
_CNUIUserActionURLItem = _Class('_CNUIUserActionURLItem')
CNUICoreFamilyMemberContactsModelBuilder = _Class('CNUICoreFamilyMemberContactsModelBuilder')
CNUICoreInMemoryWhitelistedContactsDataSourceDecorator = _Class('CNUICoreInMemoryWhitelistedContactsDataSourceDecorator')
CNUILibraryFolderDiscovery = _Class('CNUILibraryFolderDiscovery')
CNUICoreContactRefetcher = _Class('CNUICoreContactRefetcher')
CNUILikenessRenderingScope = _Class('CNUILikenessRenderingScope')
CNUIUserActionCacheKeyGenerator = _Class('CNUIUserActionCacheKeyGenerator')
CNUILikenessFingerprint = _Class('CNUILikenessFingerprint')
_CNUILikenessRenderer = _Class('_CNUILikenessRenderer')
_CNUIUserActionDialRequestOpener = _Class('_CNUIUserActionDialRequestOpener')
CNUIUserActionContext = _Class('CNUIUserActionContext')
CNUIUserActionTarget = _Class('CNUIUserActionTarget')
_CNUIUserActionSendMessageIntentTarget = _Class('_CNUIUserActionSendMessageIntentTarget')
_CNUIUserActionDirectionsTarget = _Class('_CNUIUserActionDirectionsTarget')
_CNUIUserActionMessagesTextTarget = _Class('_CNUIUserActionMessagesTextTarget')
_CNUIUserActionSkypeVoiceTarget = _Class('_CNUIUserActionSkypeVoiceTarget')
_CNUIUserActionSkypeVideoTarget = _Class('_CNUIUserActionSkypeVideoTarget')
_CNUIUserActionCallProviderVideoTarget = _Class('_CNUIUserActionCallProviderVideoTarget')
_CNUIUserActionSkypeTextTarget = _Class('_CNUIUserActionSkypeTextTarget')
_CNUIUserActionMailEmailTarget = _Class('_CNUIUserActionMailEmailTarget')
_CNUIUserActionFaceTimeVoiceTarget = _Class('_CNUIUserActionFaceTimeVoiceTarget')
_CNUIUserActionStartAudioCallIntentTarget = _Class('_CNUIUserActionStartAudioCallIntentTarget')
_CNUIUserActionTelephonyVoiceTarget = _Class('_CNUIUserActionTelephonyVoiceTarget')
_CNUIUserActionFaceTimeVideoTarget = _Class('_CNUIUserActionFaceTimeVideoTarget')
_CNUIUserActionCallProviderVoiceTarget = _Class('_CNUIUserActionCallProviderVoiceTarget')
_CNUIUserActionWalletPayTarget = _Class('_CNUIUserActionWalletPayTarget')
_CNUIUserActionStartVideoCallIntentTarget = _Class('_CNUIUserActionStartVideoCallIntentTarget')
TestCNUIIDSHandleAvailability = _Class('TestCNUIIDSHandleAvailability')
CNUIRTTUtilities = _Class('CNUIRTTUtilities')
_CNUIRTTUtilities = _Class('_CNUIRTTUtilities')
CNContactsUIError = _Class('CNContactsUIError')
_CNUIIDSHandleAvailability = _Class('_CNUIIDSHandleAvailability')
CNUIIDSRequest = _Class('CNUIIDSRequest')
CNUICoreContactAggregateValueFilter = _Class('CNUICoreContactAggregateValueFilter')
CNUICreateContactIntentResponse = _Class('CNUICreateContactIntentResponse')
CNUICreateContactIntent = _Class('CNUICreateContactIntent')
ca3048e417708c69249c76c57c3e868c4c8ba729 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p00007/s006220863.py | cdaec1ff23fb635a4ac0fc0429f144156190fe | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py |
from math import ceil
# AOJ 0007: a 100,000 debt grows 5% per week, rounded up to the nearest 1,000.
debt = 100000
for _ in range(int(input())):
    debt = ceil(debt * 1.05 / 1000) * 1000
print(debt)
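# Quick check against the AOJ 0007 sample: n = 5 gives
# 105000 -> 111000 -> 117000 -> 123000 -> 130000, so the program prints 130000.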
"[email protected]"
] | |
e64374bbafac7b8b97a0251bdbbd43bf9e2035b7 | 82aada4592fc4fc8dfd6822bd37a1f6c79ee53c0 | /mainapp/daemons/vikidict/vikidict.py | 5cf68458abd9c82cd9e1ab610b1a5ab63d1f4ae0 | [] | no_license | Ancelada/canonizator | db79e9c16cdafb981e58dd933c03460a16803f90 | d5f14dddf3ed70dc8a0c10ecbb987fdf64eb682f | refs/heads/master | 2021-01-23T00:56:58.045369 | 2017-06-05T19:06:52 | 2017-06-05T19:06:52 | 85,855,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,353 | py | import sys
import os
from bs4 import BeautifulSoup
import requests
import pymorphy2
import binascii
# path = os.path.dirname(sys.modules[__name__].__file__)
# path = os.path.join(path, '..')
# sys.path.insert(0, path)
# __path__ = os.path.dirname(os.path.abspath(__file__))
class Vikidict():
def __init__(self):
self.headers = {
'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)',
}
self.path = os.path.dirname(os.path.abspath(__file__))
self.morth = pymorphy2.MorphAnalyzer()
def __page_soup(self, url):
page_content = self.__get_page_content(url)
return BeautifulSoup(page_content, 'lxml')
def __get_page_content(self, url):
return requests.get(url, headers=self.headers).text
    def __get_synonim_list(self, page_soup):
        # Locate the "Синонимы" ("Synonyms") <h4> heading, if present.
        h4 = None
        for tag in page_soup.find_all('h4'):
            if tag.span is not None and tag.span.string == 'Синонимы':
                h4 = tag
                break
        if h4 is not None:
try:
a_arr = h4.next_sibling.next_sibling.find_all('a')
synonims = []
for a in a_arr:
if a.get('title') != None:
print (a['title'])
synonims.append(a['title'])
return synonims
except:
return []
else:
return []
def parse_to_morph(self, word):
return self.morth.parse(word)[0]
def normalize_word(self, parsed_to_morph):
normal_form = parsed_to_morph.normal_form
return normal_form
def start(self, words):
result = []
for word in words:
POS = self.parse_to_morph(word.name).tag.POS
page_soup = self.__page_soup('https://ru.wiktionary.org/wiki/{0}'.format(word.name))
synonims = self.__get_synonim_list(page_soup)
synonims = self.__remove_different_pos(POS, synonims)
result.append({
'id': word.id,
'word': word.name,
'synonims': self.__convert_synonims(synonims)
})
return result
def __convert_synonims(self, synonims):
result = []
for synonim in synonims:
result.append({'synonim': synonim, 'crc32': self.__convert_crc32(synonim)})
return result
def __remove_different_pos(self, POS, synonims):
for key, value in enumerate(synonims):
value = self.parse_to_morph(value)
synonims[key] = value.normal_form
if value.tag.POS != POS:
del synonims[key]
return self.__remove_different_pos(POS, synonims)
return synonims
def __convert_crc32(self, value):
value_bytes=bytes(value, 'utf-8')
        return binascii.crc32(value_bytes)
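# Minimal usage sketch (assumption: `words` is an iterable of objects with
# `.id` and `.name` attributes, which is what `start()` consumes above):
#
#   vd = Vikidict()
#   results = vd.start(words)
#   # -> [{'id': ..., 'word': ..., 'synonims': [{'synonim': ..., 'crc32': ...}, ...]}, ...]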
e07b156b01c9f1e08b27c8f6e63b732cad71f565 | 4b7e282fe480415f5d52c0fc0429f144156190fe | /examples/account_management/reject_merchant_center_link.py | 92971480f07660b2c66d44226dd07ca53c1447 | ["Apache-2.0"] | permissive | Z2Xsoft/google-ads-python | c4750357bb19da91bb3b6bf2fa84bef9d2df36d3 | 1779d52a0446c8afb2437b0a9e103dcb849f5590 | refs/heads/main | 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 | Apache-2.0 | 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null | UTF-8 | Python | false | false | 6207 | py |
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demonstrates how to reject or unlink a Merchant Center link request.
Prerequisite: You need to have access to a Merchant Center account. You can find
instructions to create a Merchant Center account here:
https://support.google.com/merchants/answer/188924.
To run this example, you must use the Merchant Center UI or the Content API for
Shopping to send a link request between your Merchant Center and Google Ads
accounts. You can find detailed instructions to link your Merchant Center and
Google Ads accounts here: https://support.google.com/merchants/answer/6159060.
"""
import argparse
import sys
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
def main(client, customer_id, merchant_center_account_id):
"""Demonstrates how to reject a Merchant Center link request.
Args:
client: An initialized Google Ads client.
customer_id: The Google Ads customer ID.
merchant_center_account_id: The Merchant Center account ID for the
account requesting to link.
"""
# Get the MerchantCenterLinkService client.
merchant_center_link_service = client.get_service(
"MerchantCenterLinkService"
)
# Get the extant customer account to Merchant Center account links.
list_merchant_center_links_response = merchant_center_link_service.list_merchant_center_links(
customer_id=customer_id
)
number_of_links = len(
list_merchant_center_links_response.merchant_center_links
)
if number_of_links == 0:
print(
"There are no current merchant center links to Google Ads "
f"account {customer_id}. This example will now exit."
)
return
print(
f"{number_of_links} Merchant Center link(s) found with the "
"following details:"
)
for (
merchant_center_link
) in list_merchant_center_links_response.merchant_center_links:
print(
f"\tLink '{merchant_center_link.resource_name}' has status "
f"'{merchant_center_link.status.name}'."
)
# Check if this is the link to the target Merchant Center account.
if merchant_center_link.id == merchant_center_account_id:
# A Merchant Center link can be pending or enabled; in both
# cases, we reject it by removing the link.
_remove_merchant_center_link(
client,
merchant_center_link_service,
customer_id,
merchant_center_link,
)
# We can terminate early since this example concerns only one
# Google Ads account to Merchant Center account link.
return
# Raise an exception if no matching Merchant Center link was found.
    raise ValueError(
        "No link could be found between Google Ads account "
f"{customer_id} and Merchant Center account "
f"{merchant_center_account_id}."
)
# [START reject_merchant_center_link]
def _remove_merchant_center_link(
client, merchant_center_link_service, customer_id, merchant_center_link
):
"""Removes a Merchant Center link from a Google Ads client customer account.
Args:
client: An initialized Google Ads client.
merchant_center_link_service: An initialized
MerchantCenterLinkService client.
customer_id: The Google Ads customer ID of the account that has the link
request.
merchant_center_link: The MerchantCenterLink object to remove.
"""
# Create a single remove operation, specifying the Merchant Center link
# resource name.
operation = client.get_type("MerchantCenterLinkOperation")
operation.remove = merchant_center_link.resource_name
# Send the operation in a mutate request.
response = merchant_center_link_service.mutate_merchant_center_link(
customer_id=customer_id, operation=operation
)
print(
"Removed Merchant Center link with resource name "
f"'{response.result.resource_name}'."
)
# [END reject_merchant_center_link]
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v8")
parser = argparse.ArgumentParser(
description=(
"Demonstrates how to reject a Merchant Center link request."
)
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
parser.add_argument(
"-m",
"--merchant_center_account_id",
type=int,
required=True,
help="The Merchant Center account ID for the account requesting to "
"link.",
)
args = parser.parse_args()
try:
main(
googleads_client, args.customer_id, args.merchant_center_account_id
)
except GoogleAdsException as ex:
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:'
)
for error in ex.failure.errors:
print(f'\tError with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
c80b24556a0be18f1c988d44226dd07ca53c1447 | ab9196b6356e3c0af7baf7b768d7eb8112243c06 | /Python&DataBase/5.21/HW02Pandas03_05_ClassEx05_김주현.py | 72383d87fa38906966d5ecf55a4a45b989d9756b | [] | no_license | wngus9056/Datascience | 561188000df74686f42f216cda2b4e7ca3d8eeaf | a2edf645febd138531d4b953afcffa872ece469b | refs/heads/main | 2023-07-01T00:08:00.642424 | 2021-08-07T02:10:25 | 2021-08-07T02:10:25 | 378,833,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py |
class FourCal:
def __init__(self,first,second):
self.first = first
self.second = second
def sum(self):
result = self.first + self.second
return result
def sub(self):
result = self.first - self.second
return result
def mul(self):
result = self.first * self.second
return result
def div(self):
result = self.first / self.second
return result
class MoreFourCal(FourCal):
    def pow(self, su01):
        # Note: squares the argument itself, independent of self.first/self.second.
        result = su01 ** 2
        return result
a = MoreFourCal(4,2)
print(a.first, '+', a.second, '=', a.sum())
print(a.first, '-', a.second, '=', a.sub())
print(a.first, '*', a.second, '=', a.mul())
print(a.first, '/', a.second, '=', a.div())
print('제곱출력 :', a.pow(5))  # '제곱출력' means "square output"
e8dc3a1f5f2a3fdeec63fe5fd2a749367511e8ab | 5c2e0fe391f7c720d0a6c117a64f4c8e89fece93 | /research/object_detection/models/ssd_mobilenet_v1_feature_extractor.py | e64c966d5f72e27875bfa64f49a6f3ce45694522 | ["Apache-2.0"] | permissive | lyltencent/tf_models_v15 | e3bed9dfee42685118b0f3d21bb9de37d58cf500 | 0081dbe36831342051c09a2f94ef9ffa95da0e79 | refs/heads/master | 2022-10-20T20:00:26.594259 | 2020-09-19T05:37:22 | 2020-09-19T05:37:22 | 161,750,047 | 0 | 1 | Apache-2.0 | 2021-03-31T21:04:01 | 2018-12-14T07:47:33 | Python | UTF-8 | Python | false | false | 4972 | py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV1 features."""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import ops
from nets import mobilenet_v1
slim = tf.contrib.slim
class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV1 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
batch_norm_trainable=True,
reuse_weights=None):
"""MobileNetV1 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
batch_norm_trainable: Whether to update batch norm parameters during
training or not. When training with a small batch size
(e.g. 1), it is desirable to disable batch norm update and use
pretrained batch norm params.
reuse_weights: Whether to reuse variables. Default is None.
"""
super(SSDMobileNetV1FeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams, batch_norm_trainable, reuse_weights)
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
feature_map_layout = {
'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
'', ''],
'layer_depth': [-1, -1, 512, 256, 256, 128],
}
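    # Layout note (standard MobileNetV1-SSD wiring): the two named endpoints
    # are reused from the MobileNetV1 base (its stride-16 and stride-32
    # feature maps); the four '' entries ask the feature-map generator to
    # build extra SSD feature maps with the listed depths (512, 256, 256, 128).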
with tf.control_dependencies([shape_assert]):
with slim.arg_scope(self._conv_hyperparams):
with slim.arg_scope([slim.batch_norm], fused=False):
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
_, image_features = mobilenet_v1.mobilenet_v1_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return feature_maps.values()
2dbd0c03e52beb11415376f65595220a7a52e734 | 29e1133741b339c2e6c4c0385a103f68baa32a11 | /Wos/crawle_spider.py | 8c53f1a85ff5ed5a1836f35e52fd779a437250001391d2de1 | [] | no_license | Gscsd8527/AllProject | b406935dd1e969d1f45a62f870fb409f81ba4200 | 10b56c432b6f433e3a37967b7c717840e726765c | refs/heads/master | 2023-02-21T20:25:48.397668 | 2022-03-04T14:01:27 | 2022-03-04T14:01:27 | 199,461,253 | 13 | 6 | null | 2023-02-15T20:47:23 | 2019-07-29T13:45:25 | Python | UTF-8 | Python | false | false | 25744 | py |
import requests
from lxml import etree
import re
from lxml.html import tostring
import html
import uuid
import datetime
import pymongo
myclient = pymongo.MongoClient('mongodb://10.0.82.131:27017/')
mydb = myclient['Wos_Computer']  # database
mydata = mydb['data2017']  # collection (table)
from getCookies import getCookies, parseSetCookie, HEADERS
# from spider.getCookies import getCookies, parseSetCookie, HEADERS
COOKIES = {}
INDEX = 1
K = False
def parseHtml(url, cookie, name_string):
print('===========parseHtml================')
    headers = HEADERS.copy()  # copy so the shared default headers are not mutated below
qid = re.findall('qid=(.*?)&', url)[0]
sid = re.findall('SID=(.*?)&', url)[0]
page = re.findall('page=(.*?)&', url)[0]
headers['Referer'] = 'http://apps.webofknowledge.com/summary.do?product=UA&colName=&qid={}&SID={}&search_mode=GeneralSearch&formValue(summary_mode)=GeneralSearch&update_back2search_link_param=yes&page={}'.format(qid, sid, page)
response = requests.get(url, headers=headers, cookies=cookie)
if response.status_code == 200:
response.encoding = 'utf-8'
set_cookie = response.headers['Set-Cookie']
need_list = ['JSESSIONID', 'dotmatics.elementalKey', '_abck', 'bm_sv']
cookies = parseSetCookie(set_cookie, need_list)
        # This cookie is needed when paging. Note dict.update() mutates in
        # place and returns None, so keep a reference to the merged dict.
        cookie.update(cookies)
        new_cookie = cookie
html_text = response.text
html_data = etree.HTML(html_text)
        # Main HTML content blocks
content = html_data.xpath('//div[@class="l-content"]//div[@class="block-record-info"]')
        # Paper title (name)
paper_Name = name_string
print('name= ', paper_Name)
        # Authors (author)
author = ''
try:
author_jc_list = []
author_qc_list = []
author_a = content[0].xpath('./p/a')
for a in author_a:
jc = a.xpath('./text()')[0].strip(' ')
author_jc_list.append(jc)
author_qc = content[0].xpath('./p/text()')
for i in author_qc:
if '(' in i:
qc = i.strip(' ')
author_qc_list.append(qc)
for i in range(len(author_jc_list)-len(author_qc_list)):
author_qc_list.append(' ')
author_zip = zip(author_jc_list, author_qc_list)
author_list = []
for i in author_zip:
auth = i[0] + i[1]
author_list.append(auth)
author = author_list
except Exception as e:
print(e)
        # Publication/source info (info_source)
info_source = {}
try:
info = html_data.xpath('//div[@class="l-content"]//div[@class="block-record-info block-record-info-source"]')[0]
sourceTitle = ''
try:
try:
sourceTitle = html_data.xpath('//*[@class="sourceTitle"]/value/text()')
sourceTitle_list = [i.strip(' ') for i in sourceTitle if i != '']
sourceTitle = ''.join(sourceTitle_list)
except:
sourceTitle = html_data.xpath('//*[@class="sourceTitle"]/a/text()')
sourceTitle_list = [i.strip(' ') for i in sourceTitle if i != '']
sourceTitle = ''.join(sourceTitle_list)
except Exception as e:
print('sourceTitle error', e)
info_source['sourceTitle'] = sourceTitle
try:
sourceValue = info.xpath('./div[@class="block-record-info-source-values"]/p')
for i in sourceValue:
name = i.xpath('./span/text()')[0].strip(' ').strip(':')
value = i.xpath('./value/text()')[0].strip(' ')
info_source[name] = value
except:
pass
try:
fileds = info.xpath('./p[@class="FR_field"]')
for filed in fileds:
name = filed.xpath('./span[@class="FR_label"]/text()')[0].strip(' ').strip(':')
try:
value = filed.xpath('./value/text()')[0].strip(' ')
except Exception as e:
value = filed.xpath('./text()')
value_list = [i for i in value if i != '\n']
value = ''.join(value_list)
info_source[name] = value
except:
pass
try:
data = content[1:]
for dt in data:
title = dt.xpath('./div[@class="title3"]/text()')[0]
dt_string = tostring(dt).decode()
value_re = re.findall('>(.*?)<', dt_string)
value_list1 = [i.strip('\n').strip(' ') for i in value_re if i != '']
value_list2 = ' '.join(value_list1).strip('\n').strip(' ')
value_string = html.unescape(value_list2)
value = ''
if '摘要' in title:
value = value_string.strip('摘要').strip(' ').strip('\n')
elif '会议名称' in title:
try:
conference = {}
value_list = re.findall('(.*?):(.*?)会议', value_string)
value_string_end = value_string.rsplit('会议', 1)[-1].split(':')
for i in value_list:
name = i[0].strip(' ')
name_value = i[1].strip(' ')
conference[name] = name_value
name = '会议' + value_string_end[0].strip(' ')
name_value = value_string_end[1].strip(' ')
conference[name] = name_value
value = conference
except:
pass
elif '关键词' in title:
try:
value = {}
value_str = value_string.strip('关键词').strip(' ')
value_tuple = value_str.partition('KeyWords Plus:')
value_tuple1 = value_tuple[0].split(':')
value[value_tuple1[0]] = value_tuple1[1]
value[value_tuple[1].strip(':')] = value_tuple[2].strip(' ')
except:
pass
elif '作者信息' in title:
try:
value = {}
clean_data = {}
temp_dict = {}
temp_value = []
temp_list = []
value['source_data'] = value_string
try:
ps = dt.xpath('./p')
tables = dt.xpath('./table')
ps_tables = zip(ps, tables)
for p_t in ps_tables:
p_list = p_t[0].xpath('./text()')
p_str = ''.join(p_list)
p_new_str = p_str.strip(' ')
if '通讯作者' in p_new_str:
try:
table_bq = p_t[1].xpath('.//*[@class="fr_address_row2"]/text()')
table_bq_strip = [i.strip(' ') for i in table_bq if i != '']
temp_value.extend(table_bq_strip)
p_new_str = p_new_str.strip(' ').strip('\n')
if p_new_str in temp_dict.keys():
temp_value.extend(table_bq_strip)
temp_dict[p_new_str] = temp_value
else:
temp_dict[p_new_str] = table_bq_strip
except:
pass
else:
try:
table_bq = p_t[1].xpath('.//*[@class="fr_address_row2"]/a/text()')
table_bq_strip = [i.strip(' ') for i in table_bq if i != '']
temp_list = table_bq_strip
except:
pass
clean_data['地址'] = temp_list
clean_data['通讯作者地址'] = temp_dict
email = ''
try:
email = ps[-1].xpath('.//a/text()')
except:
pass
clean_data['邮箱'] = email
except Exception as e:
print('e=', e)
value['clean_data'] = clean_data
except:
pass
elif '基金资助致谢' in title:
value = re.findall('授权号(.*?)查看基金资助信息 ', value_string)[0].strip(' ')
elif '出版商' in title:
value = value_string.strip('出版商').strip(' ')
elif '/' in title:
try:
value = {}
value_tuple = value_string.split(':')
value['研究方向'] = value_tuple[1].strip(' ').strip('类别').strip(' ').strip('Web of Science').strip(' ')
value['类别'] = value_tuple[2].strip(' ')
except:
pass
elif '文献信息' in title:
try:
value = {}
value_string = value_string.strip('文献信息').strip(' ')
value_list = [i.strip(':').strip(' ') for i in value_string.split(' ')]
for i in range(len(value_list) // 2):
j = i * 2
value[value_list[j]] = value_list[j + 1]
except:
pass
elif '其他信息' in title:
value = value_string.strip('文献信息').strip(' ')
else:
value = value_string
info_source[title] = value
except Exception as e:
print(e)
except Exception as e:
print(e)
        # works_cited: times cited / cited-reference counts
works_cited = {}
try:
# 右侧介绍
sidebar = html_data.xpath('//div[@id="sidebar-container"]')[0]
columns = sidebar.xpath('.//div[@class="flex-column"]')
if len(columns) == 2:
column1 = columns[0]
value = column1.xpath('.//*[@class="large-number"]/text()')
works_cited['被引频次'] = value[0].strip(' ')
works_cited['引用的参考文献'] = value[1].strip(' ')
column2 = columns[1]
value2 = column2.xpath('.//*[@class="large-number"]/text()')
name = column2.xpath('.//div/span[@class="box-label"]/text()')
if '引用的参考文献' not in name:
works_cited[name[0]] = value2[0].strip(' ')
works_cited[name[1]] = value2[1].strip(' ')
except Exception as e:
print(e)
        # Cited references (fetch the paginated reference list)
references_datasets = {}
try:
references_data = html_data.xpath('//div[@class="cited-ref-section"]')[0]
separator = references_data.xpath('./div[@class="cited-ref-separator"]/h2/text()')[0]
num = 30
if ':' in separator:
try:
num = int(separator.split(':')[1].strip(' '))
except:
num = 30
page_num = num // 30
page_num_yushu = num / 30
if page_num_yushu > page_num:
nums = page_num + 1
else:
nums = page_num
for pages in range(1, nums+1):
headers = HEADERS.copy()
headers['Referer'] = url
references_url = 'http://apps.webofknowledge.com/summary.do?product=UA&parentProduct=UA&search_mode=CitedRefList&parentQid=1&parentDoc=2&qid=3&SID={}&colName=WOS&page={}'.format(sid, pages)
references_url = 'http://apps.webofknowledge.com/summary.do?product=UA&parentProduct=UA&search_mode=CitedRefList&parentQid=1&parentDoc=1&qid=2&SID={}&colName=WOS&page={}'.format(sid, pages)
response = requests.get(url=references_url, headers=headers, cookies=new_cookie)
print('共有 {} 页,这是第 {} 页'.format(nums, pages))
if response.status_code == 200:
html_context = response.text
references = {}
html_context = etree.HTML(html_context)
references_data = html_context.xpath('//div[@class="search-results-item"]')
for dt in references_data:
id_list = dt.xpath('./div[@class="search-results-number"]/div/text()')
id_str = ''.join(id_list).strip('\n').strip(' ').strip('.')
title_name = ''
try:
# name_list = dt.xpath('./a[@class="smallV110 snowplow-full-record"]//value/text()')
name_list = dt.xpath('.//span[@class="reference-title"]/value/text()')
name_str = ''.join(name_list)
title_name = name_str.strip('\n').strip(' ')
if title_name == '':
title_name = '标题不可用'
except Exception as e:
print('name错误:', e)
divs = dt.xpath('./*[@class="summary_data"]/div')
author_string = '未截取'
conference_info = {}
paper_info = {}
for div in divs:
div_str = tostring(div).decode()
div_str_un = html.unescape(div_str)
div_string = re.findall('>(.*?)<', div_str_un)
div_string = ''.join(div_string)
if '作者' in div_string:
try:
author_string = div_string.strip('作者: ').strip(' ').strip('等.')
if '标题' in author_string:
author_list = re.findall(':.*?(.*?)\.', author_string)
author_str = ''.join(author_list)
if ':' in author_str:
author_string = author_str.split(':')[-1].strip(' ').strip('等.')
elif '作者' in author_string:
author_string = re.findall('作者:(.*)\.', author_string)
author_string = ''.join(author_string)
zw_len = re.findall('([\u4e00-\u9fa5])', author_string)
if len(zw_len):
paper_data1 = re.sub('\s{2,5}', '=', div_string)
paper_data2 = paper_data1.split('=')
paper_data3 = paper_data2[0]
if ':' in paper_data3:
author_string = paper_data3.split(':')[-1].strip(' ')
else:
author_string = paper_data3
except Exception as e:
print('zuoze: ', e)
if '会议' in div_string:
try:
div_list = div_string.split('会议')
conference_name = [i for i in div_list if ':' not in i][0].strip(' ')
temp_dict = {i.split(':')[0].strip(' '): i.split(':')[1].strip(' ') for i in div_list if ':' in i}
conference_info['conference_name'] = conference_name
conference_info.update(temp_dict)
except:
pass
if ('丛书' in div_string) or ('卷' in div_string) or ('页' in div_string) or ('出版年' in div_string):
try:
paper_data1 = re.sub('\s{2,5}', '=', div_string)
paper_data2 = paper_data1.split('=')
if ':' not in paper_data2[0]:
paper_name = paper_data2[0].strip(' ')
paper_info['paper_name'] = paper_name
if '作者' in paper_data2[0]:
paper_name = paper_data2[0].rsplit('.', 1)[-1]
paper_info['paper_name'] = paper_name
if ':' not in paper_data2[-1]:
paper_info['出版日期'] = paper_data2[-1].strip(' ')
paper_dict = {i.split(':')[0].strip(' '): i.split(':')[1].strip(' ') for i in
paper_data2 if ':' in i}
paper_info.update(paper_dict)
except:
pass
                            # Clean paper_info: mainly publisher, publication date, and dirty keys
paper_info_copy = paper_info.copy()
for k, v in paper_info.items():
if '出版商' in k:
sz = re.findall('\d+', k)
if len(sz):
paper_info_copy['出版日期'] = sz[0]
paper_info_copy['出版商'] = v
del paper_info_copy[k]
if len(k) > 25:
del paper_info_copy[k]
frequency = ''
try:
frequency = dt.xpath('.//div[@class="search-results-data-cite"]/a/text()')[0].strip(' ')
except:
pass
temp_references = {
'name': title_name,
'author': author_string,
'conference_info': conference_info,
'paper_info': paper_info_copy,
'frequency': frequency
}
references[id_str] = temp_references
# print('references= ', references)
references_datasets.update(references.copy())
else:
print('翻页这里的错误响应码为: ', response.status_code)
except Exception as e:
pass
uid = uuid.uuid1()
suid = str(uid).replace('-', '')
    # 1. Unique identifier for this record
datasetId = suid
spiderDateTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
data_json = {
'datasetId': datasetId,
'handle': '',
'source': url,
'name': paper_Name,
'author': author,
'info_source': info_source,
'works_cited': works_cited,
'references_json': references_datasets,
'spiderDateTime': spiderDateTime,
}
print('data_json', data_json)
try:
mydata.insert_one(data_json)
except Exception as e:
# data_json = json.dumps(data_json, ensure_ascii=False)
# # print('文本类型为: ', data_json)
# with open('data.txt', 'a+', encoding='utf-8') as f:
# f.write(data_json)
# f.write('\n')
print('插入数据库报错: ', e)
else:
print('错误的响应码为: ', response.status_code)
# Parse the results page for the total count and each record's URL
def parseGetUrlAndNum(html, cookie):
html_data = etree.HTML(html)
data = html_data.xpath('//div[@class="search-results-item"]')
for dt in data:
url = dt.xpath('./div[@class="search-results-content"]/div[1]//a/@href')[0]
new_url = 'http://apps.webofknowledge.com' + url
print('new_url= ', new_url)
name_list = dt.xpath('./div[@class="search-results-content"]/div[1]//a/value/text()')
name_list = [i.strip(' ') for i in name_list if i != '']
name_string = ''.join(name_list)
name_string = name_string.strip(' ').replace('\n', '')
print('name_string********* ', name_string)
parseHtml(new_url, cookie, name_string)
# urls.append(new_url)
# name = dt.xpath('./div[@class="search-results-content"]/div[1]//a/value/text()')[0]
# print(name)
# Refresh the cookie on a request-count schedule
def updateCookie(sid, headers, referer):
print('=========updateCookie=========')
url = 'http://apps.webofknowledge.com/summary.do?product=UA&parentProduct=UA&search_mode=GeneralSearch&qid=1&SID={}&&page=1&action=changePageSize&pageSize=50'.format(sid)
headers['Referer'] = referer
response = requests.get(url, headers=headers, cookies=COOKIES)
if response.status_code == 200:
set_cookie = response.headers['Set-Cookie']
need_list = ['JSESSIONID', 'dotmatics.elementalKey', '_abck', 'bm_sv']
cookies = parseSetCookie(set_cookie, need_list)
# print('更改过后的cookie', cookies)
COOKIES.update(cookies)
return COOKIES
# Use 50 records per page
def summary(sid, cookie, referer):
print('================summary=====================')
COOKIES = cookie
COOKIES.pop('_abck')
for i in range(1, 2001):
print('这是第 {} 页'.format(i))
headers = HEADERS.copy()
global INDEX
if (i > 0) and (i < 2):
global K
K = True
# global K
# K = True
# if i < 0 or i > 1:
# # global K
# K = True
print('i的值在1和10之间')
else:
if K:
if INDEX > 9:
print('********************* 重新获取cookie *********************')
sid, cookie, url = getCookies()
COOKIES = cookie
COOKIES.pop('_abck')
INDEX = 1
print('INDEX 的值为:', INDEX)
                # updateCookie() expects request headers, not the cookie dict.
                cookies = updateCookie(sid, HEADERS.copy(), referer)
COOKIES.update(cookies)
url = 'http://apps.webofknowledge.com/summary.do?product=UA&parentProduct=UA&search_mode=GeneralSearch&qid=1&SID={}&&page={}&action=changePageSize&pageSize=50'.format(sid, i)
if i == 1:
headers['Referer'] = referer
else:
headers['Referer'] = url
response = requests.get(url, headers=headers, cookies=COOKIES)
if response.status_code == 200:
set_cookie = response.headers['Set-Cookie']
need_list = ['JSESSIONID', 'dotmatics.elementalKey', '_abck', 'bm_sv']
cookies = parseSetCookie(set_cookie, need_list)
# print('更改过后的cookie', cookies)
COOKIES.update(cookies)
print('SID的值为: ', COOKIES['SID'])
# if i != 1:
html = response.text
parseGetUrlAndNum(html, cookie)
# else:
# print('因为i的值为1,所以跳过')
else:
print('错误的响应吗为:', response.status_code)
INDEX += 1
def main():
sid, cookie, url = getCookies()
summary(sid, cookie, url)
if __name__ == '__main__':
    main()
6a6cd80a80181a8fe1e845f57665f296dfaf7e0e | bf8ccfbe94fbf7d4d44cc2d44ce65f26560d008a | /virtupy/lib/python2.7/site-packages/botocore-0.33.0-py2.7.egg/botocore/hooks.py | 6e46ebf9402825ede0ecb3b19316970088e1714d | [] | no_license | yurmix/kupuestra2 | 921b2da933681f52a84c8c91bbd60ad964517a17 | aabeb7baa06b6d2a80e4ef495fa8fd6769ac76f9 | refs/heads/master | 2021-01-13T10:21:17.029635 | 2014-10-25T18:11:40 | 2014-10-25T18:11:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11542 | py |
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import inspect
import six
from collections import defaultdict, deque
import logging
logger = logging.getLogger(__name__)
def first_non_none_response(responses, default=None):
"""Find first non None response in a list of tuples.
This function can be used to find the first non None response from
handlers connected to an event. This is useful if you are interested
in the returned responses from event handlers. Example usage::
print(first_non_none_response([(func1, None), (func2, 'foo'),
(func3, 'bar')]))
# This will print 'foo'
:type responses: list of tuples
:param responses: The responses from the ``EventHooks.emit`` method.
This is a list of tuples, and each tuple is
(handler, handler_response).
:param default: If no non-None responses are found, then this default
value will be returned.
:return: The first non-None response in the list of tuples.
"""
for response in responses:
if response[1] is not None:
return response[1]
return default
class BaseEventHooks(object):
def emit(self, event_name, **kwargs):
return []
def register(self, event_name, handler, unique_id=None):
self._verify_is_callable(handler)
self._verify_accept_kwargs(handler)
self._register(event_name, handler, unique_id)
def unregister(self, event_name, handler=None, unique_id=None):
pass
def _verify_is_callable(self, func):
if not six.callable(func):
raise ValueError("Event handler %s must be callable." % func)
def _verify_accept_kwargs(self, func):
"""Verifies a callable accepts kwargs
:type func: callable
:param func: A callable object.
        :raises ValueError: if ``func`` does not accept keyword arguments (**kwargs).
"""
try:
argspec = inspect.getargspec(func)
except TypeError:
return False
else:
if argspec[2] is None:
raise ValueError("Event handler %s must accept keyword "
"arguments (**kwargs)" % func)
class EventHooks(BaseEventHooks):
def __init__(self):
# event_name -> [handler, ...]
self._handlers = defaultdict(list)
def emit(self, event_name, **kwargs):
"""Call all handlers subscribed to an event.
:type event_name: str
:param event_name: The name of the event to emit.
:type **kwargs: dict
:param **kwargs: Arbitrary kwargs to pass through to the
subscribed handlers. The ``event_name`` will be injected
            into the kwargs so it's not necessary to add this to **kwargs.
:rtype: list of tuples
:return: A list of ``(handler_func, handler_func_return_value)``
"""
kwargs['event_name'] = event_name
responses = []
for handler in self._handlers[event_name]:
response = handler(**kwargs)
responses.append((handler, response))
return responses
def _register(self, event_name, handler, unique_id=None):
self._handlers[event_name].append(handler)
def unregister(self, event_name, handler, unique_id=None):
try:
self._handlers[event_name].remove(handler)
except ValueError:
pass
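# Usage sketch (handlers must accept **kwargs, per _verify_accept_kwargs;
# the event name is illustrative):
#
#   hooks = EventHooks()
#   hooks.register('needs-retry', lambda **kwargs: kwargs['event_name'])
#   hooks.emit('needs-retry')   # -> [(<handler>, 'needs-retry')]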
class HierarchicalEmitter(BaseEventHooks):
def __init__(self):
# We keep a reference to the handlers for quick
# read only access (we never modify self._handlers).
# A cache of event name to handler list.
self._lookup_cache = {}
self._handlers = _PrefixTrie()
# This is used to ensure that unique_id's are only
# registered once.
self._unique_id_cache = {}
def emit(self, event_name, **kwargs):
# Invoke the event handlers from most specific
# to least specific, each time stripping off a dot.
handlers_to_call = self._lookup_cache.get(event_name)
if handlers_to_call is None:
handlers_to_call = self._handlers.prefix_search(event_name)
self._lookup_cache[event_name] = handlers_to_call
elif not handlers_to_call:
# Short circuit and return an empty response is we have
# no handlers to call. This is the common case where
# for the majority of signals, nothing is listening.
return []
kwargs['event_name'] = event_name
responses = []
for handler in handlers_to_call:
logger.debug('Event %s: calling handler %s', event_name, handler)
response = handler(**kwargs)
responses.append((handler, response))
return responses
def _register(self, event_name, handler, unique_id=None):
if unique_id is not None:
if unique_id in self._unique_id_cache:
# We've already registered a handler using this unique_id
# so we don't need to register it again.
return
else:
# Note that the trie knows nothing about the unique
# id. We track uniqueness in this class via the
# _unique_id_cache.
self._handlers.append_item(event_name, handler)
self._unique_id_cache[unique_id] = handler
else:
self._handlers.append_item(event_name, handler)
# Super simple caching strategy for now, if we change the registrations
# clear the cache. This has the opportunity for smarter invalidations.
self._lookup_cache = {}
def unregister(self, event_name, handler=None, unique_id=None):
if unique_id is not None:
try:
handler = self._unique_id_cache.pop(unique_id)
except KeyError:
# There's no handler matching that unique_id so we have
# nothing to unregister.
return
try:
self._handlers.remove_item(event_name, handler)
self._lookup_cache = {}
except ValueError:
pass
class _PrefixTrie(object):
"""Specialized prefix trie that handles wildcards.
The prefixes in this case are based on dot separated
names so 'foo.bar.baz' is::
foo -> bar -> baz
Wildcard support just means that having a key such as 'foo.bar.*.baz' will
be matched with a call to ``get_items(key='foo.bar.ANYTHING.baz')``.
You can think of this prefix trie as the equivalent as defaultdict(list),
except that it can do prefix searches:
foo.bar.baz -> A
foo.bar -> B
foo -> C
Calling ``get_items('foo.bar.baz')`` will return [A + B + C], from
most specific to least specific.
"""
def __init__(self):
# Each dictionary can be though of as a node, where a node
# has values associated with the node, and children is a link
# to more nodes. So 'foo.bar' would have a 'foo' node with
# a 'bar' node as a child of foo.
# {'foo': {'children': {'bar': {...}}}}.
self._root = {'chunk': None, 'children': {}, 'values': None}
def append_item(self, key, value):
"""Add an item to a key.
If a value is already associated with that key, the new
value is appended to the list for the key.
"""
key_parts = key.split('.')
current = self._root
for part in key_parts:
if part not in current['children']:
new_child = {'chunk': part, 'values': None, 'children': {}}
current['children'][part] = new_child
current = new_child
else:
current = current['children'][part]
if current['values'] is None:
current['values'] = [value]
else:
current['values'].append(value)
def prefix_search(self, key):
"""Collect all items that are prefixes of key.
Prefix in this case are delineated by '.' characters so
'foo.bar.baz' is a 3 chunk sequence of 3 "prefixes" (
"foo", "bar", and "baz").
"""
collected = deque()
key_parts = key.split('.')
current = self._root
self._get_items(current, key_parts, collected, 0)
return collected
def _get_items(self, starting_node, key_parts, collected, starting_index):
stack = [(starting_node, starting_index)]
key_parts_len = len(key_parts)
# Traverse down the nodes, where at each level we add the
# next part from key_parts as well as the wildcard element '*'.
# This means for each node we see we potentially add two more
# elements to our stack.
while stack:
current_node, index = stack.pop()
if current_node['values']:
seq = reversed(current_node['values'])
# We're using extendleft because we want
# the values associated with the node furthest
# from the root to come before nodes closer
# to the root.
collected.extendleft(seq)
if not index == key_parts_len:
children = current_node['children']
directs = children.get(key_parts[index])
wildcard = children.get('*')
next_index = index + 1
if wildcard is not None:
stack.append((wildcard, next_index))
if directs is not None:
stack.append((directs, next_index))
def remove_item(self, key, value):
"""Remove an item associated with a key.
If the value is not associated with the key a ``ValueError``
will be raised. If the key does not exist in the trie, a
``ValueError`` will be raised.
"""
key_parts = key.split('.')
current = self._root
self._remove_item(current, key_parts, value, index=0)
def _remove_item(self, current_node, key_parts, value, index):
if current_node is None:
return
elif index < len(key_parts):
next_node = current_node['children'].get(key_parts[index])
if next_node is not None:
self._remove_item(next_node, key_parts, value, index + 1)
if index == len(key_parts) - 1:
next_node['values'].remove(value)
if not next_node['children'] and not next_node['values']:
# Then this is a leaf node with no values so
# we can just delete this link from the parent node.
# This makes subsequent search faster in the case
# where a key does not exist.
del current_node['children'][key_parts[index]]
else:
raise ValueError(
"key is not in trie: %s" % '.'.join(key_parts))
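# Usage sketch for the trie (internal helper; '.' delimits chunks and '*' is
# a wildcard chunk):
#
#   trie = _PrefixTrie()
#   trie.append_item('foo', 'C')
#   trie.append_item('foo.bar', 'B')
#   trie.append_item('foo.*.baz', 'A')
#   list(trie.prefix_search('foo.bar.baz'))   # -> ['A', 'B', 'C'], most specific first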
5676800b14c8980248c0ab574043fba7b054977d | cacb2757d54aef112c43cc962b674582cbf1468e | pumpp/core.py | 0db7cc2a857d0aa39fa8861b40c8a88 | ["ISC"] | permissive | justinsalamon/pumpp | 9cf1ac6cf0dde1936b45c4d4c44728132a41d2b5 | c8d7be644f998721a841cb43e28c8e285af225a4 | refs/heads/master | 2021-01-01T15:37:38.949477 | 2017-07-18T19:31:07 | 2017-07-18T19:31:07 | 97,657,947 | 2 | 0 | null | 2017-07-19T01:10:14 | 2017-07-19T01:10:14 | null | UTF-8 | Python | false | false | 6628 | py |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
Core functionality
==================
.. autosummary::
:toctree: generated/
Pump
'''
import librosa
import jams
from .base import Slicer
from .exceptions import ParameterError
from .task import BaseTaskTransformer
from .feature import FeatureExtractor
from .sampler import Sampler
class Pump(Slicer):
'''Top-level pump object.
This class is used to collect feature and task transformers
Attributes
----------
ops : list of (BaseTaskTransformer, FeatureExtractor)
The operations to apply
Examples
--------
Create a CQT and chord transformer
>>> p_cqt = pumpp.feature.CQT('cqt', sr=44100, hop_length=1024)
>>> p_chord = pumpp.task.ChordTagTransformer(sr=44100, hop_length=1024)
>>> pump = pumpp.Pump(p_cqt, p_chord)
>>> data = pump.transform(audio_f='/my/audio/file.mp3',
... jam='/my/jams/annotation.jams')
Or use the call interface:
>>> data = pump(audio_f='/my/audio/file.mp3',
... jam='/my/jams/annotation.jams')
Or apply to audio in memory, and without existing annotations:
>>> y, sr = librosa.load('/my/audio/file.mp3')
>>> data = pump(y=y, sr=sr)
Access all the fields produced by this pump:
>>> pump.fields
{'chord/chord': Tensor(shape=(None, 170), dtype=<class 'bool'>),
'cqt/mag': Tensor(shape=(None, 288), dtype=<class 'numpy.float32'>),
'cqt/phase': Tensor(shape=(None, 288), dtype=<class 'numpy.float32'>)}
Access a constituent operator by name:
>>> pump['chord'].fields
{'chord/chord': Tensor(shape=(None, 170), dtype=<class 'bool'>)}
'''
def __init__(self, *ops):
self.ops = []
self.opmap = dict()
super(Pump, self).__init__(*ops)
def add(self, operator):
'''Add an operation to this pump.
Parameters
----------
operator : BaseTaskTransformer, FeatureExtractor
The operation to add
Raises
------
ParameterError
            if `operator` is not of a correct type
'''
if not isinstance(operator, (BaseTaskTransformer, FeatureExtractor)):
raise ParameterError('operator={} must be one of '
'(BaseTaskTransformer, FeatureExtractor)'
.format(operator))
if operator.name in self.opmap:
raise ParameterError('Duplicate operator name detected: '
'{}'.format(operator))
super(Pump, self).add(operator)
self.opmap[operator.name] = operator
self.ops.append(operator)
def transform(self, audio_f=None, jam=None, y=None, sr=None, crop=False):
'''Apply the transformations to an audio file, and optionally JAMS object.
Parameters
----------
audio_f : str
Path to audio file
jam : optional, `jams.JAMS`, str or file-like
Optional JAMS object/path to JAMS file/open file descriptor.
If provided, this will provide data for task transformers.
y : np.ndarray
sr : number > 0
If provided, operate directly on an existing audio buffer `y` at
sampling rate `sr` rather than load from `audio_f`.
crop : bool
If `True`, then data are cropped to a common time index across all
fields. Otherwise, data may have different time extents.
Returns
-------
data : dict
Data dictionary containing the transformed audio (and annotations)
Raises
------
ParameterError
At least one of `audio_f` or `(y, sr)` must be provided.
'''
if y is None:
if audio_f is None:
raise ParameterError('At least one of `y` or `audio_f` '
'must be provided')
# Load the audio
y, sr = librosa.load(audio_f, sr=sr, mono=True)
if sr is None:
raise ParameterError('If audio is provided as `y`, you must '
'specify the sampling rate as sr=')
if jam is None:
jam = jams.JAMS()
jam.file_metadata.duration = librosa.get_duration(y=y, sr=sr)
# Load the jams
if not isinstance(jam, jams.JAMS):
jam = jams.load(jam)
data = dict()
for operator in self.ops:
if isinstance(operator, BaseTaskTransformer):
data.update(operator.transform(jam))
elif isinstance(operator, FeatureExtractor):
data.update(operator.transform(y, sr))
if crop:
data = self.crop(data)
return data
def sampler(self, n_samples, duration, random_state=None):
'''Construct a sampler object for this pump's operators.
Parameters
----------
n_samples : None or int > 0
The number of samples to generate
duration : int > 0
The duration (in frames) of each sample patch
random_state : None, int, or np.random.RandomState
If int, random_state is the seed used by the random number
generator;
If RandomState instance, random_state is the random number
generator;
If None, the random number generator is the RandomState instance
used by np.random.
Returns
-------
sampler : pumpp.Sampler
The sampler object
See Also
--------
pumpp.sampler.Sampler
'''
return Sampler(n_samples, duration,
random_state=random_state,
*self.ops)
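    # Usage sketch (assuming `data = pump.transform(...)` as in the class
    # docstring): draw patches of 50 frames each:
    #
    #   sampler = pump.sampler(n_samples=8, duration=50, random_state=0)
    #   for patch in sampler(data):
    #       ...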
@property
def fields(self):
'''A dictionary of fields constructed by this pump'''
out = dict()
for operator in self.ops:
out.update(**operator.fields)
return out
def layers(self):
'''Construct Keras input layers for all feature transformers
in the pump.
Returns
-------
layers : {field: keras.layers.Input}
A dictionary of keras input layers, keyed by the corresponding
fields.
'''
layermap = dict()
for operator in self.ops:
if hasattr(operator, 'layers'):
layermap.update(operator.layers())
return layermap
def __getitem__(self, key):
return self.opmap.get(key)
def __call__(self, *args, **kwargs):
return self.transform(*args, **kwargs)
87ca3e048e36dae8645447b238f92ab0c3d924af | fe066cfd366f38e52fd779a437250001391d2de1 | /shortener_url/__init__.py | 9169fe6e8f3c003d35ba78c4b25ab63416fb6691 | [] | no_license | srault95/shortener-url | 708eb2c2f0d0b802362bd5846eb8bbe1dcf1b34b | 60255b9ccc86f9f59a95f04d49c14faacc7056c7 | refs/heads/master | 2021-01-21T10:45:40.155056 | 2017-03-01T07:07:25 | 2017-03-01T07:07:25 | 83,480,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py |
try:
from gevent import monkey
monkey.patch_all()
except ImportError:
    # gevent is optional; skip monkey-patching when it is not installed.
    pass
82a3aa46b605e8f17a99a67cb9b5993e4cac0a60 | 2bc7659be83178c43b1592efbe1d79c62fc4fa36 | /Python/1253 a부터 b까지 출력하기.py | 5543b4b27645447e5243f216cda2b4e7ca3d8eeaf | [] | no_license | KIMSUBIN17/Code-Up-Algorithm | ede6f443fcf640ecf58282c582da43e124ca44af | 831180c28d234366a1d3cf118bd2a615dc404f00 | refs/heads/master | 2023-07-22T21:42:06.990542 | 2021-09-05T08:36:32 | 2021-09-05T08:36:32 | 286,932,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py |
a, b = input().split()
a = int(a)
b = int(b)
if a > b:
    a, b = b, a
for i in range(a, b + 1):
    print(i, end=' ')
dc5d59c9621d108d4954db1465d6a8e5fee0b977 | 68ee9027d4f780e1e5248a661ccf08427ff8d106 | /extra/unused/LandsatPX_doc/get_landsat_pairs.py | 88a88f159e197fa5a9dbf8a867b0a8e0aad3b | ["MIT"] | permissive | whyjz/CARST | 87fb9a6a62d39fd742bb140bddcb95a2c15a144c | 4fc48374f159e197fa5a9dbf8a867b0a8e0aad3b | refs/heads/master | 2023-05-26T20:27:38.105623 | 2023-04-16T06:34:44 | 2023-04-16T06:34:44 | 58,771,687 | 17 | 4 | MIT | 2021-03-10T01:26:04 | 2016-05-13T20:54:42 | Python | UTF-8 | Python | false | false | 2628 | py |
import os
import glob
import math
def makeParamFiles(A,B):
dayA = (A[13:16])
yearA = (A[9:13])
dayB = (B[13:16])
yearB = (B[9:13])
file1 = open("landsat8_" + yearB +"_"+dayA+"_"+dayB+".txt","w")
file1.write(os.path.realpath(A) +" "+ os.path.realpath(B))
file1.close()
file2 = open("params_landsat8_"+yearB+"_to_"+dayA+"_to_"+dayB+"_r32x32_s32x32.txt","w")
# Change these parameters for each section as needed
file2.write("UTM_ZONE = 40 \n\
UTM_LETTER = X \n\
BAND = B8 \n\
ICE = /13t1/wjd73/Glacier_outlines/central_alaska_range/central_alaska_range_utm_ice.gmt \n\
ROCK = /13t1/wjd73/Glacier_outlines/central_alaska_range/central_alaska_range_utm_rock.gmt \n\
IMAGE_DIR = /13t1/wjd73/Franz_Joseph/Landsat8/IMAGES\n\
METADATA_DIR = /13t1/wjd73/Franz_Joseph/Landsat8/IMAGES\n\
PAIRS_DIR = /13t1/wjd73/Franz_Joseph/Landsat8/Pairs\n\
PROCESSORS = 20\n\
RESOLUTION = 15\n\
SATELLITE = Landsat8\n\
SNR_CUTOFF = 0\n\
DEM = /13t1/wjd73/Franz_Joseph/DEM/FJLREGION_DEM.tif\n\
PREFILTER = False\n\
REF_X = 32\n\
REF_Y = 32\n\
SEARCH_X = 32\n\
SEARCH_Y = 32\n\
STEP = 8\n\
M_SCRIPTS_DIR = /13t1/wjd73/MATLAB/Adam_Cleaner\n\
VEL_MAX = 5\n\
TOL = 0.3\n\
NUMDIF = 3\n\
SCALE = 1500000\n\
PAIRS = /13t1/wjd73/Franz_Joseph/Landsat8/Pairs/"+file1.name+"\n")
file2.close()
file3 = open("px_landsat8_"+yearB+"_"+dayA+"_to_"+dayB+".cmd","w")
file3.write("python /home/wjd73/Python/landsatPX.py " + file2.name+"\n")
file3.close()
def daydiff(A,B):
dayA = int(A[13:16])
yearA = int(A[9:13])
dayB = int(B[13:16])
yearB = int(B[9:13])
    # Day-of-year difference; assumes 365-day years (ignores leap days).
    diff = (dayB - (dayA - (yearB - yearA) * 365))
#print(str(dayA) +"\t" +str(yearA) +"\t" + str(dayB) + "\t" +str(yearB))
#print diff
return diff
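# e.g. two scene IDs in the same year with day-of-year fields 020 and 036
# give daydiff(A, B) == 16, one Landsat 8 revisit cycle.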
###################################################
def main():
scenelist = glob.glob("*B8.TIF")
scenelist.sort()
for i in range(len(scenelist) -1):
A = scenelist[i]
B = scenelist[i+1]
#print(A + "\t" + B)
diff = daydiff(A,B)
if (diff <= 48):
print(A + "\t" + B + "\t" + str(diff))
makeParamFiles(A,B)
main()
596d04f0a09f737e1286f16c621b077b530e988b | aed18908f6cdf93774aac253273e47d6e3521003 | runs/"res-30-deviation-as-0p01_loss--base10x46-scale-1.0.h5"/two_time_pad.py | f6a206ae86c4c0af7baf7b768d7eb8112243c06 | [] | no_license | matthiasgoergens/TwoTimePad | cbd1e2cb081805baf503b0a4984e74e0508060f8 | 7dc3a276179e3c15b07b1ae2c3c8d31d34bf9a00 | refs/heads/master | 2021-07-03T22:59:06.211897 | 2021-06-13T09:03:33 | 2021-06-13T09:03:33 | 9,916,768 | 0 | 0 | null | 2013-07-04T05:25:17 | 2013-05-07T16:48:56 | Python | UTF-8 | Python | false | false | 38620 | py |
# -*- coding: utf-8 -*-
import functools as ft
import itertools as it
import math
import random
import re
import sys
from datetime import datetime
from pprint import pprint
import numpy as np
import tensorflow as tf
# import tensorflow_addons as tfa
from tensorflow.keras.callbacks import (
ModelCheckpoint,
TensorBoard,
ReduceLROnPlateau,
EarlyStopping,
LearningRateScheduler,
)
from tensorboard.plugins.hparams import api as hp
from tensorflow import keras
from tensorflow.keras.layers import (
LSTM,
Add,
Average,
average,
BatchNormalization,
Bidirectional,
Conv1D,
Dense,
Dropout,
GaussianDropout,
Embedding,
Flatten,
GlobalMaxPooling1D,
Input,
MaxPooling1D,
SeparableConv1D,
SimpleRNN,
Softmax,
SpatialDropout1D,
TimeDistributed,
concatenate,
Layer,
Lambda,
)
from tensorflow.keras.models import Model, Sequential
from tensorflow_addons.layers import Maxout
import tensorflow_addons as tfa
device_name = tf.test.gpu_device_name()
if device_name != "/device:GPU:0":
useGPU = False
print(SystemError("GPU device not found", device_name))
# raise NotImplementedError("Want GPU")
else:
useGPU = True
print("Found GPU at: {}".format(device_name))
from tensorflow.keras.mixed_precision import experimental as mixed_precision
np.set_printoptions(precision=4)
alpha = " ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.?,-:;'()".lower()
alphaRE = alpha.replace("-", "\\-")
assert len(alpha) == 46
accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def nAccuracy(y_true, y_pred):
return 1 - accuracy(y_true, y_pred)
def error(y_true, y_pred):
return 1 - accuracy(y_true, y_pred)
def sumError(y_true, y_pred):
# raise TabError((y_true, y_pred))
# shape = (32, 50)
output = tf.reduce_mean(y_pred, -1)
return output
def load():
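    # Load the corpus, collapse all whitespace runs, and strip any character
    # outside the 46-letter alphabet defined above.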
# text = ' '.join(f.open('r').read() for f in pathlib.Path('data').glob('*.txt')).lower()
text = open("corpus.txt", "r").read().lower()
text = re.sub("\s+", " ", text)
# text = re.sub(f'[^{alphaRE}]', '', text)
text = re.sub("[^%s]" % alphaRE, "", text)
return text
def sub(cipher, key):
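    # Character-wise difference modulo the alphabet size: the "two-time pad"
    # ciphertext obtained by subtracting one stream from the other.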
return [(a - b) % len(alpha) for a, b in zip(cipher, key)]
def clean(text):
t = {c: i for i, c in enumerate(alpha)}
return [t[c] for c in text]
def toChar(numbers):
return "".join(alpha[i] for i in numbers)
# toChar
# (5, 125, 46)
def toChars(tensor):
(linesNums, charNum, alphaNum) = tensor.shape
output = []
# TODO: use gather https://www.tensorflow.org/api_docs/python/tf/gather?version=stable
assert alphaNum == len(alpha)
for lineNum in range(linesNums):
chars = []
for cN in range(charNum):
(_, char) = max(
[(tensor[lineNum, cN, alphaN], alphaN) for alphaN in range(alphaNum)]
)
chars.append(char)
output.append(toChar(chars))
return output
batch_size = 32
def round_to(x, n):
return (x // n) * n
def make1(window, text):
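    # Cut the corpus at a random offset and reshape it into fixed-length
    # windows (rounded to a multiple of window*batch_size) so that successive
    # epochs see differently aligned samples.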
(size,) = text.shape
start = random.randrange(window)
return tf.reshape(
tf.slice(
text, [start], [round_to(size - window * batch_size, window * batch_size)]
),
(-1, window),
)
def makeEpochs(mtext, window, ratio):
while True:
x = make1(window, mtext)
y = make1(window, mtext)
(size, _) = x.shape
training_size = round(size * ratio)
for _ in range(100):
xx = tf.random.shuffle(x)
yy = tf.random.shuffle(y)
cipherX = (xx - yy) % 46
cipherY = (yy - xx) % 46
# Drop last epoch, it's probably not full.
for i in list(range(0, x.shape[0], training_size))[:-1]:
yield (
cipherX[i : i + training_size, :],
cipherY[i : i + training_size, :],
), (
xx[i : i + training_size, :],
yy[i : i + training_size, :],
)
class TwoTimePadSequence(keras.utils.Sequence):
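    # Keras Sequence that draws fresh random (plaintext, key) windows from the
    # corpus and serves the two complementary ciphertexts (a-b and b-a, mod 46)
    # as inputs, reshuffling whenever the current pool is exhausted.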
def _load(self):
self.aa = tf.reshape(tf.random.shuffle(self.a), (-1, batch_size, self.window))
self.bb = tf.reshape(tf.random.shuffle(self.b), (-1, batch_size, self.window))
self.cipherA = (self.aa - self.bb) % 46
self.cipherB = (self.bb - self.aa) % 46
self.size = self.aa.shape[0]
self.items = iter(range(self.size))
def on_epoch_end(self):
print("Epoch {self.epochs} ended.")
self._load()
self.epochs += 1
raise NotImplementedError("Called on epoch end")
def __len__(self):
return self.training_size
def __getitem__(self, idx):
i = next(self.items, None)
# Hack, because on_epoch_end doesn't seem to be called.
if i is None:
self._load()
return self.__getitem__(idx)
else:
if self.both and not self.dev:
return (
(self.cipherA[i, :, :], self.cipherB[i, :, :]),
(self.aa[i, :, :], self.bb[i, :, :]),
)
elif self.both and self.dev:
return (
(self.cipherA[i, :, :], self.cipherB[i, :, :]),
(
self.aa[i, :, :],
self.bb[i, :, :],
tf.zeros(
(batch_size, self.window), dtype=tf.dtypes.float32
),
),
)
else:
# return (self.cipherA[i, :, :], ), (self.aa[i, :, :], self.bb[i, :, :])
return (self.cipherA[i, :, :],), (self.aa[i, :, :],)
def __init__(self, window, training_size, mtext, both=True, dev=False):
self.a = make1(window, mtext)
self.b = make1(window, mtext)
self.epochs = 0
self.training_size = training_size
self.window = window
self._load()
self.both = both
self.dev = dev
def cipher_for_predict():
# remove eol
c1 = clean(open("TwoTimePad/examples/ciphertext-1.txt", "r").read().lower()[:-1])
# print(c1)
c2 = clean(open("TwoTimePad/examples/ciphertext-2.txt", "r").read().lower()[:-1])
# print(c2)
return tf.convert_to_tensor([sub(c1, c2)])
HP_DROPOUT = hp.HParam("dropout", hp.RealInterval(0.0, 0.5))
HP_HEIGHT = hp.HParam("height", hp.IntInterval(0, 30))
HP_blocks = hp.HParam("blocks", hp.IntInterval(0, 30))
HP_WINDOW = hp.HParam("window", hp.IntInterval(1, 100))
HP_resSize = hp.HParam("resSize", hp.IntInterval(46, 8 * 46))
HP_bottleneck = hp.HParam("bottleneck", hp.IntInterval(0, 1000))
HP_blowup = hp.HParam("blowup", hp.IntInterval(1, 8))
HP_max_kernel = hp.HParam("max_kernel", hp.IntInterval(3, 1 + 2 * 9))
HP_deviation_as_loss = hp.HParam("deviation_weight", hp.RealInterval(0.0, 10.0))
METRIC_ACCURACY = "accuracy"
relu = ft.partial(tf.keras.layers.PReLU, shared_axes=[1])
crelu = lambda: tf.nn.crelu
def plus(a, b):
if a is None:
return b
elif b is None:
return a
else:
return Add()([a, b])
def concat(l):
l = [item for item in l if item is not None]
if len(l) == 1:
return l[0]
else:
return concatenate(l)
def avg(l):
assert isinstance(l, (list,)), type(l)
l = [item for item in l if item is not None]
if len(l) == 1:
return l[0]
else:
return average(l)
def cat(a, b):
if a is None:
return b
elif b is None:
return a
else:
return concatenate([a, b])
msra = tf.initializers.VarianceScaling(scale=1.0, distribution="truncated_normal")
def sequential(*layers):
def helper(last):
for layer in layers:
last = layer(last)
return last
return helper
@Lambda
def justShift(tensors):
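    # Re-index each position's 46-way logit vector by the observed cipher
    # shift: output letter j is taken from input letter (j - shift) mod 46,
    # converting a prediction for one stream into one for the other.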
clear, shifts = tensors
r = tf.range(46)
r = tf.broadcast_to(r, tf.shape(clear))
shifts = tf.broadcast_to(tf.expand_dims(shifts, -1), tf.shape(clear))
indices = (r - 1 * shifts) % 46
clearShift = tf.gather(clear, indices, batch_dims=2)
return clearShift
# TODO: I suspect something is still wrong with my shift function. Test more!
def ShiftLayer(clear, key, shifts):
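    # Consistency penalty: rotate the plaintext logits by the observed cipher
    # shifts (justShift above) and compare them with the key logits via a
    # symmetrised KL divergence. If both heads respect clear - key = cipher,
    # the two distributions should agree. (The TODO above notes the shift
    # direction may still be off.)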
clear = justShift([clear, shifts])
clear = Softmax(dtype='float32')(clear)
key = Softmax(dtype='float32')(key)
kld = keras.losses.kullback_leibler_divergence
return (kld(clear, key) + kld(key, clear))
# Resnet.
def make_model_simple(hparams):
n = hparams[HP_WINDOW]
height = hparams[HP_HEIGHT]
ic = lambda: Sequential(
[BatchNormalization(), SpatialDropout1D(rate=hparams[HP_DROPOUT]),]
)
sd = lambda: SpatialDropout1D(rate=hparams[HP_DROPOUT])
inputA = Input(shape=(n,), name="ciphertextA", dtype="int32")
# inputB = Input(shape=(n,), name="ciphertextB", dtype='int32')
base = 4 * 46
blowup = 3
embeddedA = Embedding(
output_dim=base,
input_length=n,
input_dim=len(alpha),
name="embeddingA",
batch_input_shape=[batch_size, n],
)(inputA)
# Idea: Start first res from ic() or conv.
# Idea: also give input directly, not just embedding?
conved = Sequential(
[
ic(),
Conv1D(
filters=blowup * base,
kernel_size=9,
padding="same",
kernel_initializer=msra,
),
]
)(embeddedA)
outputs = embeddedA
for i in range(height - 1):
outputs = cat(outputs, conved)
conved = plus(
conved,
Sequential(
[
Maxout(base),
ic(),
Conv1D(
filters=blowup * base,
kernel_size=9,
padding="same",
kernel_initializer=msra,
),
]
)(conved),
)
make_end = lambda name: Sequential(
[
Maxout(base),
ic(),
Conv1D(
name="output",
filters=46,
kernel_size=1,
padding="same",
strides=1,
dtype="float32",
kernel_initializer=msra,
),
],
name=name,
)
clear = make_end("clear")(cat(outputs, conved))
# key = make_end('key')(cat(outputs, conved))
model = Model([inputA], [clear])
model.compile(
optimizer=tf.optimizers.Adam(learning_rate=1),
# optimizer=tf.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
# loss_weights={'clear': 1/2, 'key': 1/2},
metrics=[nAccuracy],
)
return model
def make_model_conv_res(hparams):
n = hparams[HP_WINDOW]
height = hparams[HP_HEIGHT]
width = hparams[HP_max_kernel]
ic = lambda: TimeDistributed(BatchNormalization())
inputA = Input(shape=(n,), name="ciphertextA", dtype="int32")
inputB = Input(shape=(n,), name="ciphertextB", dtype="int32")
base = hparams[HP_resSize]
blowup = hparams[HP_blowup]
embedding = Embedding(
output_dim=46,
input_length=n,
input_dim=len(alpha),
batch_input_shape=[batch_size, n],
)
embeddedA = embedding(inputA)
embeddedB = embedding(inputB)
def conv():
return Sequential(
[
ic(),
relu(),
Conv1D(
filters=base,
kernel_size=width,
padding="same",
kernel_initializer=msra,
),
]
)
convedA = embeddedA
convedB = embeddedB
for _ in range(height):
c = conv()
cA, cB = c(cat(convedA, convedB)), c(cat(convedB, convedA))
if tuple(convedA.shape) == tuple(cA.shape):
convedA = plus(convedA, cA)
convedB = plus(convedB, cB)
else:
convedA = cA
convedB = cB
make_end = Conv1D(
name="output",
filters=46,
kernel_size=1,
padding="same",
strides=1,
dtype="float32",
kernel_initializer=msra,
)
clear = Layer(name="clear", dtype="float32")(make_end(convedA))
key = Layer(name="key", dtype="float32")(make_end(convedB))
model = Model([inputA, inputB], [clear, key])
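    # Auxiliary "deviation" loss: the plaintext and key heads are tied together
    # through the observed cipher (see ShiftLayer) and penalised for
    # disagreeing, weighted by HP_deviation_as_loss.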
deviation_weight = hparams[HP_deviation_as_loss]
dev = ShiftLayer(clear, key, inputA)
sdev = Layer(name="dev", dtype="float32")(tf.reduce_mean(dev))
model.add_loss(sdev * deviation_weight)
model.add_metric(sdev, name="deviation", aggregation='mean')
model.compile(
optimizer=tf.optimizers.Adam(),
loss={
"clear": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
"key": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
},
loss_weights={"clear": 1 / 2, "key": 1 / 2},
metrics=[error],
)
return model
#def make_model_conv(hparams):
# n = hparams[HP_WINDOW]
# height = hparams[HP_HEIGHT]
# width = hparams[HP_max_kernel]
#
# ic = lambda: TimeDistributed(BatchNormalization())
#
# inputA = Input(shape=(n,), name="ciphertextA", dtype="int32")
# inputB = Input(shape=(n,), name="ciphertextB", dtype="int32")
# base = hparams[HP_resSize]
# blowup = hparams[HP_blowup]
# embedding = Embedding(
# output_dim=46,
# input_length=n,
# input_dim=len(alpha),
# batch_input_shape=[batch_size, n],
# )
# embeddedA = embedding(inputA)
# embeddedB = embedding(inputB)
#
# def conv():
# return Sequential(
# [
# Conv1D(
# filters=base,
# kernel_size=width,
# padding="same",
# kernel_initializer=msra,
# ),
# ic(),
# relu(),
# ]
# )
#
# convedA = embeddedA
# convedB = embeddedB
# for _ in range(height):
# c = conv()
# convedA, convedB = c(cat(convedA, convedB)), c(cat(convedB, convedA))
#
# make_end = Conv1D(
# name="output",
# filters=46,
# kernel_size=1,
# padding="same",
# strides=1,
# dtype="float32",
# kernel_initializer=msra,
# )
#
# clear = Layer(name="clear", dtype="float32")(make_end(convedA))
# key = Layer(name="key", dtype="float32")(make_end(convedB))
#
# b = TimeDistributed(BatchNormalization())
# b = lambda x: x
#
# dev = ShiftLayer(clear, key, inputA)
# # assert tuple(dev.shape) in [(None, n, 46), (32, n, 46)], dev
#
# # assert tuple(key.shape) == (None, n, 46), key
# # assert tuple(dev.shape) == (None, n, 46), dev
# # assert tuple(key.shape) == (None, n, 46), key
#
#
# model = Model([inputA, inputB], [clear, key])
#
# deviation_weight = hparams[HP_deviation_as_loss]
# sdev = Layer(name="dev", dtype="float32")(tf.reduce_mean(dev)) * deviation_weight
# model.add_loss(sdev)
# model.add_metric(sdev, name="deviation", aggregation='mean')
#
# model.compile(
# # optimizer=tf.optimizers.Adam(learning_rate=0.001/2),
# optimizer=tf.optimizers.Adam(),
# loss={
# "clear": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
# "key": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
# },
# loss_weights={"clear": 1 / 2, "key": 1 / 2},
# metrics=[error],
# )
# return model
def make_model_fractal(hparams):
n = hparams[HP_WINDOW]
height = hparams[HP_HEIGHT]
width = hparams[HP_max_kernel]
ic = lambda: TimeDistributed(BatchNormalization())
inputA = Input(shape=(n,), name="ciphertextA", dtype="int32")
inputB = Input(shape=(n,), name="ciphertextB", dtype="int32")
base = hparams[HP_resSize]
blowup = hparams[HP_blowup]
embedding = Embedding(
output_dim=46,
input_length=n,
input_dim=len(alpha),
batch_input_shape=[batch_size, n],
)
embeddedA = embedding(inputA)
embeddedB = embedding(inputB)
def conv():
# Went from [x | blowup] * base to base to blowup * base
# So could use cat?
# Now: geting from base -> blowup * base -> base
return Sequential(
[
# Idea: parallel of different kernel sizes. Will save on trainable params.
ic(),
Maxout(base),
Conv1D(
filters=blowup * base,
kernel_size=width,
padding="same",
kernel_initializer=msra,
),
]
)
    def block(depth):
        if depth <= 0:
            # None means: no weight in average.
            return lambda *args: [None, None]
        else:
            # f 0 = identity (or conv in paper) # Not Implemented
            # f 1 = conv # to be like paper.
            # f (n+1) = (f n . f n) + conv
            # `depth` used to be called `n`, shadowing the window length above;
            # the Input time dimension must be the window `n`, not the depth.
            inputA = Input(shape=(n, blowup * base))
            inputB = Input(shape=(n, blowup * base))
            c = conv()
            convA = c(cat(inputA, inputB))
            convB = c(cat(inputB, inputA))
            [blockA, blockB] = block(depth - 1)(block(depth - 1)([inputA, inputB]))
            return Model([inputA, inputB], [avg([blockA, convA]), avg([blockB, convB])])
c0 = Conv1D(
filters=blowup * base,
kernel_size=width,
padding="same",
kernel_initializer=msra,
)
cA = c0(embeddedA)
cB = c0(embeddedB)
convedA, convedB = block(height)([cA, cB])
make_end = Conv1D(
name="output",
filters=46,
kernel_size=1,
padding="same",
strides=1,
dtype="float32",
kernel_initializer=msra,
)
clear = Layer(name="clear", dtype="float32")(
make_end(SpatialDropout1D(rate=hparams[HP_DROPOUT])(convedA))
)
key = Layer(name="key", dtype="float32")(
make_end(SpatialDropout1D(rate=hparams[HP_DROPOUT])(convedB))
)
    # Leftover experimental code: `tf.roll()` has required arguments, so this
    # block would raise a TypeError if executed; disabled rather than run.
    # un = tf.unstack(key, axis=-2)
    # assert n == len(un), un
    # embs = tf.unstack(embeddedA, axis=-2)
    # shifts = tf.unstack(inputA, axis=-2)
    # tf.roll()
model = Model([inputA, inputB], [clear, key])
model.compile(
# optimizer=tf.optimizers.Adam(learning_rate=0.001/2),
optimizer=tf.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
loss_weights={"clear": 1 / 2, "key": 1 / 2},
metrics=[error],
)
return model
# Mixture between fractal and dense.
def make_model_fractal_dense(hparams):
n = hparams[HP_WINDOW]
height = hparams[HP_HEIGHT]
ic = lambda: sequential(
TimeDistributed(BatchNormalization()),
SpatialDropout1D(rate=hparams[HP_DROPOUT]),
)
input = Input(shape=(n,), name="ciphertextA", dtype="int32")
base = hparams[HP_resSize]
blowup = hparams[HP_blowup]
embedded = Embedding(
output_dim=46,
input_length=n,
input_dim=len(alpha),
name="embeddingA",
batch_input_shape=[batch_size, n],
)(input)
def conv(extra):
def helper(inputs):
input = avg(inputs)
max_kernel = hparams[HP_max_kernel]
try:
conved = Conv1D(
filters=extra,
kernel_size=3,
padding="same",
kernel_initializer=msra,
)(TimeDistributed(BatchNormalization())(relu()(input)))
except:
print(f"Input: {input}")
raise
return [cat(input, conved)]
return helper
def block(n):
if n <= 0:
assert NotImplementedError
# Identity. Work out whether we can/should use conv instead?
# If we use conv, we can have embedding go to 46 instead of base, I think.
return avg
elif n <= 1:
return conv(base)
else:
# f 0 = identity (or conv in paper)
# f (n+1) = (f n . f n) + conv
def helper(inputs):
(_batch_size, _time, input_features) = inputs[-1].shape
inter_out = block(n - 1)(inputs)
assert isinstance(inter_out, (list,)), type(inter_out)
outputs = block(n - 1)(inter_out)
assert isinstance(outputs, (list,)), type(outputs)
(_batch_sizeO, _timeO, output_features) = outputs[-1].shape
assert (_batch_size, _time) == (_batch_sizeO, _timeO), (
(_batch_size, _time),
(_batch_sizeO, _timeO),
)
assert input_features <= output_features, (
input_features,
output_features,
)
try:
c = conv(output_features - input_features)(inputs)
except:
print("input, output, diff")
print(inputs[-1].shape)
print(outputs[-1].shape)
print((input_features, output_features))
raise
o = [*c, *outputs]
assert isinstance(o, (list,)), o
return o
return helper
# Idea: Start first res from ic() or conv.
# Idea: also give input directly, not just embedding?
conved = avg(block(height)([embedded]))
clear = Conv1D(
name="clear",
filters=46,
kernel_size=1,
padding="same",
strides=1,
dtype="float32",
kernel_initializer=msra,
)(conved)
model = Model([input], [clear])
model.compile(
# optimizer=tf.optimizers.Adam(learning_rate=0.001/2),
optimizer=tf.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[nAccuracy],
)
return model
def make_model_dense(hparams):
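    # NOTE: unfinished variant (see the TODO in dense1): it still references a
    # single `input`/`embedded` tensor from an earlier one-cipher version, so
    # building it as-is raises a NameError. Kept for reference only.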
n = hparams[HP_WINDOW]
height = hparams[HP_HEIGHT]
ic = lambda: sequential(
TimeDistributed(BatchNormalization()),
SpatialDropout1D(rate=hparams[HP_DROPOUT] / 2),
Dropout(rate=hparams[HP_DROPOUT] / 2),
)
inputA = Input(shape=(n,), name="ciphertextA", dtype="int32")
inputB = Input(shape=(n,), name="ciphertextB", dtype="int32")
base = hparams[HP_resSize]
blowup = hparams[HP_blowup]
embedding = Embedding(
output_dim=46,
input_length=n,
input_dim=len(alpha),
name="embeddingA",
batch_input_shape=[batch_size, n],
)
embeddedA = embedding(inputA)
embeddedB = embedding(inputB)
def conv():
def helper(input):
conved = Conv1D(
filters=base, kernel_size=3, padding="same", kernel_initializer=msra
)(input)
return ic()(relu()(conved))
return helper
def dense1(inputA, inputB):
# TODO: finish
return cat(input, conv()(input))
def denseN(height, input):
return sequential(*(height * [dense1]))(input)
# Idea: Start first res from ic() or conv.
# Idea: also give input directly, not just embedding?
bottleneck = hparams[HP_bottleneck]
blocks = hparams[HP_blocks]
def block(n, input):
if n <= 0:
return input
else:
output = denseN(height, input)
# Residual connection for all but first block:
if 1 < n:
print(f"Bottlenecking at block {n}.")
output = Conv1D(
filters=bottleneck,
kernel_size=1,
padding="same",
kernel_initializer=msra,
)(output)
else:
print(f"No bottlenecking at block {n}.")
if 1 < n < blocks:
assert tuple(input.shape) == tuple(output.shape), (
input.shape,
output.shape,
)
print(f"Residual connection at block {n}.")
output = plus(input, output)
else:
print(f"No residual connection at block {n}.")
return block(n - 1, output)
conved = block(blocks, embedded)
make_end = lambda name: sequential(
Conv1D(
name=name,
filters=46,
kernel_size=1,
padding="same",
strides=1,
dtype="float32",
kernel_initializer=msra,
),
)
clear = make_end("clear")(conved)
model = Model([input], [clear])
model.compile(
# optimizer=tf.optimizers.Adam(learning_rate=0.001),
optimizer=tf.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[nAccuracy],
)
return model
def make_model_recreate(hparams):
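    # Attempted re-creation of the best run so far (see the notes at the bottom
    # of this file): residual blocks whose inputs are dropout-scaled copies of
    # every earlier feature map from both cipher streams.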
relu = ft.partial(tf.keras.layers.PReLU, shared_axes=[1])
n = hparams[HP_WINDOW]
inputA = Input(shape=(n,), name="ciphertextA", dtype="int32")
inputB = Input(shape=(n,), name="ciphertextB", dtype="int32")
resSize = hparams[HP_resSize]
width = hparams[HP_max_kernel]
height = hparams[HP_HEIGHT]
embedding = Embedding(
output_dim=resSize,
input_length=n,
input_dim=len(alpha),
name="my_embedding",
batch_input_shape=[batch_size, n],
)
embeddedA = embedding(inputA)
embeddedB = embedding(inputB)
def makeResNetNew(i, channels, _, size):
fanInput = Input(shape=(n, 4 * size,))
fan = concatenate(
[
Conv1D(
filters=round(size / 4),
kernel_size=width,
padding="same",
kernel_initializer=msra,
)(fanInput)
for width in [3, 5, 7, 9]
]
)
m = Model([fanInput], [fan])
return Sequential(
[
# Input(shape=(n,channels,)),
# SpatialDropout1D(rate=hparams[HP_DROPOUT]), # Not sure whether that's good.
# TODO: if BatchNormalization is independent for each dimension, we can do post BatchNorm, instead of pre?
# TODO: try different dropout scheme here, that messes less with variance?
## Note: dropout done outside.
# SpatialDropout1D(rate=hparams[HP_DROPOUT] * i / height),
# TimeDistributed(BatchNormalization()),
# relu(),
Conv1D(
filters=16 * size,
kernel_size=1,
padding="same",
kernel_initializer=msra,
),
# TODO: Might want to drop this intermediate batch norm? So that dropout doesn't have too much impact on variance.
TimeDistributed(BatchNormalization()),
Maxout(4 * size),
m,
TimeDistributed(BatchNormalization()),
Maxout(size),
],
name="resnet{}".format(i),
)
def makeResNet(i, channels, width, size):
return Sequential(
[
Input(name="res_inputMe", shape=(n, channels,)),
# SpatialDropout1D(rate=hparams[HP_DROPOUT]), # Not sure whether that's good.
TimeDistributed(BatchNormalization()),
relu(),
Conv1D(
filters=4 * size,
kernel_size=1,
padding="same",
kernel_initializer=msra,
),
TimeDistributed(BatchNormalization()),
relu(),
Conv1D(
filters=size,
kernel_size=width,
padding="same",
kernel_initializer=msra,
),
],
name="resnet{}".format(i),
)
def make_drop(layers):
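        # Linearly scaled dropout over the stack of feature maps: the most
        # recent output gets rate 0 and older outputs get progressively more.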
drop = hparams[HP_DROPOUT]
return list(
reversed(
[
(SpatialDropout1D(drop * distance / height))(layer)
for distance, layer in enumerate(reversed(layers))
]
)
)
random.seed(23)
def make_block(convedA, convedB):
convedAx = [convedA]
convedBx = [convedB]
for i in range(height):
# We deliberately use different dropout masks in all four cases.
catA = concatenate([*make_drop(convedAx), *make_drop(convedBx)])
catB = concatenate([*make_drop(convedBx), *make_drop(convedAx)])
(_, _, num_channels) = catA.shape
(_, _, num_channelsB) = catB.shape
assert tuple(catA.shape) == tuple(catB.shape), (catA.shape, catB.shape)
width = 1 + 2 * random.randrange(5, 8)
size = random.randrange(23, 2 * 46)
size = resSize
resNet = makeResNetNew(i, num_channels, width, size)
resA = plus(convedAx[-1], resNet(catA))
resB = plus(convedBx[-1], resNet(catB))
convedAx.append(resA)
convedBx.append(resB)
assert len(convedAx) == len(convedBx), (len(convedAx), len(convedBx))
for j, (a, b) in enumerate(zip(convedAx, convedBx)):
assert tuple(a.shape) == tuple(b.shape), (i, j, a.shape, b.shape)
return convedAx, convedBx
convedA, convedB = make_block(embeddedA, embeddedB)
# assert tuple(convedA.shape) == tuple(convedB.shape), (convedA.shape, convedB.shape)
# TODO: check whether final BatchNorm would help? (Pre dropout, of course.)
# TODO: Similar for relu?
# TODO: Try final Dropout with my other approaches, too.
# TODO: Try different amounts of final dropout. Can even try very high amounts, because we have so many dimensions at the end.
# Approx 1,246 dimensions at the end for something close to `faithful` repro.
# So could try even 90% dropout.
make_end = Conv1D(
name="output",
filters=46,
kernel_size=1,
padding="same",
strides=1,
dtype="float32",
kernel_initializer=msra,
)
totes_clear = Layer(name="clear", dtype="float32")(
make_end(SpatialDropout1D(rate=0.0)(concatenate(convedA)))
)
totes_key = Layer(name="key", dtype="float32")(
make_end(SpatialDropout1D(rate=0.0)(concatenate(convedB)))
)
model = Model([inputA, inputB], [totes_clear, totes_key])
model.compile(
optimizer=tf.optimizers.Adam(),
# optimizer=tf.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.0), # momentum=0.9, nesterov=True),
# optimizer=tfa.optimizers.AdamW(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
loss_weights={"clear": 1 / 2, "key": 1 / 2},
metrics=[error],
)
return model
l = 50
hparams = {
HP_DROPOUT: 0.0,
HP_HEIGHT: 30,
HP_blocks: 1,
HP_bottleneck: 46 * 5,
## Idea: skip the first few short columns in the fractal.
# HP_SKIP_HEIGH: 3,
HP_WINDOW: l,
HP_resSize: round_to(10*46, 4),
HP_blowup: 2,
HP_max_kernel: 5,
HP_deviation_as_loss: 1/100,
}
weights_name = "res-30-deviation-as-0p01_loss--base10x46-scale-1.0.h5"
make_model = make_model_conv_res
def show():
make_model(hparams).summary()
def showOld():
keras.models.load_model("weights/" + weights_name).summary()
def main():
# TODO: Actually set stuff to float16 only, in inference too. Should use
# less memory.
policy = mixed_precision.Policy("mixed_float16")
mixed_precision.set_policy(policy)
print("Compute dtype: %s" % policy.compute_dtype)
print("Variable dtype: %s" % policy.variable_dtype)
with tf.device(device_name):
text = clean(load())
# mtext = tf.convert_to_tensor(text)
mtext = tf.convert_to_tensor(text)
# logdir = "logs/scalars/" + datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = "logs/scalars/{}".format(weights_name)
tensorboard_callback = TensorBoard(
log_dir=logdir, update_freq=50_000, profile_batch=0, embeddings_freq=5,
)
checkpoint = ModelCheckpoint(
"weights/" + weights_name, monitor="loss", verbose=1, save_best_only=True
)
def schedule(epoch):
# TODO: Aso try SGD with momentum.
default = 0.001
maxLR = 32 / 3
lastEpoch = 12
if epoch < lastEpoch:
lr = default * min(maxLR, 1 + epoch * maxLR / 4)
else:
lr = default * max(1, maxLR * (1 - (epoch - lastEpoch) / 20))
# NOTE: 32 was still fine, 64 broke.
lr = default * 1
print(
f"Scheduled learning rate for epoch {epoch}: {default} * {lr/default}"
)
return lr
def scheduleRampSGD(epoch):
# TODO: Aso try SGD with momentum.
default = 0.005
# lr = default * (epoch / 2) **2
lr = default * (epoch + 1)
# NOTE: 32 was still fine, 64 broke.
print(
f"Scheduled learning rate for epoch {epoch}: {default} * {lr/default}"
)
return lr
def slow(epoch):
return 0.001
            # return 0.001 / 100  # unreachable alternative, kept for quick toggling
callbacks_list = [
# checkpoint,
tensorboard_callback,
# hp.KerasCallback(logdir, hparams),
# ReduceLROnPlateau(
# monitor="loss",
# mode="min",
# patience=20,
# cooldown=10,
# factor=1 / 2,
# verbose=1,
# min_delta=0.001,
# ),
# LearningRateScheduler(schedule),
# EarlyStopping(
# monitor="loss", patience=60, verbose=1, restore_best_weights=True
# ),
]
with tf.summary.create_file_writer("logs/scalars").as_default():
hp.hparams_config(
hparams=[HP_DROPOUT, HP_HEIGHT, HP_WINDOW],
metrics=[hp.Metric(METRIC_ACCURACY, display_name="Accuracy")],
)
# try:
print("Making model.")
model = make_model(hparams)
try:
            raise NotImplementedError("Not loading weights for testing.")
print("Trying to load weights.")
model.load_weights("weights/" + weights_name)
model.summary()
print(weights_name)
print("Loaded weights.")
except:
model.summary()
print(weights_name)
print("Failed to load weights.")
pass
# raise
# model = keras.models.load_model('weights/'+weights_name)
# for i in range(10*(layers+1)):
# print("Predict:")
# predict_size = 10
# ita_cipher = cipher_for_predict()
# [ita_label, ita_key] = model .predict(ita_cipher)
# print(toChars_labels(ita_cipher))
# pprint(toChars(ita_label)[:1000])
# pprint(toChars(ita_key)[:1000])
# (ciphers_p, labels_p, keys_p) = samples(text, predict_size, l)
# [pred_label, pred_key] = model.predict(ciphers_p)
# # clear, key, prediction
# pprint(
# list(
# zip(
# toChars_labels(ciphers_p),
# toChars_labels(labels_p),
# toChars(pred_label),
# toChars_labels(keys_p),
# toChars(pred_key),
# predict_size * [l * " "],
# )
# ),
# width=120,
# )
# model.evaluate(TwoTimePadSequence(l, 10**4 // 32), callbacks=[tensorboard_callback])
# print("Training:")
# (ciphers, labels, keys) = samples(text, training_size, l)
# print(model.fit(ciphers, [labels, keys],
# for epoch, (x, y) in enumerate(makeEpochs(mtext, l, 1/60)):
# print(f"My epoch: {epoch}")
if True:
try:
model.fit(
x=TwoTimePadSequence(l, 10 ** 4 // 32, mtext, both=True, dev=False),
# x = x, y = y,
# steps_per_epoch=10 ** 4 // 32,
max_queue_size=10 ** 3,
initial_epoch=0,
# epochs=epoch+1,
# validation_split=0.1,
validation_data=TwoTimePadSequence(
l, 10 ** 3 // 32, mtext, both=True, dev=False
),
epochs=100_000,
callbacks=callbacks_list,
# batch_size=batch_size,
verbose=1,
# workers=8,
# use_multiprocessing=True,
)
except:
print("Saving model...")
model.save("weights/" + weights_name, include_optimizer=True)
print("Saved model.")
raise
# (ciphers_t, labels_t, keys_t) = samples(text, 1000, l)
# print("Eval:")
# model.evaluate(TwoTimePadSequence(l, 10**4))
# Idea: we don't need the full 50% dropout regularization, because our input is already random.
# So try eg keeping 90% of units? Just enough to punish big gross / small nettto co-adaptions.
# But wow, this bigger network (twice as large as before) trains really well without dropout. And no learning rate reduction, yet.
# It's plateau-ing about ~2.54 loss at default learning rate after ~20 epoch. (If I didn't miss a restart.)
# adense-6-c46.h5/train and fractal-6-relu-avg-base_8-post-staggered3.h5 and denseCNN-20-random-mixed-pre-activation-shorter-seed-23.h5 are best so far.
# denseCNN-20-random-mixed-pre-activation-shorter-seed-23.h5 best by far. That's what I'm trying to recreate and improve on.
# Both-loss at minimum was ~.92 (so single ~0.46) and accuracy was ~86.2%
# Dropout _after_ all BatchNorm is fine. Especially drop out just before the end should help.
# Base loss for one side:
# log(46, 2)
# 5.523561956057013
if __name__ == "__main__":
if useGPU:
main()
else:
show()
| [
"[email protected]"
] | |
5d52550b78b7a096a5f07042c2f7a901e73ada2b | d7ccb4225f623139995a7039f0981e89bf6365a4 | /.history/accounts/views_20211013005944.py | b3778559b57bf3bf0425e1678a3cc6003745a10b | [] | no_license | tonnymuchui/django-mall | 64fd4abc3725c1bd0a3dcf20b93b490fe9307b37 | 55c083d8433be3c77adc61939cd197902de4ce76 | refs/heads/master | 2023-08-23T04:59:20.418732 | 2021-10-13T15:59:37 | 2021-10-13T15:59:37 | 415,668,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | from django.shortcuts import render
# Create your views here.
def register(request):
    return render(request, 'register.html')

def login(request):
    return render(request, 'register.html')

def log(request):
    return render(request, 'register.html')
"[email protected]"
] | |
ac3cb9735acbbe7d612a9d4587f73eb38dc0804d | 14af6e17a596aa340f7a1700b213dc5d41771043 | /space/main.py | 63584e9f3b4a359761c6cf892ffe4d17c1d144dd | [
"BSD-3-Clause"
] | permissive | samdmarshall/space | 56d54d0da7a503cc1678786d0c2430ad20ebd194 | e9d9899d856c7c20d819e03357017dd07e1c8f23 | refs/heads/master | 2021-04-29T07:30:41.834581 | 2017-02-09T21:03:12 | 2017-02-09T21:03:12 | 77,950,055 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,477 | py | # Copyright (c) 2017, Samantha Marshall (http://pewpewthespells.com)
# All rights reserved.
#
# https://github.com/samdmarshall/space
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of Samantha Marshall nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import argparse
from .version import __version__ as SPACE_VERSION
from .Logger import Logger
from . import Settings
from . import Executor
# Main
def main():
# setup the argument parsing
parser = argparse.ArgumentParser(description='space is a tool for managing workspaces')
parser.add_argument(
'--version',
help='Displays the version information',
action='version',
version=SPACE_VERSION,
)
parser.add_argument(
'--quiet',
help='Silences all logging output',
default=False,
action='store_true',
)
parser.add_argument(
'--verbose',
help='Adds verbosity to logging output',
default=False,
action='store_true',
)
parser.add_argument(
'--no-ansi',
help='Disables the ANSI color codes as part of the logger',
default=False,
action='store_true',
)
parser.add_argument(
'--debug',
help=argparse.SUPPRESS,
default=False,
action='store_true',
)
parser.add_argument(
'--list',
help='Displays a list of all available subcommands for the current working directory',
default=False,
action='store_true',
)
parser.add_argument(
'--edit',
help='Opens the space.yml file in your EDITOR',
default=False,
action='store_true',
)
parser.add_argument(
'--env',
help='Passes values into the environment you are working in',
action='store',
default='',
)
initial_arguments, remaining_args = parser.parse_known_args()
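    # Two-phase parsing: consume the global flags first, then build the
    # subcommand parser from whatever the loaded configuration provides.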
# perform the logging modifications before we do any other operations
Logger.disableANSI(initial_arguments.no_ansi)
Logger.enableDebugLogger(initial_arguments.debug)
Logger.isVerbose(initial_arguments.verbose)
Logger.isSilent(initial_arguments.quiet)
Logger.write().info('Loading the configuration for space...')
configuration = Settings.Configuration()
if initial_arguments.edit is True:
Logger.write().info('Launching in editor mode...')
if os.environ.get('EDITOR') is None:
Logger.write().critical('The value of EDITOR is not set, defaulting to nano...')
Logger.write().info('Opening the spaces.yml file in the default editor...')
Executor.Invoke((os.environ.get('EDITOR', 'nano'), configuration.get_preferences_path()))
else:
Logger.write().info('Validating configuration file...')
if configuration.is_valid() is False:
Logger.write().warning('No configuration setup for this directory!')
parser.exit(1, '')
Logger.write().info('Checking arguments...')
if initial_arguments.list is True:
message = '%s [-h] {%s}\n' % (parser.prog, '|'.join(configuration.commands()))
parser.exit(0, message)
Logger.write().info('Creating subcommand parser...')
subparsers = parser.add_subparsers(title='Subcommands', dest='command')
subparsers.required = True
Logger.write().info('Adding subcommands to command line parser...')
for command_name in configuration.commands():
Logger.write().debug('Adding command "%s"...' % command_name)
command_subparser = subparsers.add_parser(command_name)
Logger.write().info('Parsing remaining command line arguments...')
command_args = parser.parse_args(remaining_args)
Logger.write().info('Running subcommand...')
if configuration.invoke(initial_arguments.env, command_args.command) is False:
Logger.write().error('Unknown command "%s" was encountered!' % command_args.command)
parser.exit(1, '')
if __name__ == "__main__": # pragma: no cover
main()
| [
"[email protected]"
] | |
af0520a4722ea5f1f8dd0e65547fbf2701eaadfa | fd2aab479e164fc6334ba6de46e1e1a11a4eee37 | /pygamer/ugame.py | 99ea2ffce43b4f228bcad1c1ab36db100dcbd451 | [
"MIT"
] | permissive | cwalther/circuitpython-stage | 4286c61288e672da4249439518a03fae921bf2cf | 9596a5904ed757e6fbffcf03e7aa77ae9ecf5223 | refs/heads/master | 2023-07-13T23:05:10.034386 | 2020-07-18T12:47:18 | 2020-07-18T16:36:09 | 283,866,058 | 0 | 0 | MIT | 2020-07-30T20:01:11 | 2020-07-30T20:01:10 | null | UTF-8 | Python | false | false | 2,890 | py | """
A helper module that initializes the display and buttons for the uGame
game console. See https://hackaday.io/project/27629-game
"""
import board
import digitalio
import analogio
import gamepadshift
import stage
import displayio
import busio
import time
K_X = 0x01
K_O = 0x02
K_START = 0x04
K_SELECT = 0x08
K_DOWN = 0x10
K_LEFT = 0x20
K_RIGHT = 0x40
K_UP = 0x80
# re-initialize the display for correct rotation and RGB mode
_TFT_INIT = (
b"\x01\x80\x96" # SWRESET and Delay 150ms
b"\x11\x80\xff" # SLPOUT and Delay
b"\xb1\x03\x01\x2C\x2D" # _FRMCTR1
b"\xb2\x03\x01\x2C\x2D" # _FRMCTR2
b"\xb3\x06\x01\x2C\x2D\x01\x2C\x2D" # _FRMCTR3
b"\xb4\x01\x07" # _INVCTR line inversion
b"\xc0\x03\xa2\x02\x84" # _PWCTR1 GVDD = 4.7V, 1.0uA
b"\xc1\x01\xc5" # _PWCTR2 VGH=14.7V, VGL=-7.35V
b"\xc2\x02\x0a\x00" # _PWCTR3 Opamp current small, Boost frequency
b"\xc3\x02\x8a\x2a"
b"\xc4\x02\x8a\xee"
b"\xc5\x01\x0e" # _VMCTR1 VCOMH = 4V, VOML = -1.1V
b"\x20\x00" # _INVOFF
b"\x36\x01\xa0" # _MADCTL
    # 1 clk cycle nonoverlap, 2 cycle gate rise, 3 cycle osc equalize,
# fix on VTL
b"\x3a\x01\x05" # COLMOD - 16bit color
b"\xe0\x10\x02\x1c\x07\x12\x37\x32\x29\x2d\x29\x25\x2B\x39\x00\x01\x03\x10" # _GMCTRP1 Gamma
b"\xe1\x10\x03\x1d\x07\x06\x2E\x2C\x29\x2D\x2E\x2E\x37\x3F\x00\x00\x02\x10" # _GMCTRN1
b"\x13\x80\x0a" # _NORON
b"\x29\x80\x64" # _DISPON
)
displayio.release_displays()
_tft_spi = busio.SPI(clock=board.TFT_SCK, MOSI=board.TFT_MOSI)
_tft_spi.try_lock()
_tft_spi.configure(baudrate=24000000)
_tft_spi.unlock()
_fourwire = displayio.FourWire(_tft_spi, command=board.TFT_DC,
chip_select=board.TFT_CS)
_reset = digitalio.DigitalInOut(board.TFT_RST)
_reset.switch_to_output(value=0)
time.sleep(0.05)
_reset.value = 1
time.sleep(0.05)
display = displayio.Display(_fourwire, _TFT_INIT, width=160, height=128,
rotation=0, backlight_pin=board.TFT_LITE)
del _TFT_INIT
display.auto_brightness = True
class Buttons:
def __init__(self):
self.buttons = gamepadshift.GamePadShift(
digitalio.DigitalInOut(board.BUTTON_CLOCK),
digitalio.DigitalInOut(board.BUTTON_OUT),
digitalio.DigitalInOut(board.BUTTON_LATCH),
)
self.joy_x = analogio.AnalogIn(board.JOYSTICK_X)
self.joy_y = analogio.AnalogIn(board.JOYSTICK_Y)
def get_pressed(self):
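        # Fold the analogue joystick into the same bitmask as the shift
        # register buttons: beyond the +/-15000 dead zone it reads as a
        # D-pad direction.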
pressed = self.buttons.get_pressed()
dead = 15000
x = self.joy_x.value - 32767
if x < -dead:
pressed |= K_LEFT
elif x > dead:
pressed |= K_RIGHT
y = self.joy_y.value - 32767
if y < -dead:
pressed |= K_UP
elif y > dead:
pressed |= K_DOWN
return pressed
buttons = Buttons()
audio = stage.Audio(board.SPEAKER, board.SPEAKER_ENABLE)
| [
"[email protected]"
] | |
9da2db5bfcfd595f9ceebece424d51f7ce16fdcb | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2823/60678/309534.py | ff9fc77e3106db7acaf5c0c5b11bbc5446c89c0d | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | a = input()
if a == '2 1':
print(2)
elif a == '1000 1':
print(1000)
elif a == '122 1310':
print(913060508)
elif a == '3 2':
print(5)
elif a == '247 394':
print(579515894)
elif a == '6 4':
print(39)
elif a == '276 803':
print(472119642)
elif a == '141 1620':
print(621513949)
elif a == '260 840':
print(466364900)
else:
print(498532220) | [
"[email protected]"
] | |
c91bc90b0cf1fad42a4e5dfed7b828e6b96f344c | 7f8bbf4b33f7ce03573f0a694fe9d44558805330 | /pos/models.py | 45b599ad9d3d9eb95756c4d88656c3cea2cc9b75 | [] | no_license | hashirharis/DjangoERP | 12b6937ea6f8664de80b43597a51388b116d0dfd | a70cc5980869c4d448f78bf06a8b0e876df6469e | refs/heads/master | 2021-01-02T09:20:29.739692 | 2014-12-14T01:24:49 | 2014-12-14T01:24:49 | 27,979,350 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,946 | py | #local imports
from users.models import Staff, Store, StoreLevelObject
from core.models import Product
#django imports
from django.db import models
from django.db.models import Sum
from django.conf import settings
#python imports
from decimal import Decimal as D
class LedgerAccount(models.Model): #for store/customer accounts
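    # Running ledger for a customer or store account: entries with positive
    # totals are amounts owing and negative totals are credits (see the
    # filters below); `balance` on each entry tracks what is still unpaid.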
creditLimit = models.DecimalField(max_digits=8, decimal_places=2, default=D('0.00'))
accountTypes = (
('customer', 'customer'),
('store', 'store'),
)
type = models.CharField(max_length=30, choices=accountTypes)
def getCurrentEntries(self):
return LedgerAccountEntry.objects.filter(account=self, status__exact="CURRENT") #amount owing
def getCreditEntries(self):
return LedgerAccountEntry.objects.filter(account=self, status__exact="CURRENT", total__lt=0.00) #amount owing
def getAccountEntries(self):
return LedgerAccountEntry.objects.filter(account=self, status__exact="CURRENT", total__gt=0.00) #amount owing
def getAllHistoricEntries(self):
return LedgerAccountEntry.objects.filter(account=self) #amount owing
def getAccountBalance(self):
balance = self.getCurrentEntries().aggregate(Sum('balance'))['balance__sum']
return balance if balance is not None else D('0.00')
def getAccountTotal(self):
totals = self.getCurrentEntries().exclude(balance=0).aggregate(Sum('total'))
balance = D('0.00') if totals['total__sum'] is None else totals['total__sum']
return balance
def remainingCreditLimit(self):
return self.creditLimit + self.getAccountBalance()
def getPaidToDate(self):
return self.getAccountTotal() - self.getAccountBalance()
def getMostRecentGroupedBy(self):
try:
return LedgerAccountPayment.objects.filter(account=self).order_by('-date')[:1].get().groupedBy
except LedgerAccountPayment.DoesNotExist:
return 0
class LedgerAccountEntry(models.Model):
account = models.ForeignKey(LedgerAccount)
dueDate = models.DateTimeField(null=True, blank=True)
description = models.TextField()
status = models.CharField(max_length=30) #CURRENT, FINALISED
referenceID = models.PositiveIntegerField() #polymorphic foreign key to either invoice, order, sale or another ledger entry
referenceNum = models.CharField(max_length=250)
refTypes = (
('invoice', 'invoice'), #new entries created from balance paid forward
('sale', 'sale'), #reference to a sale
('entry', 'entry') #reference to another ledger entry
)
referenceType = models.CharField(max_length=30, choices=refTypes)
paymentGrouping = models.PositiveIntegerField(null=True)
total = models.DecimalField(max_digits=8, decimal_places=2)
balance = models.DecimalField(max_digits=8, decimal_places=2)
comment = models.CharField(max_length=1000, blank=True)
created = models.DateTimeField(auto_now_add=True, auto_created=True)
def is_negative(self):
return self.total < 0
def total_sign_correct(self):
if self.total < 0:
return self.total * -1
return self.total
def balance_sign_correct(self):
if self.balance < 0:
return self.balance * -1
return self.balance
def getReferenceEntries(self):
return LedgerAccountEntry.objects.filter(referenceType="entry", referenceID=self.id)
def invoice(self):
try:
sale = Sale.objects.get(code=self.referenceNum)
return sale.getMostRecentInvoiceItem().reference if sale.getMostRecentInvoiceItem() else ''
except Sale.DoesNotExist:
return ''
class Customer(StoreLevelObject):
firstName = models.CharField("First Name", max_length=50)
lastName = models.CharField("Last Name", max_length=50, blank=True)
title_choices = (
('Mr','Mr'),
('Mrs','Mrs'),
('Ms','Ms'),
('Miss','Miss'),
('Dr','Dr')
)
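    # NB: the default 'Staff' on the title field below is not one of the
    # declared title_choices.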
title = models.CharField("Title", max_length=10, choices=title_choices, default='Staff', blank=True)
#addresses
address = models.CharField("Address", help_text="Used for Invoice", max_length=100)
suburb = models.CharField("Suburb", max_length=20)
state_choices=(
('AU-NSW','New South Wales'),
('AU-QLD','Queensland'),
('AU-SA','South Australia'),
('AU-TAS','Tasmania'),
('AU-VIC','Victoria'),
('AU-WA','Western Australia'),
('AU-ACT','Australian Capital Territory'),
('AU-NT','Northern Territory'),
)
cityState = models.CharField("City/State", max_length=50,choices=state_choices)
postcode = models.CharField("Post Code", max_length=10)
paddress = models.CharField("Postal Address", help_text="Leave blank if same as address",max_length=100, blank=True, null=False)
psuburb = models.CharField("Suburb", max_length=20, blank=True, null=False)
pcityState = models.CharField("City/State", max_length=50, blank=True, null=False)
ppostcode = models.CharField("Post Code", max_length=10, blank=True, null=False)
email = models.CharField("Email", max_length=50, blank=True, null=False)
homePhone = models.CharField("Home Phone", max_length=50, blank=True, null=False)
workPhone = models.CharField("Work Phone", max_length=50, blank=True, null=False)
fax = models.CharField("Fax", max_length=50, blank=True, null=False)
mobile = models.CharField("Mobile", max_length=50, blank=True, null=False)
contact_choices = (
('E', 'Email'),
('H', 'Home Phone'),
('W', 'Work Phone'),
('M', 'Mobile'),
)
preferredContact = models.CharField(max_length=50, choices=contact_choices, default='M')
comment = models.TextField("Comment", blank=True, null=False)
VCN = models.CharField("Valued Customer Number", max_length=50, null=False, blank=True)
account = models.OneToOneField(LedgerAccount, unique=True, default=lambda: LedgerAccount.objects.create(type='customer'))
def __unicode__(self):
return u'%s %s' % (self.firstName, self.lastName)
def htmlFormattedAddress(self):
formattedAddress = u''
if self.address != "":
formattedAddress = u'%s %s %s %s' %(self.address+'<br />',self.suburb+'<br />',self.cityState,self.postcode)
formattedAddress += u'<br />Home Phone : %s<br /> Work Phone : %s<br /> Mobile : %s' % (self.homePhone,self.workPhone,self.mobile)
elif self.paddress != "":
formattedAddress = u'%s %s %s %s' %(self.paddress+'<br />',self.psuburb+'<br />',self.pcityState,self.ppostcode)
formattedAddress += u'<br />Home Phone : %s<br /> Work Phone : %s<br /> Mobile : %s' % (self.homePhone,self.workPhone,self.mobile)
else:
formattedAddress = ""
return formattedAddress
def firstContactPoint(self):
firstContactPoint = ""
if self.preferredContact == "M":
firstContactPoint = self.mobile
elif self.preferredContact == "E":
firstContactPoint = self.email
elif self.preferredContact == "H":
firstContactPoint = self.homePhone
elif self.preferredContact == "W":
firstContactPoint = self.workPhone
return firstContactPoint
class Terminal (models.Model):
store = models.ForeignKey(Store)
name = models.CharField(max_length=40)
def recentActivitySet(self):
return self.terminalactivity_set.order_by('-modified')[:10]
class Sale(models.Model):
total = models.DecimalField(max_digits=8, decimal_places=2)
customer = models.ForeignKey(Customer)
deliveryAddress = models.CharField(max_length=1000)
purchaseDate = models.DateTimeField()
fullPaymentDate = models.DateTimeField(null=True, blank=True)
salesPerson = models.ForeignKey(Staff)
code = models.CharField(max_length=255)
status = models.CharField(max_length=50) #COMPLETED, PENDING, QUOTE
note = models.CharField(max_length=1000)
storeNote = models.CharField(max_length=1000)
terminal = models.ForeignKey(Terminal)
modified = models.DateTimeField(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return u'%i' % (self.id)
@staticmethod
def generateSaleNumber(terminal, store, saleId):
return str(store.code) + str(terminal.name) + "-" + str(saleId)
def getSalePayments(self):
totalPayments = SalesPayment.objects.filter(sale=self).aggregate(Sum('amount'))
return 0 if totalPayments['amount__sum'] is None else totalPayments['amount__sum']
def getSalePaymentsTill(self, groupedByID):
totalPayments = SalesPayment.objects.filter(sale=self, groupedBy__lt=groupedByID).aggregate(Sum('amount'))
return 0 if totalPayments['amount__sum'] is None else totalPayments['amount__sum']
def getSaleBalanceAfter(self, groupedByID):
totalPayments = SalesPayment.objects.filter(sale=self, groupedBy__lt=groupedByID).aggregate(Sum('amount'))
paid = D('0.00') if totalPayments['amount__sum'] is None else totalPayments['amount__sum']
return self.total - paid
def getGroupedPayments(self, groupedByID):
return SalesPayment.objects.filter(sale=self, groupedBy=groupedByID)
def getPayments(self):
return SalesPayment.objects.filter(sale=self)
    def getPaymentServedBy(self, groupedByID):
        # A payment group can hold several rows received by the same staff
        # member, so take the first instead of .get(), which would raise
        # MultipleObjectsReturned for multi-payment groups.
        staff = SalesPayment.objects.filter(sale=self, groupedBy=groupedByID).first().receivedBy
        return staff.name
def getMostRecentGroupedBy(self):
try:
return SalesPayment.objects.filter(sale=self).order_by('-date')[:1].get().groupedBy
except SalesPayment.DoesNotExist:
return 0
    def getMostRecentInvoice(self):
        # Number of invoices raised for this sale; evaluating a queryset never
        # raises DoesNotExist, so the old try/except was redundant.
        return SaleInvoice.objects.filter(sale=self).count()
def getMostRecentInvoiceItem(self):
if self.getMostRecentInvoice():
return SaleInvoice.objects.filter(sale=self).latest('created')
else:
return None
def subtotal(self):
GST = D(settings.GST)
return self.total/GST
def totalGST(self):
subtotal = self.subtotal()
GSTAmount = self.total - subtotal
return GSTAmount
def balanceDue(self):
paidFor = self.getSalePayments()
total = self.total
return total-paidFor
class SalesLine(models.Model):
sale = models.ForeignKey(Sale)
item = models.ForeignKey(Product)
modelNum = models.CharField(max_length=200)
warrantyRef = models.CharField(max_length=100, blank=True)
description = models.CharField(max_length=200)
quantity = models.IntegerField()
released = models.IntegerField(default=0)
#number of units released.
unitPrice = models.DecimalField(max_digits=8, decimal_places=2)
price = models.DecimalField(max_digits=8, decimal_places=2)
line = models.PositiveIntegerField()
def __unicode__(self):
return u'Sale: %s. Line: %i' % (self.sale.code, self.line)
class PaymentMethod(StoreLevelObject):
name = models.CharField(max_length=50)
parentMethod = models.ForeignKey('self', blank=True, null=True, help_text="A Parent of the payment method if one exists. If this is null this is a root parent.")
def __unicode__(self):
return '%s' % self.name
def getChildren(self, store):
return PaymentMethod.objects.all().filterReadAll(store).filter(parentMethod=self)
class SalesPayment(models.Model):
sale = models.ForeignKey(Sale)
amount = models.DecimalField(max_digits=8, decimal_places=2)
date = models.DateTimeField()
receivedBy = models.ForeignKey(Staff)
paymentMethod = models.ForeignKey(PaymentMethod)
groupedBy = models.PositiveIntegerField()
def __unicode__(self):
return u'Payment for sale: %i' % (self.sale.id)
class SaleInvoice(models.Model):
sale = models.ForeignKey(Sale)
reference = models.CharField(max_length=150)
total = models.DecimalField(max_digits=8, decimal_places=2)
created = models.DateTimeField(auto_created=True, auto_now_add=True)
salesPerson = models.ForeignKey(Staff)
notes = models.CharField(max_length=1000)
@staticmethod
def generateInvoiceNumber(terminal, store, saleId, invoiceNum):
return str(store.code) + str(terminal.name) + "-" + str(saleId) + "-" + str(invoiceNum)
def subtotal(self):
GST = D(settings.GST)
return self.total/GST
def totalGST(self):
subtotal = self.subtotal()
GSTAmount = self.total - subtotal
return GSTAmount
class SaleInvoiceLine(models.Model):
invoice = models.ForeignKey(SaleInvoice)
salesLine = models.ForeignKey(SalesLine)
quantity = models.IntegerField()
unitPrice = models.DecimalField(max_digits=8, decimal_places=2)
price = models.DecimalField(max_digits=8, decimal_places=2)
class LedgerAccountPayment(models.Model):
account = models.ForeignKey(LedgerAccount)
amount = models.DecimalField(max_digits=8, decimal_places=2)
date = models.DateTimeField()
notes = models.TextField(null=True, blank=True)
receivedBy = models.ForeignKey(Staff)
paymentMethod = models.ForeignKey(PaymentMethod)
groupedBy = models.PositiveIntegerField()
def __unicode__(self):
return u'Payment for account: %i' % (self.account.id)
#for historical payment receipts
class LedgerAccountPaymentSnapshot(models.Model):
account = models.ForeignKey(LedgerAccount)
accountTotal = models.DecimalField(max_digits=8, decimal_places=2)
paidToDate = models.DecimalField(max_digits=8, decimal_places=2)
balanceDue = models.DecimalField(max_digits=8, decimal_places=2)
paymentGrouping = models.PositiveIntegerField()
balanceCarried = models.DecimalField(max_digits=8, decimal_places=2)
created = models.DateTimeField(auto_now=True, auto_created=True)
class CreditNote(models.Model):
customer = models.ForeignKey(Customer)
sale = models.ForeignKey(Sale) #refund sale
amount = models.DecimalField(max_digits=8, decimal_places=2)
active = models.BooleanField(default=True)
payment = models.OneToOneField(LedgerAccountPayment, null=True)#if no longer active then this will need a value
class TerminalClosure(models.Model):
startDate = models.DateTimeField()
endDate = models.DateTimeField()
terminal = models.ForeignKey(Terminal)
status = models.CharField(max_length=10) #BALANCED, NOBALANCED
total = models.DecimalField(max_digits=8,decimal_places=2)
count = models.DecimalField(max_digits=8,decimal_places=2)
difference = models.DecimalField(max_digits=8,decimal_places=2) # negative amount indicates less, positive more
#total payments should be the salespayment on that given date.
comment = models.TextField("Comment", blank=True, null=False)
closedBy = models.ForeignKey(Staff)
class TerminalCount(models.Model):
paymentMethod = models.ForeignKey(PaymentMethod)
total = models.DecimalField(max_digits=8,decimal_places=2)
count = models.DecimalField(max_digits=8,decimal_places=2)
difference = models.DecimalField(max_digits=8,decimal_places=2) # negative amount indicates less, positive more
eod = models.ForeignKey(TerminalClosure)
class TerminalActivity(models.Model):
terminal = models.ForeignKey(Terminal)
sale = models.ForeignKey(Sale, null=True)
closure = models.ForeignKey(TerminalClosure, null=True)
created = models.DateTimeField(auto_created=True, auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return u'%s' % self.text
def text(self):
if self.sale:
if self.sale.status == "QUOTE":
action = "<strong>New</strong> Quote"
icon_class = "glyphicon glyphicon-file"
elif self.sale.status == "COMPLETED":
action = "<strong>Completed</strong> Sale/Order"
icon_class = "glyphicon glyphicon-ok"
elif self.sale.status == "PENDING":
action = "<strong>New</strong> Sale/Order"
icon_class = "glyphicon glyphicon-time"
else:
action = "<strong>Action</strong> Sale/Order"
icon_class = "glyphicon glyphicon-question-sign"
else: # Balanced Till
action = "<strong>Balanced</strong> Till"
icon_class = "glyphicon glyphicon-repeat"
icon = '<i class="%s"></i>' % icon_class
if self.sale:
return u'%s %s (%s) <span> - %s</span>' % (icon, action, self.sale.code, self.sale.salesPerson.name)
else:
return u'%s %s <span> - %s</span>' % (icon, action, self.closure.closedBy.name)
| [
"[email protected]"
] | |
35524ae86beda78a5c68d1cabaf6999179bed782 | d2c87e3374e637a22f72ef050d0c868d634443e9 | /tournament.py | 378a79e6895a9e8d2e8a51bfaf4f922f99203636 | [] | no_license | Mec-iS/nanodegree-relationaldb | 64a5a3ca29b27ee52132c73b6c202ddbcaa56c89 | 02f18eb003b405fae24f55ef46fcb507522c829d | refs/heads/master | 2020-06-01T10:01:07.408243 | 2015-03-19T15:31:20 | 2015-03-19T15:31:20 | 32,499,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,383 | py | #!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
def connect():
"""Connect to the PostgreSQL database. Returns a database connection."""
return psycopg2.connect("dbname=tournament user=vagrant")
def deleteMatches():
"""Remove all the match records from the database."""
conn = connect()
cur = conn.cursor()
SQL = "DELETE FROM matches;"
cur.execute(SQL)
conn.commit()
cur.close()
conn.close()
return True
def deletePlayers():
"""Remove all the player records from the database."""
conn = connect()
cur = conn.cursor()
SQL = "DELETE FROM players;"
cur.execute(SQL)
conn.commit()
cur.close()
conn.close()
return True
def countPlayers():
"""Returns the number of players currently registered."""
conn = connect()
cur = conn.cursor()
SQL = "SELECT count(*) FROM players;"
cur.execute(SQL)
result = cur.fetchone()
cur.close()
conn.close()
return result[0]
def registerPlayer(name):
"""Adds a player to the tournament database.
The database assigns a unique serial id number for the player. (This
should be handled by your SQL database schema, not in your Python code.)
Args:
name: the player's full name (need not be unique).
"""
conn = connect()
cur = conn.cursor()
SQL = "INSERT INTO players(name) VALUES(%s);"
data = (name, )
cur.execute(SQL, data)
conn.commit()
cur.close()
conn.close()
return True
def playerStandings():
"""Returns a list of the players and their win records, sorted by wins.
The first entry in the list should be the player in first place, or a player
tied for first place if there is currently a tie.
Returns:
A list of tuples, each of which contains (id, name, wins, matches):
id: the player's unique id (assigned by the database)
name: the player's full name (as registered)
wins: the number of matches the player has won
matches: the number of matches the player has played
"""
conn = connect()
cur = conn.cursor()
p_SQL = "SELECT * FROM players;"
cur.execute(p_SQL)
players = cur.fetchall()
results = []
for p in players:
SQL = "SELECT count(*) FROM matches where win_id=%s"
cur.execute(SQL, (p[0],))
w = cur.fetchone()
SQL = "SELECT count(*) FROM matches where loss_id=%s"
cur.execute(SQL, (p[0],))
l = cur.fetchone()
results.append((p[0], p[1], int(w[0]), int(w[0])+int(l[0])))
cur.close()
conn.close()
return results
def reportMatch(winner, loser):
"""Records the outcome of a single match between two players.
Args:
winner: the id number of the player who won
loser: the id number of the player who lost
"""
conn = connect()
cur = conn.cursor()
SQL = "INSERT INTO matches(win_id, loss_id) VALUES(%s, %s);"
data = (int(winner), int(loser) )
cur.execute(SQL, data)
conn.commit()
cur.close()
conn.close()
return True
def swissPairings():
"""Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
"""
conn = connect()
cur = conn.cursor()
# use the W/L view to define the pairs
p_SQL = "SELECT * FROM player_w_l;"
cur.execute(p_SQL)
players = cur.fetchall()
results = []
for p in players:
        won = int(p[2])
        games = int(p[2]) + int(p[3])
        # Use true division: under Python 2, won/games on ints truncates to 0
        # for every imperfect record, which would break the sort below.
        results.append((p[0], p[1], float(won) / games if games else 0.0))
cur.close()
conn.close()
ordered = sorted(results, key=lambda x: x[2], reverse=True)
results = []
for i, r in enumerate(ordered):
if i % 2 == 0:
results.append((r[0], r[1], ordered[i+1][0], ordered[i+1][1]))
return results
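# Round sketch (hypothetical names; assumes the tournament schema -- players,
# matches, and the player_w_l view used above -- is loaded and initially empty):
#   deletePlayers()
#   registerPlayer("Alice"); registerPlayer("Bob")
#   ids = [row[0] for row in playerStandings()]
#   reportMatch(ids[0], ids[1])
#   print swissPairings()   # expected shape: [(id1, "Alice", id2, "Bob")]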
| [
"[email protected]"
] | |
56090a90809fe8aa497d3bc52c32b42be2a07449 | 3b9b4049a8e7d38b49e07bb752780b2f1d792851 | /src/sync/syncable/DEPS | b0c904e40965a30b6f4ee6ae362b1d0d7dc40ede | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | webosce/chromium53 | f8e745e91363586aee9620c609aacf15b3261540 | 9171447efcf0bb393d41d1dc877c7c13c46d8e38 | refs/heads/webosce | 2020-03-26T23:08:14.416858 | 2018-08-23T08:35:17 | 2018-09-20T14:25:18 | 145,513,343 | 0 | 2 | Apache-2.0 | 2019-08-21T22:44:55 | 2018-08-21T05:52:31 | null | UTF-8 | Python | false | false | 259 | include_rules = [
"+net/base/escape.h",
"+sql",
"+sync/api/attachments",
"+sync/base",
"+sync/internal_api/public/base",
"+sync/internal_api/public/engine",
"+sync/internal_api/public/util",
"+sync/protocol",
"+sync/test",
"+sync/util",
]
| [
"[email protected]"
] | ||
81c2e9fcbbd0b7dc64330d4b895b0b65f9cac825 | b4bc5fb10b0d498cb0d3e5ee2ce3473b10b553e5 | /fast_transformers/recurrent/attention/self_attention/fradamax_attention.py | 99342c8b72ae8ed6b8e5873ca2bced7ffb99fc31 | [] | no_license | minhtannguyen/momentum-transformer-code-submission | 2f0005028ab7e32957612f642330acd802bded8e | 68b11ce5564a8212cd91cb2093b457a00d511046 | refs/heads/master | 2023-05-31T19:20:57.380490 | 2021-06-04T15:08:26 | 2021-06-04T15:08:26 | 373,784,396 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,207 | py | """Implement the causally masked linear attention as a recurrent model."""
import torch
from torch.nn import Module
from ....attention_registry import RecurrentAttentionRegistry, Optional, Float, Int, \
Callable, EventDispatcherInstance
from ....events import EventDispatcher
from ....feature_maps import elu_feature_map
from ..._utils import check_state
class RecurrentFRAdamaxAttention(Module):
"""Implement fast_transformers.attention.causal_linear_attention as a
fixed-dimensional state recurrent model.
See fast_transformers.attention.linear_attention and
fast_transformers.attention.causal_linear_attention for the general concept
of replacing the softmax with feature maps.
Arguments
---------
feature_map: callable, a callable that applies the feature map to the
last dimension of a tensor (default: elu(x)+1)
eps: float, a small number to ensure the numerical stability of the
denominator (default: 1e-6)
event_dispatcher: str or EventDispatcher instance to be used by this
module for dispatching events (default: the default
global dispatcher)
"""
def __init__(self, query_dimensions, stepsize, beta, delta, feature_map=None, eps=1e-6,
event_dispatcher=""):
super(RecurrentFRAdamaxAttention, self).__init__()
self.feature_map = (
feature_map(query_dimensions) if feature_map else
elu_feature_map(query_dimensions)
)
self.eps = eps
self.event_dispatcher = EventDispatcher.get(event_dispatcher)
# for fradamax transformer
self.stepsize = stepsize
self.beta = beta
self.delta = delta
def forward(self, query, key, value, state=None, memory=None):
# Normalize state/memory
state = check_state(state, memory)
# If this is a new sequence reinitialize the feature map
if state is None:
self.feature_map.new_feature_map()
# Apply the feature map to the query and key
Q = self.feature_map.forward_queries(query)
K = self.feature_map.forward_keys(key)
# Extract some shapes
N, H, D = Q.shape
_, _, M = value.shape
# Extract the memory or initialize it
if state is None:
Siprev = query.new_zeros((N, H, D, M))
Zi = query.new_zeros((N, H, D))
Pi = query.new_zeros((N, H, D, M))
Mi = query.new_zeros((N, H, D, M))
else:
Siprev, Zi, Pi, Mi, Uiprev, Siprev2 = state
# Ensure the batch size did not change
if len(Siprev) != N:
raise ValueError("The batch size changed during iteration")
# Update the internal state
#
# NOTE: The if clause is added due to GitHub PR #10. Simply using the
# following two lines does not perform the operation in place which
# means it is slower for inference.
if K.grad_fn is not None or value.grad_fn is not None:
Zi = Zi + K
Ui = torch.einsum("nhd,nhm->nhdm", K, value)
if state is None:
Pi = 0.0 - Ui
else:
mu = (1.0 - torch.sqrt(self.stepsize * torch.norm((Ui - Uiprev).reshape(N,-1), dim=1, keepdim=True) / torch.norm((Siprev - Siprev2).reshape(N,-1), dim=1, keepdim=True)))**2
mu = torch.clamp(mu, min=0.0, max=1.0 - self.delta)
Pi = mu[:, :, None, None] * Pi - self.stepsize * Ui
Mi = torch.max(self.beta * Mi, torch.abs(Ui))
Si = Siprev - Pi/torch.sqrt(Mi + 1e-16)
else:
Zi += K
Ui = torch.einsum("nhd,nhm->nhdm", K, value)
if state is None:
Pi = 0.0 - Ui
else:
mu = (1.0 - torch.sqrt(self.stepsize * torch.norm((Ui - Uiprev).reshape(N,-1), dim=1, keepdim=True) / torch.norm((Siprev - Siprev2).reshape(N,-1), dim=1, keepdim=True)))**2
mu = torch.clamp(mu, min=0.0, max=1.0 - self.delta)
Pi *= mu[:, :, None, None]
Pi -= self.stepsize * Ui
Mi = torch.max(self.beta * Mi, torch.abs(Ui))
Si = Siprev - Pi/torch.sqrt(Mi + 1e-16)
# Compute the output
Z = 1. / (torch.einsum("nhd,nhd->nh", Q, Zi) + self.eps)
V = torch.einsum("nhd,nhdm,nh->nhm", Q, Si, Z)
return V, [Si, Zi, Pi, Mi, Ui, Siprev]
# Register the attention implementation so that it becomes available in our
# builders
# RecurrentAttentionRegistry.register(
# "momentum-linear", RecurrentMomentumAttention,
# [
# ("query_dimensions", Int),
# ("feature_map", Optional(Callable)),
# ("event_dispatcher", Optional(EventDispatcherInstance, ""))
# ]
# )
RecurrentAttentionRegistry.register(
"fradamax-linear", RecurrentFRAdamaxAttention,
[
("query_dimensions", Int),
("stepsize", Float),
("beta", Float),
("delta", Float),
("feature_map", Optional(Callable)),
("event_dispatcher", Optional(EventDispatcherInstance, ""))
]
)
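# Minimal usage sketch (hypothetical shapes and hyperparameters; one token per
# call, with the recurrent state threaded between calls):
#   import torch
#   attn = RecurrentFRAdamaxAttention(query_dimensions=64, stepsize=0.9,
#                                     beta=0.999, delta=0.1)
#   q = torch.randn(2, 8, 64); k = torch.randn(2, 8, 64); v = torch.randn(2, 8, 32)
#   out, state = attn(q, k, v)                  # first step initializes the state
#   out, state = attn(q, k, v, state=state)     # later steps reuse it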
| [
"[email protected]"
] | |
3f155324903843bae4304f1068687cdf92ed8338 | dba16143d8fa6aa73ca1d4df7bcfaca42824412c | /tests/src/year2021/test_day18a.py | 4a8bf87f8680744f81d6ef29b60953d7cddcb317 | [
"Unlicense"
] | permissive | lancelote/advent_of_code | 84559bf633189db3c3e4008b7777b1112f7ecd30 | 4b8ac6a97859b1320f77ba0ee91168b58db28cdb | refs/heads/master | 2023-02-03T14:13:07.674369 | 2023-01-24T20:06:43 | 2023-01-24T20:06:43 | 47,609,324 | 11 | 0 | null | 2019-10-07T07:06:42 | 2015-12-08T08:35:51 | Python | UTF-8 | Python | false | false | 3,719 | py | """2021 - Day 18 Part 1: Snailfish."""
import functools
from textwrap import dedent
import pytest
from src.year2021.day18a import explode
from src.year2021.day18a import Node
from src.year2021.day18a import reduce
from src.year2021.day18a import solve
from src.year2021.day18a import split
@pytest.mark.parametrize(
"line,expected_magnitude",
[
("[9,1]", 29),
("[1,9]", 21),
("[[9,1],[1,9]]", 129),
("[[1,2],[[3,4],5]]", 143),
("[[[[0,7],4],[[7,8],[6,0]]],[8,1]]", 1384),
("[[[[1,1],[2,2]],[3,3]],[4,4]]", 445),
("[[[[3,0],[5,3]],[4,4]],[5,5]]", 791),
("[[[[5,0],[7,4]],[5,5]],[6,6]]", 1137),
("[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]", 3488),
],
)
def test_magnitude(line, expected_magnitude):
assert Node.from_line(line).magnitude == expected_magnitude
@pytest.mark.parametrize(
"line",
[
"[9,1]",
"[[9,1],[1,9]]",
"[[1,2],[[3,4],5]]",
],
)
def test_str(line):
assert str(Node.from_line(line)) == line
def test_add():
a = Node.from_line("[1,2]")
b = Node.from_line("[[3,4],5]")
c = a + b
assert str(c) == "[[1,2],[[3,4],5]]"
@pytest.mark.parametrize(
"from_line,to_line",
[
(
"[10,1]",
"[[5,5],1]",
),
(
"[[[[0,7],4],[15,[0,13]]],[1,1]]",
"[[[[0,7],4],[[7,8],[0,13]]],[1,1]]",
),
(
"[[[[0,7],4],[[7,8],[0,13]]],[1,1]]",
"[[[[0,7],4],[[7,8],[0,[6,7]]]],[1,1]]",
),
],
)
def test_split(from_line, to_line):
num = Node.from_line(from_line)
assert str(split(num)) == to_line
@pytest.mark.parametrize(
"from_line,to_line",
[
(
"[[[[[9,8],1],2],3],4]",
"[[[[0,9],2],3],4]",
),
(
"[7,[6,[5,[4,[3,2]]]]]",
"[7,[6,[5,[7,0]]]]",
),
(
"[[6,[5,[4,[3,2]]]],1]",
"[[6,[5,[7,0]]],3]",
),
(
"[[3,[2,[1,[7,3]]]],[6,[5,[4,[3,2]]]]]",
"[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]",
),
(
"[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]",
"[[3,[2,[8,0]]],[9,[5,[7,0]]]]",
),
],
)
def test_explode(from_line, to_line):
num = Node.from_line(from_line)
assert str(explode(num)) == to_line
def test_reduce():
line = "[[[[[4,3],4],4],[7,[[8,4],9]]],[1,1]]"
expected = "[[[[0,7],4],[[7,8],[6,0]]],[8,1]]"
assert str(reduce(Node.from_line(line))) == expected
def test_sum():
task = """
[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]]
[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]
[[2,[[0,8],[3,4]]],[[[6,7],1],[7,[1,6]]]]
[[[[2,4],7],[6,[0,5]]],[[[6,8],[2,8]],[[2,1],[4,5]]]]
[7,[5,[[3,8],[1,4]]]]
[[2,[2,2]],[8,[8,1]]]
[2,9]
[1,[[[9,3],9],[[9,0],[0,7]]]]
[[[5,[7,4]],7],1]
[[[[4,2],2],6],[8,7]]
"""
nums = [Node.from_line(line) for line in dedent(task).strip().splitlines()]
expected = "[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]"
assert str(functools.reduce(lambda x, y: x + y, nums)) == expected
def test_solve():
task = """
[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]
"""
assert solve(dedent(task).strip()) == 4140
| [
"[email protected]"
] | |
d9ac7951c1faea6379410b63657b01be113355d3 | d0d94276979375dd0e2ce0629afdfc9e7a1ca52e | /program/agent/basic.py | fafd805dd273843147d87a5c31748f7f0716b756 | [] | no_license | haoxizhong/handy | 04f1abfe9855eb30531e51ad833b546438502a7c | e805dcabdb50d25f852d2eaec583fba7f6709e18 | refs/heads/master | 2020-03-08T10:20:09.245659 | 2018-05-03T08:38:57 | 2018-05-03T08:38:57 | 128,070,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | class BasicAgent:
    def __init__(self, k, mod):
        # k and mod are accepted for subclass use; the base class stores nothing.
        pass

    def reinit(self):
        # Reset any per-episode state before a new run.
        pass

    def next_step(self, l1, r1, l2, r2, action_list):
        # Return the next action for the current observation (l1, r1, l2, r2).
        pass
| [
"[email protected]"
] | |
9a9a722f51886c3fdb5a4a62aed3c8878ddfb36e | 8ccf2280a5b14e5003cf876692f99fad59d47d92 | /coding_corner/urls.py | 6f0326b80566c55784e65c3b1673feede0bd7ee1 | [] | no_license | Ngahu/coding-corner | aa746dc0cac84f91c4afee620a593b7745a31b20 | 199839d093f4261384282e687af00a6dc46ae7f2 | refs/heads/master | 2020-03-11T05:00:12.444649 | 2018-04-16T20:13:51 | 2018-04-16T20:13:51 | 129,790,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py |
from django.conf.urls import include,url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api-auth/', include('rest_framework.urls')),
url(r'^todo/', include('todo.urls', namespace='todo')),
]
| [
"[email protected]"
] | |
20338f57a39deabac94d80491f28c3f18bd949db | 90dfd9eba0103cbd44eb9e4f20d4d498a2509b41 | /appengine/django/conf/global_settings.py | adba93491251e09761cb14ed1fee633c777a6217 | [] | no_license | prestomation/SOSpy | 8303cb6cb157a6d66a1b2d3797ea3a48a7d189ac | 110310a21a2f3e17478910e7ab7bb05d3dad0419 | refs/heads/master | 2021-01-13T02:12:14.831471 | 2012-03-04T20:46:33 | 2012-03-04T20:46:33 | 3,180,852 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 21,677 | py | # Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing siutations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', '[email protected]'), ('Full Name', '[email protected]'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities).
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('bn', gettext_noop('Bengali')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-gb', gettext_noop('British English')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy-nl', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('nl', gettext_noop('Dutch')),
('no', gettext_noop('Norwegian')),
('nb', gettext_noop('Norwegian Bokmal')),
('nn', gettext_noop('Norwegian Nynorsk')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various e-mails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link e-mails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
# Legacy format
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {} # Set to empty dictionary for default.
# New format
DATABASES = {
}
# Classes used to implement db routing behaviour
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending e-mail.
EMAIL_HOST = 'localhost'
# Port for sending e-mail.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# Default e-mail address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is a admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# 404s that may be ignored.
IGNORABLE_404_STARTS = ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
IGNORABLE_404_ENDS = ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com/media/"
MEDIA_URL = ''
# Absolute path to the directory that holds static files.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be grouped together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
from django import get_version
URL_VALIDATOR_USER_AGENT = "Django/%s (http://www.djangoproject.com)" % get_version()
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
)
############
# SESSIONS #
############
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_COOKIE_HTTPONLY = False # Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
#########
# CACHE #
#########
# New format
CACHES = {
}
# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in the
# 'hasNoProfanities' validator. All of these should be in lowercase.
PROFANITIES_LIST = ()
# The group ID that designates which users are banned.
# Set to None if you're not using it.
COMMENTS_BANNED_USERS_GROUP = None
# The group ID that designates which users can moderate comments.
# Set to None if you're not using it.
COMMENTS_MODERATORS_GROUP = None
# The group ID that designates the users whose comments should be e-mailed to MANAGERS.
# Set to None if you're not using it.
COMMENTS_SKETCHY_USERS_GROUP = None
# The system will e-mail MANAGERS the first COMMENTS_FIRST_FEW comments by each
# user. Set this to 0 if you want to disable it.
COMMENTS_FIRST_FEW = 0
# A tuple of IP addresses that have been banned from participating in various
# Django-powered features.
BANNED_IPS = ()
##################
# AUTHENTICATION #
##################
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Name and domain for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_DOMAIN = None
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.user_messages.LegacyFallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'django.utils.log.dictConfig'
# The default logging configuration. This sends an email to
# the site admins on every HTTP 500 error. All other log
# records are sent to the bit bucket.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
# The name of the database to use for testing purposes.
# If None, a name of 'test_' + DATABASE_NAME will be assumed
TEST_DATABASE_NAME = None
# Strings used to set the character set and collation order for the test
# database. These values are passed literally to the server, so they are
# backend-dependent. If None, no special settings are sent (system defaults are
# used).
TEST_DATABASE_CHARSET = None
TEST_DATABASE_COLLATION = None
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = ()
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# URL prefix for admin media -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
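# Override sketch: a project settings module (the one DJANGO_SETTINGS_MODULE
# points to) only redefines the names it changes, e.g. (hypothetical values):
#   DEBUG = True
#   TIME_ZONE = 'UTC'
#   INSTALLED_APPS = ('django.contrib.contenttypes', 'django.contrib.auth')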
| [
"prestomation@prestomation-ubuntu.(none)"
] | prestomation@prestomation-ubuntu.(none) |
9dc9d9fb1df3a21179180a6280037b1caa2c6bb4 | e7718e75201b5506206871da1239e1e49c0c438e | /djredcap/management/commands/redcap.py | 504014e946de7f547374d5fc0346750477f76f0d | [
"BSD-3-Clause"
] | permissive | dmegahan/django-redcap | aeca161f463ac5b69a8b6ae7beeb2d441bc18b26 | 9907bf8d35c02ab5937c60fdcd5c9bd88ccb24d8 | refs/heads/master | 2021-01-18T08:55:20.825128 | 2013-07-12T20:18:53 | 2013-07-12T20:18:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,862 | py | import sys
from optparse import NO_DEFAULT, OptionParser
from django.core.management.base import CommandError, BaseCommand, handle_default_options
from django.utils.importlib import import_module
class Command(BaseCommand):
help = "A wrapper for REDCap subcommands"
commands = ['inspect']
def print_subcommands(self, prog_name):
usage = ['', 'Available subcommands:']
for name in sorted(self.commands):
usage.append(' {0}'.format(name))
return '\n'.join(usage)
def usage(self, subcommand):
usage = '%prog {0} subcommand [options] [args]'.format(subcommand)
if self.help:
return '{0}\n\n{1}'.format(usage, self.help)
return usage
def print_help(self, prog_name, subcommand):
super(Command, self).print_help(prog_name, subcommand)
sys.stdout.write('{0}\n\n'.format(self.print_subcommands(prog_name)))
def get_subcommand(self, name):
try:
module = import_module('djredcap.management.subcommands.{0}'.format(name))
return module.Command()
except KeyError:
raise CommandError('Unknown subcommand: redcap {0}'.format(name))
def run_from_argv(self, argv):
"""Set up any environment changes requested (e.g., Python path
and Django settings), then run this command.
"""
if len(argv) > 2 and not argv[2].startswith('-') and argv[2] in self.commands:
subcommand = argv[2]
klass = self.get_subcommand(subcommand)
parser = OptionParser(prog=argv[0], usage=klass.usage('{0} {1}'.format(argv[1], subcommand)),
version=klass.get_version(), option_list=klass.option_list)
options, args = parser.parse_args(argv[3:])
args = [subcommand] + args
else:
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
self.execute(*args, **options.__dict__)
def handle(self, *args, **options):
if not args or args[0] not in self.commands:
return self.print_help('./manage.py', 'redcap')
subcommand, args = args[0], args[1:]
klass = self.get_subcommand(subcommand)
# Grab out a list of defaults from the options. optparse does this for
# us when the script runs from the command line, but since
        # call_command can be called programmatically, we need to simulate the
# loading and handling of defaults (see #10080 for details).
defaults = {}
for opt in klass.option_list:
if opt.default is NO_DEFAULT:
defaults[opt.dest] = None
else:
defaults[opt.dest] = opt.default
defaults.update(options)
return klass.execute(*args, **defaults)
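# Dispatch sketch (hypothetical invocation): "./manage.py redcap inspect data.csv"
# resolves to djredcap.management.subcommands.inspect.Command via get_subcommand.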
| [
"[email protected]"
] | |
8118b5fbdb5b2009ee62c064de4350914636261b | 78d23de227a4c9f2ee6eb422e379b913c06dfcb8 | /LeetCode/205.py | 3bee43846ede99d934ea7b76898a58a5aed0797e | [] | no_license | siddharthcurious/Pythonic3-Feel | df145293a3f1a7627d08c4bedd7e22dfed9892c0 | 898b402b7a65073d58c280589342fc8c156a5cb1 | refs/heads/master | 2020-03-25T05:07:42.372477 | 2019-09-12T06:26:45 | 2019-09-12T06:26:45 | 143,430,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | class Solution:
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
        s1 = set(s)
        t1 = set(t)
        # Distinct-character counts must match, or no one-to-one mapping exists.
        if len(s1) != len(t1):
            return False
        hashmap = {}
        # Build the s -> t character map; a conflicting pair means not isomorphic.
        # Together with the set-size check above, a consistent forward map
        # guarantees the mapping is a bijection.
        for i, j in zip(s, t):
            if i in hashmap:
                if j != hashmap[i]:
                    return False
            else:
                hashmap[i] = j
        return True
if __name__ == "__main__":
obj = Solution()
s = "ab"
t = "aa"
r = obj.isIsomorphic(s, t)
print(r)
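    # Further sanity checks (expected values per the standard definition;
    # not asserted here):
    #   obj.isIsomorphic("egg", "add")      -> True
    #   obj.isIsomorphic("foo", "bar")      -> False
    #   obj.isIsomorphic("paper", "title")  -> True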
| [
"[email protected]"
] | |
48627bb2a04d19b055aa36f14dabc49952e1c8a7 | e10422c540b3199cc5663c1c226ae2b8f24fd5cf | /OsComponents/mkdir_func.py | 7a3f21eb0fc810380d92f8b8716383523d149003 | [] | no_license | cccccsf/single_point | f014a9f0a18eb30ddd4a967a822eba3bd26ed53a | 61cc11b0c40e082b45c5458c8435dbea001af466 | refs/heads/master | 2020-05-09T10:10:05.035435 | 2019-05-07T12:44:30 | 2019-05-07T12:44:30 | 181,030,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | #!/usr/bin/python3
import os
def mkdir(path):
folder = os.path.exists(path)
    if not folder:  # check whether the folder exists; create it if it does not
        os.makedirs(path)  # makedirs also creates any missing parent directories
print("--- new folder: ---")
print(os.path.split(os.path.split(os.path.split(path)[0])[0])[-1] + '/' + os.path.split(os.path.split(path)[0])[-1] + '/' + os.path.split(path)[-1])
else:
print("--- There is already this folder! ---")
print(os.path.split(os.path.split(os.path.split(path)[0])[0])[-1] + '/' + os.path.split(os.path.split(path)[0])[-1] + '/' + os.path.split(path)[-1])
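if __name__ == "__main__":
    # Usage sketch: creates the nested path (and its parents) under the CWD.
    mkdir("demo/output/run1")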
| [
"[email protected]"
] | |
7fc5ebfee57b2b9578b1c963127de47bf9d7bf00 | c5294a8e9a6aa7da37850443d3a5d366ee4b5c35 | /build/spencer_people_tracking/messages/spencer_vision_msgs/catkin_generated/pkg.installspace.context.pc.py | 44a3d4ad2ccc89c32276c0ffe532f9874abd3346 | [] | no_license | scutDavid/ros_gradution_project | 6eab9a5776ae090ae8999d31e840a12a99020c79 | fbbd83ada5aa223809615d55a48e632699afd4b5 | refs/heads/master | 2020-03-07T18:39:24.084619 | 2018-04-25T13:41:04 | 2018-04-25T13:41:04 | 127,647,113 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/wwh/qqq/install/include".split(';') if "/home/wwh/qqq/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;sensor_msgs;geometry_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "spencer_vision_msgs"
PROJECT_SPACE_DIR = "/home/wwh/qqq/install"
PROJECT_VERSION = "1.0.8"
| [
"[email protected]"
] | |
2de1274d7baaf6c2df45857c1c62f0be6aeb0e56 | e3c79bc77c660dd400e68ed498d876ec1b2a54f3 | /distances.py | 9686a61d7745b417ce403e8bcdcdd4ff25cde4cb | [] | no_license | rotolonico/image-regression | e3267e184b8ad30c3b8ce24d75b0a0b83eec9354 | edef7bf3aa9bc5b58c97e91fc4ffd3ac43ad9293 | refs/heads/master | 2022-09-11T02:23:20.387385 | 2020-06-02T17:26:08 | 2020-06-02T17:26:08 | 268,854,054 | 0 | 0 | null | 2020-06-02T16:35:45 | 2020-06-02T16:35:45 | null | UTF-8 | Python | false | false | 1,176 | py | from __future__ import division
import argparse
from skimage.io import imread
import numpy as np
from tsp_solver.greedy import solve_tsp
"""
Naively calculate a short path through the images
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'-i',
dest='image_filenames',
nargs='+',
type=str,
help='File names of input images',
required=True
)
args = arg_parser.parse_args()
images = []
for image_filename in args.image_filenames:
image = imread(image_filename, as_grey=True, plugin='pil')
if str(image.dtype) == 'uint8':
image = np.divide(image, 255.0)
images.append(image)
num_images = len(images)
differences = np.zeros((num_images, num_images))
for i, image in enumerate(images):
for j in range(i, len(images)):
other_image = images[j]
difference = ((image - other_image) ** 2).sum()
differences[i, j] = difference
differences[j, i] = difference
differences_matrix = differences.tolist()
path = solve_tsp(differences_matrix)
print(path)
ordered_image_filenames = [args.image_filenames[i] for i in path]
for filename in ordered_image_filenames:
print(filename)
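# Invocation sketch (hypothetical file names):
#   python distances.py -i frame1.png frame2.png frame3.png
# The printed order visits similar images consecutively: a greedy TSP over
# summed squared pixel differences.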
| [
"[email protected]"
] | |
0c02dd809ad65b1b45b65187e2984797641b152a | 275a96a33ae1f89e7b2ee0ecdbac7d78abe6d6cc | /test/test_run_operation_result.py | 858116adc0976720fb2bb7ad92ca5198c0249495 | [] | no_license | cascadiarc/cyclos-python-client | 8029ce07174f2fe92350a92dda9a60976b2bb6c2 | a2e22a30e22944587293d51be2b8268bce808d70 | refs/heads/main | 2023-04-03T16:52:01.618444 | 2021-04-04T00:00:52 | 2021-04-04T00:00:52 | 354,419,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | # coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.run_operation_result import RunOperationResult # noqa: E501
from swagger_client.rest import ApiException
class TestRunOperationResult(unittest.TestCase):
"""RunOperationResult unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRunOperationResult(self):
"""Test RunOperationResult"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.run_operation_result.RunOperationResult() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
8ea9654512a97537bcda051c9e026e93673c53da | eee85a1ee54fa54e74b93bf3af8391c3f0c80b2a | /basic/python_izm01/joinpath.py | 15e64e79b249d6c2dbacd0b88e92f6391351436d | [] | no_license | ryu-0406/study-python | 8712a6e235e1ca92bb3c00ad053c8298f691108c | da10d5913de32569b2ba4bc98d9919a78e85d22a | refs/heads/master | 2022-12-14T22:43:45.236184 | 2020-09-13T03:55:36 | 2020-09-13T03:55:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | # os.join.path
import os
PROJECT_DIR = r'C:\python-izm'  # raw string avoids accidental escape sequences
SETTING_FILE = 'setting.ini'
print(os.path.join(PROJECT_DIR, SETTING_FILE))
print(os.path.join(PROJECT_DIR, 'setting_dir', SETTING_FILE))
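# On Windows the two prints above yield (sketch):
#   C:\python-izm\setting.ini
#   C:\python-izm\setting_dir\setting.ini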
| [
"[email protected]"
] | |
337304ee03e82971ba43476dce4568be927b4c77 | 44d1936bbc8e256534f3946f100bb0028e92fee5 | /backend/src/hatchling/builders/hooks/version.py | e139d19d9c3fdd6576fe6d2c89376873ed7d7b45 | [
"MIT"
] | permissive | pypa/hatch | aeb72e6a465a39073a020f63a931def16ce90ce8 | 7dac9856d2545393f7dd96d31fc8620dde0dc12d | refs/heads/master | 2023-09-04T04:04:25.079348 | 2023-09-03T23:48:21 | 2023-09-03T23:48:21 | 92,997,800 | 1,869 | 125 | MIT | 2023-09-13T19:39:25 | 2017-05-31T23:37:53 | Python | UTF-8 | Python | false | false | 2,362 | py | from __future__ import annotations
from typing import Any
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
from hatchling.version.core import VersionFile
class VersionBuildHook(BuildHookInterface):
PLUGIN_NAME = 'version'
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.__config_path: str | None = None
self.__config_template: str | None = None
self.__config_pattern: str | bool | None = None
@property
def config_path(self) -> str:
if self.__config_path is None:
path = self.config.get('path', '')
if not isinstance(path, str):
message = f'Option `path` for build hook `{self.PLUGIN_NAME}` must be a string'
raise TypeError(message)
elif not path:
message = f'Option `path` for build hook `{self.PLUGIN_NAME}` is required'
raise ValueError(message)
self.__config_path = path
return self.__config_path
@property
def config_template(self) -> str:
if self.__config_template is None:
template = self.config.get('template', '')
if not isinstance(template, str):
message = f'Option `template` for build hook `{self.PLUGIN_NAME}` must be a string'
raise TypeError(message)
self.__config_template = template
return self.__config_template
@property
def config_pattern(self) -> str | bool:
if self.__config_pattern is None:
pattern = self.config.get('pattern', '')
if not isinstance(pattern, (str, bool)):
message = f'Option `pattern` for build hook `{self.PLUGIN_NAME}` must be a string or a boolean'
raise TypeError(message)
self.__config_pattern = pattern
return self.__config_pattern
def initialize(self, version: str, build_data: dict[str, Any]) -> None:
version_file = VersionFile(self.root, self.config_path)
if self.config_pattern:
version_file.read(self.config_pattern)
version_file.set_version(self.metadata.version)
else:
version_file.write(self.metadata.version, self.config_template)
build_data['artifacts'].append(f'/{self.config_path}')
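# Configuration sketch (hypothetical project values; the table name follows
# PLUGIN_NAME above, and only "path" is required):
#
#   [tool.hatch.build.hooks.version]
#   path = "src/mypkg/_version.py"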
| [
"[email protected]"
] | |
922d426fc1139ee74ca4893cc71971950691e447 | cec68acfc0187b7d92fb7d6e5107058e3f8269ea | /OOPRehber.py | ca98535ddc792915a8aac56bbfe827b36e878c99 | [] | no_license | vektorelpython/Python8 | 441575224100a687467c4934f7c741aa0c4bd087 | d135fbf1444d56a0da38c42fd2e8feda48646f49 | refs/heads/master | 2022-01-18T12:17:40.387422 | 2019-09-07T13:47:55 | 2019-09-07T13:47:55 | 205,534,765 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,727 | py | from datetime import datetime
import os
class OOPDosya():
dosyaFormat = ".csv"
def __init__(self,**kwargs):
self.dosya = None
for key,value in kwargs.items():
if key == "dosya":
self.adres =os.getcwd()+os.sep+value+self.dosyaFormat
if key == "veriler":
self.degiskenler = value
def dosyaAcma(self):
if os.path.exists(self.adres):
kip = "r+"
else:
kip = "w+"
self.dosya = open(self.adres,kip)
def dosyaAcma2(self,adres=""):
if os.path.exists(adres):
kip = "r+"
else:
kip = "w+"
self.dosya = open(adres,kip)
def HataLog(self,Mesaj="",HataMesaji="",HataYer=""):
try:
adim = "Log1"
self.dosyaAcma2(os.getcwd()+os.sep+"hata.csv")
adim = "Log2"
hata = "{};{};{};{}\n".format(Mesaj,HataMesaji,HataYer,str(datetime.now()))
adim = "Log3"
self.dosya.read()
adim = "Log4"
self.dosya.write(hata)
except Exception as hata:
print("Log Hatası",hata,adim)
finally:
self.dosya.close()
def dosyaOkuma(self):
try:
adim = "dO1A"
self.dosyaAcma()
adim = "dO1A1"
print("-"*20)
tumListe = self.dosya.readlines()
adim = "dO1A3"
for item in tumListe:
adim = "dO1A3_for"
liste = item.split(";")
print("{}-{}-{}-{}".format(tumListe.index(item)+1,liste[0],liste[1],liste[2]))
print("-"*20)
adim = "dO1A4"
except Exception as hata:
print(adim)
self.HataLog("Dosya Okuma",hata,adim)
def dosyaSilDuzelt(self,islem = 0):
self.dosyaOkuma()
        kayitNum = input("Select a record")
self.dosyaAcma()
liste = self.dosya.readlines()
if islem == 0:
kayit = self.veriTopla()
liste[int(kayitNum)-1] = kayit
elif islem == 1:
liste.pop(int(kayitNum)-1)
self.dosyaKayit(liste)
print("Kayıt İşlemi Gerçekleşti")
def dosyaYazma(self):
self.dosyaAcma()
kayit = self.veriTopla()
liste = self.dosya.readlines()
liste.append(kayit)
self.dosyaKayit(liste)
print("Kayıt İşlemi Gerçekleşti")
    def veriTopla(self):
        kayit = ""
        for item in self.degiskenler:
            kayit += input("Enter " + item)
            if self.dosyaFormat == ".csv":
                kayit += ";"
            else:
                kayit += "\t"
        kayit += "\n"  # terminate the record so each entry lands on its own line
        return kayit
def dosyaKayit(self,liste):
self.dosya.seek(0)
self.dosya.truncate()
self.dosya.writelines(liste)
self.dosya.close()
def dosyaArama(self):
        arama = input("Enter the text to search for")
self.dosyaAcma()
liste = self.dosya.readlines()
sonuc = []
for item in liste:
eleman = item.split(";")
if arama in eleman[0] or arama in eleman[1] or arama in eleman[2]:
sonuc.append(item)
for item in sonuc:
liste = item.split(";")
print("{}-{}-{}-{}".format(sonuc.index(item)+1,liste[0],liste[1],liste[2]))
def Menu(self):
adim = ""
metin = """
        1 - Search
        2 - Add
        3 - Delete
        4 - Edit
        5 - List
        6 - Exit
"""
while True:
print(metin)
try:
                islem = int(input("Select an operation"))
if islem == 1:
self.dosyaArama()
elif islem == 2:
self.dosyaYazma()
elif islem == 3:
self.dosyaSilDuzelt(1)
elif islem == 4:
self.dosyaSilDuzelt()
elif islem == 5:
adim = "AnaI5A"
self.dosyaOkuma()
adim = "AnaI5B"
elif islem == 6:
break
except Exception as hata:
self.HataLog("Ana Menü",hata,adim)
if __name__=="__main__":
    defter = OOPDosya(dosya="banka", veriler=["First Name", "Last Name", "Bank Account No", "Balance"])
defter.Menu() | [
"Kurs"
] | Kurs |
0077d6ec93d6be2f47c4029e0d2c475b5640c47f | 3a4fbde06794da1ec4c778055dcc5586eec4b7d2 | /@lib/12-13-2011-01/vyperlogix/ssl/__init__.py | eb74e37cfe6c8bc07909756a2517d38c03690188 | [] | no_license | raychorn/svn_python-django-projects | 27b3f367303d6254af55c645ea003276a5807798 | df0d90c72d482b8a1e1b87e484d7ad991248ecc8 | refs/heads/main | 2022-12-30T20:36:25.884400 | 2020-10-15T21:52:32 | 2020-10-15T21:52:32 | 304,455,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | __copyright__ = """\
(c). Copyright 2008-2014, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
def fetch_from_web(url):
import requests
from vyperlogix.misc import _utils
ioBuf = _utils.stringIO()
request = requests.get(url)
    for block in request.iter_content(1024):
        if not block:
            break
        ioBuf.write(block)  # raw write; "print >>" would append a newline per block
return ioBuf.getvalue()
def fetch_from_ssl(url):
return fetch_from_web(url)
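# Usage sketch (Python 2, matching the syntax of this module):
#   html = fetch_from_ssl('https://www.example.com/')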
| [
"[email protected]"
] | |
3ecf8dfe7d9571bffd4b64faea23576dd474e7a9 | 14fc2ee47e1081416f0465e8afa18da33169095f | /src/PP4E/Internet/Other/http-getfile.py | 457616b78f1fe71aee22b13b3dcdf0b7839d82a9 | [] | no_license | madtyn/progPython | d95ea8021b1a54433e7b73de9d3b11d53a3096b7 | f3a1169149afdeb5191dd895462139f60d21d458 | refs/heads/master | 2021-07-09T13:35:27.519439 | 2017-10-04T14:46:57 | 2017-10-04T14:46:57 | 104,866,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | """
fetch a file from an HTTP (web) server over sockets via http.client; the filename
parameter may have a full directory path, and may name a CGI script with ? query
parameters on the end to invoke a remote program; fetched file data or remote
program output could be saved to a local file to mimic FTP, or parsed with str.find
or html.parser module; also: http.client request(method, url, body=None, hdrs={});
"""
import sys, http.client
showlines = 6
try:
servername, filename = sys.argv[1:] # cmdline args?
except:
servername, filename = 'learning-python.com', '/index.html'
print(servername, filename)
server = http.client.HTTPConnection(servername) # connect to http site/server
server.putrequest('GET', filename) # send request and headers
server.putheader('Accept', 'text/html') # POST requests work here too
server.endheaders() # as do CGI script filenames
reply = server.getresponse() # read reply headers + data
if reply.status != 200: # 200 means success
print('Error sending request', reply.status, reply.reason)
else:
data = reply.readlines() # file obj for data received
reply.close() # show lines with eoln at end
for line in data[:showlines]: # to save, write data to file
print(line) # line already has \n, but bytes
| [
"[email protected]"
] | |
c9a63f38eca1a4e6e50a9780623a331040a75f93 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/xuo.py | 1917c03cb3041115ab9ff752721092b0a98fb74e | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'xuO':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
e6de6fda316d44315778acf690209b6163af6b15 | dd3d31d48f1fae5fabc8355d83046c731e306612 | /google_api/google-cloud-sdk/lib/surface/compute/health_checks/update/ssl.py | d4bf2cbca6783ab1fbb8b4ef667588004dca5559 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | cjholz/Stutter_Classification | ea2aeea10c57a2525447285a37fe0735b89fd52c | 32cd5f793bf6cf8e231ae2048b7b2dbb0592b21a | refs/heads/master | 2020-06-04T02:11:08.252524 | 2019-06-13T20:40:09 | 2019-06-13T20:40:09 | 191,828,461 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,482 | py | # -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for updating health checks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import health_checks_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute.health_checks import flags
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class Update(base.UpdateCommand):
"""Update a SSL health check.
*{command}* is used to update an existing SSL health check. Only
arguments passed in will be updated on the health check. Other
attributes will remain unaffected.
"""
HEALTH_CHECK_ARG = None
@classmethod
def Args(cls, parser):
cls.HEALTH_CHECK_ARG = flags.HealthCheckArgument('SSL')
cls.HEALTH_CHECK_ARG.AddArgument(parser, operation_type='update')
health_checks_utils.AddTcpRelatedUpdateArgs(parser)
health_checks_utils.AddProtocolAgnosticUpdateArgs(parser, 'SSL')
def _GetGetRequest(self, client, health_check_ref):
"""Returns a request for fetching the existing health check."""
return (client.apitools_client.healthChecks,
'Get',
client.messages.ComputeHealthChecksGetRequest(
healthCheck=health_check_ref.Name(),
project=health_check_ref.project))
def _GetSetRequest(self, client, health_check_ref, replacement):
"""Returns a request for updating the health check."""
return (client.apitools_client.healthChecks,
'Update',
client.messages.ComputeHealthChecksUpdateRequest(
healthCheck=health_check_ref.Name(),
healthCheckResource=replacement,
project=health_check_ref.project))
def Modify(self, client, args, existing_check):
"""Returns a modified HealthCheck message."""
# We do not support using 'update ssl' with a health check of a
# different protocol.
if (existing_check.type !=
client.messages.HealthCheck.TypeValueValuesEnum.SSL):
raise core_exceptions.Error(
'update ssl subcommand applied to health check with protocol ' +
existing_check.type.name)
# Description, PortName, Request, and Response are the only attributes that
# can be cleared by passing in an empty string (but we don't want to set it
# to an empty string).
if args.description:
description = args.description
elif args.description is None:
description = existing_check.description
else:
description = None
port, port_name, port_specification = health_checks_utils. \
HandlePortRelatedFlagsForUpdate(args, existing_check.sslHealthCheck)
if args.request:
request = args.request
elif args.request is None:
request = existing_check.sslHealthCheck.request
else:
request = None
if args.response:
response = args.response
elif args.response is None:
response = existing_check.sslHealthCheck.response
else:
response = None
proxy_header = existing_check.sslHealthCheck.proxyHeader
if args.proxy_header is not None:
proxy_header = client.messages.SSLHealthCheck.ProxyHeaderValueValuesEnum(
args.proxy_header)
new_health_check = client.messages.HealthCheck(
name=existing_check.name,
description=description,
type=client.messages.HealthCheck.TypeValueValuesEnum.SSL,
sslHealthCheck=client.messages.SSLHealthCheck(
request=request,
response=response,
port=port,
portName=port_name,
portSpecification=port_specification,
proxyHeader=proxy_header),
checkIntervalSec=(args.check_interval or
existing_check.checkIntervalSec),
timeoutSec=args.timeout or existing_check.timeoutSec,
healthyThreshold=(args.healthy_threshold or
existing_check.healthyThreshold),
unhealthyThreshold=(args.unhealthy_threshold or
existing_check.unhealthyThreshold),
)
return new_health_check
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
health_checks_utils.CheckProtocolAgnosticArgs(args)
args_unset = not (args.port or args.check_interval or args.timeout or
args.healthy_threshold or args.unhealthy_threshold or
args.proxy_header or args.use_serving_port)
if (args.description is None and args.request is None and
args.response is None and args.port_name is None and args_unset):
raise exceptions.ToolException('At least one property must be modified.')
health_check_ref = self.HEALTH_CHECK_ARG.ResolveAsResource(
args, holder.resources)
get_request = self._GetGetRequest(client, health_check_ref)
objects = client.MakeRequests([get_request])
new_object = self.Modify(client, args, objects[0])
# If existing object is equal to the proposed object or if
# Modify() returns None, then there is no work to be done, so we
# print the resource and return.
if objects[0] == new_object:
log.status.Print(
'No change requested; skipping update for [{0}].'.format(
objects[0].name))
return objects
return client.MakeRequests(
[self._GetSetRequest(client, health_check_ref, new_object)])
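# Example invocation (a hedged sketch; the flag names mirror the args the
# Modify/Run methods above consume, e.g. args.description, args.port, args.timeout):
#   gcloud compute health-checks update ssl my-ssl-check \
#       --description="frontend check" --port=443 --timeout=10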
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class UpdateAlpha(Update):
"""Update a SSL health check.
*{command}* is used to update an existing SSL health check. Only
arguments passed in will be updated on the health check. Other
attributes will remain unaffected.
"""
HEALTH_CHECK_ARG = None
@classmethod
def Args(cls, parser):
cls.HEALTH_CHECK_ARG = flags.HealthCheckArgument(
'SSL', include_l7_internal_load_balancing=True)
cls.HEALTH_CHECK_ARG.AddArgument(parser, operation_type='update')
health_checks_utils.AddTcpRelatedUpdateArgs(parser)
health_checks_utils.AddProtocolAgnosticUpdateArgs(parser, 'SSL')
def _GetRegionalGetRequest(self, client, health_check_ref):
"""Returns a request for fetching the existing health check."""
return (client.apitools_client.regionHealthChecks, 'Get',
client.messages.ComputeRegionHealthChecksGetRequest(
healthCheck=health_check_ref.Name(),
project=health_check_ref.project,
region=health_check_ref.region))
def _GetRegionalSetRequest(self, client, health_check_ref, replacement):
"""Returns a request for updating the health check."""
return (client.apitools_client.regionHealthChecks, 'Update',
client.messages.ComputeRegionHealthChecksUpdateRequest(
healthCheck=health_check_ref.Name(),
healthCheckResource=replacement,
project=health_check_ref.project,
region=health_check_ref.region))
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
health_checks_utils.CheckProtocolAgnosticArgs(args)
args_unset = not (args.port or args.check_interval or args.timeout or
args.healthy_threshold or args.unhealthy_threshold or
args.proxy_header or args.use_serving_port)
if (args.description is None and args.request is None and
args.response is None and args.port_name is None and args_unset):
raise exceptions.ToolException('At least one property must be modified.')
health_check_ref = self.HEALTH_CHECK_ARG.ResolveAsResource(
args, holder.resources)
if health_checks_utils.IsRegionalHealthCheckRef(health_check_ref):
get_request = self._GetRegionalGetRequest(client, health_check_ref)
else:
get_request = self._GetGetRequest(client, health_check_ref)
objects = client.MakeRequests([get_request])
new_object = self.Modify(client, args, objects[0])
# If existing object is equal to the proposed object or if
# Modify() returns None, then there is no work to be done, so we
# print the resource and return.
if objects[0] == new_object:
log.status.Print('No change requested; skipping update for [{0}].'.format(
objects[0].name))
return objects
if health_checks_utils.IsRegionalHealthCheckRef(health_check_ref):
set_request = self._GetRegionalSetRequest(client, health_check_ref,
new_object)
else:
set_request = self._GetSetRequest(client, health_check_ref, new_object)
return client.MakeRequests([set_request])
| [
"[email protected]"
] | |
a5c236042dbe21c66ff0f9cabec1cdaf04ff4535 | 24353bdd2695f7d277f00f1397b2fcc06a1413fe | /omsdk/http/sdkrestpdu.py | 1ef2eabd17449d6aab16f19fd08a9be4b26d8016 | [
"Apache-2.0",
"GPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | DanielFroehlich/omsdk | f129eb43a9335e29a9b1bb01b29a5c886138ea82 | 475d925e4033104957fdc64480fe8f9af0ab6b8a | refs/heads/master | 2020-04-10T00:16:48.223458 | 2018-12-06T14:00:33 | 2018-12-06T14:00:33 | 160,680,831 | 0 | 0 | Apache-2.0 | 2018-12-06T13:46:07 | 2018-12-06T13:46:07 | null | UTF-8 | Python | false | false | 4,655 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright © 2018 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Vaideeswaran Ganesan
#
import subprocess
import io
from xml.dom.minidom import parse
import xml.dom.minidom
import json
import re
import uuid
import sys
import xml.etree.ElementTree as ET
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
class RestRequest:
envAttrs = {
'xmlns:enc': 'http://www.w3.org/2003/05/soap-encoding',
'xmlns:env': 'http://www.w3.org/2003/05/soap-envelope',
'xmlns:tns': 'http://schemas.microsoft.com/wmx/2005/06',
# xmlns:a = xmlns:wsa
'xmlns:a': 'http://schemas.xmlsoap.org/ws/2004/08/addressing',
'xmlns:wse': 'http://schemas.xmlsoap.org/ws/2004/08/eventing',
# xmlns:n = xmlns:wsen
'xmlns:n': 'http://schemas.xmlsoap.org/ws/2004/09/enumeration',
# xmlns:w = xmlns:wsman
'xmlns:w': 'http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd',
# xmlns:b = xmlns:wsmb
'xmlns:b': 'http://schemas.dmtf.org/wbem/wsman/1/cimbinding.xsd',
'xmlns:wsmid': 'http://schemas.dmtf.org/wbem/wsman/identity/1/wsmanidentity.xsd',
# xmlns:x = xmlns:wxf
'xmlns:x': 'http://schemas.xmlsoap.org/ws/2004/09/transfer',
'xmlns:xsd': 'http://www.w3.org/2001/XMLSchema',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xmlns:p': 'http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd',
}
def __init__(self):
self.root = {}
self.selector = None
    # The WS-Man-style builder methods below are no-ops for this REST
    # transport; each returns self so calls can be chained.
    def enumerate(self, to, ruri, selectors, envSize=512000, mid=None, opTimeout=60):
        return self
    def set_header(self, to, ruri, action, envSize=512000, mid=None, opTimeout=60):
        return self
    def add_selectors(self, selectors):
        return self
def add_body(self, ruri, action, args):
self.root = {}
        # Example of the expected args shape (kept as unused reference data):
        sample = {
"ExportFormat": "XML",
"ShareParameters": {
"Target": "ALL",
"IPAddress": "10.9.9.9",
"ShareName": "sammba",
"ShareType": 0,
"UserName": "root",
"Password": "calvin",
"FileName": "/root/file.xml",
}
}
for i in args:
self.root[i] = str(args[i])
return self
def add_error(self, ex):
self.root = {
"Body": {
"ClientFault": {
"Reason": {
"Text": str(ex)
}
}
}
}
return self
def identify(self):
return self
def get_text(self):
return json.dumps(self.root)
class RestResponse:
def __init__(self):
pass
def strip_ns(self, s, stripNS):
return (re.sub(".*:", "", s) if stripNS else s)
    def execute_str(self, value, stripNS=True):
        # stripNS is accepted for interface parity; JSON payloads carry no namespaces.
        return json.loads(value)
def get_message(self, fault):
msg = None
        while fault is not None and msg is None:
if not isinstance(fault, dict):
msg = fault
elif "Message" in fault:
if isinstance(fault["Message"], dict):
fault = fault["Message"]
else:
msg = fault["Message"]
elif "WSManFault" in fault:
fault = fault["WSManFault"]
else:
for field in fault:
if field.startswith("Fault"):
m = self.get_message(fault[field])
                    if m is not None:
msg = m
break
elif field == "Text":
msg = fault[field]
return msg
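# A minimal usage sketch (assumed flow; not part of the original module):
#   req = RestRequest().add_body(None, None, {"ExportFormat": "XML"})
#   payload = req.get_text()                      # JSON string of the body
#   parsed = RestResponse().execute_str(payload)  # parses it back to a dict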
| [
"[email protected]"
] | |
23e3531802e99bc3c6eea2f5b5c543aaa4bbbc7b | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/6/p_w.py | 08a9ccc7fea6e95530819e5611c84363b105e4b6 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # the payload must open and close with a standalone double-quote token
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # drop the quote tokens and print the words between them
            lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
        else:
            print('')
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'p_W':
printFunction(data[1:])
else:
                print('ERROR')
return
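# Expected input format, inferred from printFunction above (a hedged guess):
#   p_W " hello world "
# prints "hello world"; a line with any other leading keyword prints ERROR and stops.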
if __name__ == '__main__':
    main(sys.argv[1])
| [
"[email protected]"
] | |
ecae9cbc32b6ec9ff2a4302c98197f778c67bb43 | b3d86713ed58e0b7fe3c1191324e36659c0d9d78 | /RegressionProgram/Testcode/kmeans/kmeans_test1.py | 6cd86f98bfc94b55fd5f3dc719af5851cedc2e96 | [] | no_license | Kose-i/machine_learning_tutorial | 3d6cb30a20d65c66aa6efcba0e693de75791507a | def223fecb459ad1a6e7f9f36b3d733a89efd378 | refs/heads/master | 2021-07-03T10:37:26.809388 | 2020-07-27T12:53:19 | 2020-07-27T12:53:19 | 174,057,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import kmeans
np.random.seed(0)
points1 = np.random.randn(50,2)
points2 = np.random.randn(50,2) + np.array([5,0])
points3 = np.random.randn(50,2) + np.array([5,5])
points = np.r_[points1, points2, points3]
np.random.shuffle(points)
model = kmeans.KMeans(3)
model.fit(points)
markers = ["+", "*", "o"]
for i in range(3):
p = points[model.labels_ == i, :]
plt.scatter(p[:,0], p[:,1], color="k", marker=markers[i])
plt.show()
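# The local `kmeans` module is assumed to expose roughly this interface
# (a hedged sketch, not the repository's actual implementation):
#   class KMeans:
#       def __init__(self, n_clusters): ...
#       def fit(self, X): ...  # sets self.labels_, one cluster id per sample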
| [
"[email protected]"
] | |
4e86b144cd367d4da2d02b1dfe5f04ed00b40944 | f97c13830f216cd58748bc78186634d02aed5e57 | /consolelogs/apps.py | 8d68002248fb77502bf33ed2554edc4a645fdde0 | [
"MIT"
] | permissive | stephanpoetschner/demo-whitehats | d835aa7fa1aa742519d368bb88ff685c97660756 | 0bd8ccd75f37129ac3ad82949a6899aa7b706b90 | refs/heads/master | 2020-03-13T20:20:00.726739 | 2018-05-04T07:40:45 | 2018-05-04T07:40:45 | 131,271,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from django.apps import AppConfig
class ConsolelogsConfig(AppConfig):
name = 'consolelogs'
| [
"[email protected]"
] | |
a3275208520062eedf2819e57546d3a412078bde | f1a5905649c19688f2d01998da805dbdd5b73a5d | /supervised_learning/0x06-keras/7-train.py.bak | 834bd89ded9a75a8f806b9a74ca732608ed8c39f | [] | no_license | Aishaharrash/holbertonschool-machine_learning | 4d388daab993848b8c354af33478e14b04a6ef25 | 8a3792da58e6102293cd2b4aadc938c264ec3928 | refs/heads/main | 2023-06-07T07:16:37.643164 | 2021-07-09T19:14:47 | 2021-07-09T19:14:47 | 358,292,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,430 | bak | #!/usr/bin/env python3
"""
7-train.py
Module that defines a function called train_model
"""
import tensorflow.keras as K
def train_model(network, data, labels, batch_size, epochs,
validation_data=None, early_stopping=False,
patience=0, learning_rate_decay=False, alpha=0.1,
decay_rate=1, verbose=True, shuffle=False):
"""
Function that trains a model using mini-batch gradient descent
Args:
network (keras model): model to train
data (np.ndarray): matrix of shape (m, nx) containing the input data
labels (np.ndarray): one hot matrix of shape (m, classes) containing
the labels of data
batch_size (int): size of the batch used for mini-batch gradient
descent
epochs (int): number of passes through data for mini-batch gradient
descent
validation_data (tuple): data to validate the model with, if not None
        early_stopping (bool): indicates whether early stopping should be used
        patience (int): the patience (in epochs) used for early stopping
learning_rate_decay (bool): indicates whether learning rate decay
should be used
alpha (float): learning rate
decay_rate (int): the decay rate
verbose (bool): determines if output should be printed during training
shuffle (bool): determines whether to shuffle the batches every epoch
Returns:
The History object generated after training the model.
"""
def step_decay(epoch):
"""Function that calculates the step decay"""
return alpha / (1 + decay_rate * epoch)
callbacks = []
if validation_data and learning_rate_decay:
callbacks.append(K.callbacks.LearningRateScheduler(step_decay,
verbose=1))
if validation_data and early_stopping:
callbacks.append(K.callbacks.EarlyStopping(monitor="val_loss",
patience=patience))
return network.fit(x=data,
y=labels,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
validation_data=validation_data,
shuffle=shuffle,
callbacks=callbacks)
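# A minimal usage sketch (hypothetical model and data, not from this module):
#   network = ...  # a compiled keras model
#   history = train_model(network, X_train, Y_train, batch_size=32, epochs=5,
#                         validation_data=(X_valid, Y_valid),
#                         early_stopping=True, patience=3,
#                         learning_rate_decay=True, alpha=0.001)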
| [
"vagrant@precise32.(none)"
] | vagrant@precise32.(none) |
cc1137e006408026a166b891fd18a04af1c92d3a | c0b10aa2dbb20d916cf77c31aab9f27c3003ecdb | /constructbinarysearchtreefrompreordertraversal1008.py | e8b70142385656e327035da093e1706b8af6380d | [] | no_license | cc13ny/LeetcodePractices | f6ba4881ebaa6d739cc01217d21653ae971f837d | 95f3344a14e10a8ba7816632a6d2177c6c81b8a3 | refs/heads/master | 2021-01-14T14:38:12.559455 | 2020-02-24T03:59:38 | 2020-02-24T03:59:38 | 242,645,175 | 0 | 0 | null | 2020-02-24T04:19:18 | 2020-02-24T04:19:17 | null | UTF-8 | Python | false | false | 2,259 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def bstFromPreorder(self, preorder):
        # Tree construction by repeated BST insertion.
if preorder == []:
return None
self.root = TreeNode(preorder[0])
        for i in range(1, len(preorder)):
self.insert(preorder[i])
return self.root
    def insert(self, newItem):
        # Standard BST insertion. Preorder of a valid BST has distinct
        # values, so the equal case cannot occur here.
        pre = self.root
        while pre:
            preP = pre
            if pre.val > newItem:
                pre = pre.left
            else:
                pre = pre.right
        currentNode = TreeNode(newItem)
        if preP.val > newItem:
            preP.left = currentNode
        else:
            preP.right = currentNode
class Solution1:
    def bstFromPreorder(self, preorder):
        # Recursive solution: consume preorder values while each one fits
        # within the (lower, upper) bound of the current subtree.
self.index = 0
def helper(lower = float('-inf'), upper = float('inf')):
if self.index>= len(preorder) or preorder[self.index]> upper or preorder[self.index] < lower:
return None
root = TreeNode(preorder[self.index])
self.index += 1
root.left = helper(lower, root.val)
root.right = helper(root.val, upper)
return root
return helper()
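# A small sanity check (input taken from the LeetCode 1008 example):
#   root = Solution1().bstFromPreorder([8, 5, 1, 7, 10, 12])
#   assert root.val == 8 and root.left.val == 5 and root.right.val == 10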
class Solution2:
    def bstFromPreorder(self, preorder):
        # Iterative solution using an explicit stack of ancestors.
if preorder == []:
return None
root = TreeNode(preorder[0])
stackList = [root]
for i in range(1, len(preorder)):
currentNode = TreeNode(preorder[i])
if stackList!=[] and preorder[i] > stackList[-1].val:
while stackList and currentNode.val > stackList[-1].val:
last = stackList.pop(-1)
last.right = currentNode
stackList.append(currentNode)
elif stackList!=[] and preorder[i] < stackList[-1].val:
stackList[-1].left = currentNode
stackList.append(currentNode)
elif stackList == []:
stackList.append(currentNode)
        return root
| [
"[email protected]"
] | |
4177d7077e1bed4f0b9976c80926b62327c04b29 | 62ccdb11daefaecc8e63f235c7519cc7594f705a | /images/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/command_lib/compute/sole_tenancy/node_groups/flags.py | e1dc5a919cd87800bfb7e546356817c7a02ba3cf | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | hiday1979/kalabasa-mas | eccc869bfe259bb474f9d2a4dc4b8561a481f308 | 53a9818eb2a6f35ee57c4df655e7abaaa3e7ef5b | refs/heads/master | 2021-07-05T16:34:44.962142 | 2018-07-10T10:22:24 | 2018-07-10T10:22:24 | 129,709,974 | 0 | 1 | null | 2020-07-24T22:15:29 | 2018-04-16T08:27:13 | Python | UTF-8 | Python | false | false | 2,212 | py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags for the `compute sole-tenancy node-groups` commands."""
from __future__ import absolute_import
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.command_lib.compute import flags as compute_flags
def MakeNodeGroupArg():
return compute_flags.ResourceArgument(
resource_name='node group',
zonal_collection='compute.nodeGroups',
zone_explanation=compute_flags.ZONE_PROPERTY_EXPLANATION)
def AddNodeTemplateFlagToParser(parser, required=True):
parser.add_argument(
'--node-template',
required=required,
help='The name of the node template resource to be set for this node '
'group.')
def AddCreateArgsToParser(parser):
"""Add flags for creating a node group to the argument parser."""
parser.add_argument(
'--description',
help='An optional description of this resource.')
  AddNodeTemplateFlagToParser(parser)
parser.add_argument(
'--target-size',
required=True,
type=int,
help='The target initial number of nodes in the node group.')
def AddUpdateArgsToParser(parser):
"""Add flags for updating a node group to the argument parser."""
update_node_count_group = parser.add_group(mutex=True)
update_node_count_group.add_argument(
'--add-nodes',
type=int,
help='The number of nodes to add to the node group.')
update_node_count_group.add_argument(
'--delete-nodes',
metavar='NODE_INDEX',
type=arg_parsers.ArgList(element_type=int),
help='The indexes of the nodes to remove from the group.')
  AddNodeTemplateFlagToParser(parser, required=False)
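# Example invocation wiring these flags (a hedged sketch; the flag names are
# taken from the add_argument calls above):
#   gcloud compute sole-tenancy node-groups update my-group \
#       --node-template=my-template --add-nodes=2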
| [
"[email protected]"
] | |
362a7f37cbf41cc98839d61d62cb53d2eb8d9c2e | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/resolve/NumpyDocstringAttributeNameResolvesToInheritedClassAttribute.py | 63f5ca2af921064b5e58ab99eb759e16d26333fb | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 226 | py | class Foo:
"""
Attributes
----------
bar
Something cool
"""
bar = 1
class Baz(Foo):
"""
Attributes
----------
bar
<ref>
Re-documented but does exist still.
""" | [
"[email protected]"
] | |
84c352a89e220a60388723a844f438c3b940417c | caf8cbcafd448a301997770165b323438d119f5e | /.history/chapter01/python_05_if_condition_20201128215853.py | 959c6f854e6f730e5993c42e442d673bb15cbb06 | [
"MIT"
] | permissive | KustomApe/nerdape | 03e0691f675f13ce2aefa46ee230111247e90c72 | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | refs/heads/main | 2023-01-23T10:13:26.584386 | 2020-11-28T22:29:49 | 2020-11-28T22:29:49 | 309,897,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | """[About if statements]
If <condition> holds, then do this.
"""
# if condition:
#     block to execute
# Use this when a process should only run under a certain condition,
# e.g. the oil has to be changed every 3000 km
distance = 3403
# if distance > 3000:
#     print('Time for an oil change')
# Comparing strings / comparing lists
# if 'abc' == 'ABC':
#     print('1: equal')
# if 'CDE' == 'CDE':
#     print('2: equal')
# if 'aiueo' == 'aiueo':
#     print('3: equal')
# if ['apple', 'banana'] == ['apple', 'banana']:
#     print('1: lists equal')
# if ['apple', 'banana'] == ['APPLE', 'BANANA']:
#     print('2: lists equal')
# if [1, 2, 3] == ['1', '2', '3']:
#     print('3: lists equal')
# if [1, 2, 3] == [1, 2, 3]:
#     print('4: lists equal')
# Searching within a string / searching for a list element
if 'abc' in 'ABC':
    print('1: hit!')
if 'drift' in 'I love drifting':
    print('2: hit!')
if 'japan' in 'japanese domestic market vehicle':
    print('3: hit!')
if 12 in [12, 3, 4]:
    print('1: found!')
if 345 in [3, 4, 5]:
    print('2: found!')
# else statement
# elif statement
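# A short else/elif sketch rounding out the two notes above (made-up values):
speed = 120
if speed > 140:
    print('way too fast')
elif speed > 100:
    print('a little fast')
else:
    print('cruising')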
| [
"[email protected]"
] | |
8b09045d5961b139ce54084cacb2092c4503929d | 352b4d34a5d6f9b5fb6949f92f32cb1154c738c3 | /bin/quota-alignment/scripts/bed_utils.py | 52af9c802e37b56f44eb4667f6be7ef0494dfab8 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | LyonsLab/coge | f03d812273a5ff18880c61663a4114f3cbc6d3d5 | 1d9a8e84a8572809ee3260ede44290e14de3bdd1 | refs/heads/master | 2022-01-21T22:29:57.269859 | 2021-12-21T19:56:46 | 2021-12-21T19:56:46 | 11,158,196 | 41 | 24 | null | 2017-02-03T20:57:44 | 2013-07-03T18:28:31 | Perl | UTF-8 | Python | false | false | 3,433 | py | """
Classes to handle the .bed file and .raw file
"""
# get the gene order given a Bed object
get_order = lambda bed: dict((f['accn'], (i, f)) for (i, f) in enumerate(bed))
class BedLine(object):
# the Bed format supports more columns. we only need
# the first 4, but keep the information in 'stuff'.
__slots__ = ("seqid", "start", "end", "accn", "stuff")
def __init__(self, sline):
args = sline.strip().split("\t")
self.seqid = args[0]
self.start = int(args[1])
self.end = int(args[2])
self.accn = args[3]
self.stuff = args[4:] if len(args) > 4 else None
def __str__(self):
s = "\t".join(map(str, [getattr(self, attr) \
for attr in BedLine.__slots__[:-1]]))
if self.stuff:
s += "\t" + "\t".join(self.stuff)
return s
def __getitem__(self, key):
return getattr(self, key)
class Bed(object):
def __init__(self, filename):
self.filename = filename
self.beds = []
for line in open(filename):
if line[0] == "#": continue
if line.startswith('track'): continue
self.beds.append(BedLine(line))
self.seqids = sorted(set(b.seqid for b in self.beds))
self.beds.sort(key=lambda a: (a.seqid, a.start, a.accn))
def __getitem__(self, i):
return self.beds[i]
def __len__(self):
return len(self.beds)
def __iter__(self):
for b in self.beds:
yield b
def get_order(self):
return dict((f.accn, (i, f)) for (i, f) in enumerate(self))
def get_simple_bed(self):
return [(b.seqid, i) for (i, b) in enumerate(self)]
class RawLine(object):
__slots__ = ("seqid_a", "pos_a", "seqid_b", "pos_b", "score")
def __init__(self, sline):
args = sline.strip().split("\t")
self.seqid_a = args[0]
self.pos_a = int(args[1])
self.seqid_b = args[2]
self.pos_b = int(args[3])
self.score = int(args[4])
def __str__(self):
return "\t".join(map(str, [getattr(self, attr) \
for attr in RawLine.__slots__]))
def __getitem__(self, key):
return getattr(self, key)
class Raw(list):
def __init__(self, filename):
self.filename = filename
for line in open(filename):
if line[0] == "#": continue
self.append(RawLine(line))
class BlastLine(object):
__slots__ = ('query', 'subject', 'pctid', 'hitlen', 'nmismatch', 'ngaps', \
'qstart', 'qstop', 'sstart', 'sstop', 'evalue', 'score', \
'qseqid', 'sseqid', 'qi', 'si')
def __init__(self, sline):
args = sline.split("\t")
self.query = args[0]
self.subject = args[1]
self.pctid = float(args[2])
self.hitlen = int(args[3])
self.nmismatch = int(args[4])
self.ngaps = int(args[5])
self.qstart = int(args[6])
self.qstop = int(args[7])
self.sstart = int(args[8])
self.sstop = int(args[9])
self.evalue = float(args[10])
self.score = float(args[11])
def __repr__(self):
return "BlastLine('%s' to '%s', eval=%.3f, score=%.1f)" % \
(self.query, self.subject, self.evalue, self.score)
def __str__(self):
return "\t".join(map(str, [getattr(self, attr) \
for attr in BlastLine.__slots__][:-4]))
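# A minimal usage sketch (hypothetical file name):
#   bed = Bed("genes.bed")
#   order = bed.get_order()        # accn -> (index, BedLine)
#   simple = bed.get_simple_bed()  # [(seqid, index), ...]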
| [
"[email protected]"
] | |
dfe9d224723292a459b863bc082a0024999c5ff6 | f971b59661f080752f5ff09daf1afc6eed855c25 | /genteams.py | da3c97bf653d54f382f847d46da7b92f8745b586 | [] | no_license | firstwiki/_scripts | ec11790e788e6627612711f018108d872e0edde0 | c7471955d4dc2368489c5270cbf05a8db714aec0 | refs/heads/master | 2020-04-06T04:24:33.468256 | 2018-05-12T06:52:12 | 2018-05-12T06:52:12 | 56,561,704 | 1 | 2 | null | 2016-12-18T00:13:13 | 2016-04-19T03:28:46 | Python | UTF-8 | Python | false | false | 4,972 | py | #!/usr/bin/env python
from collections import OrderedDict
import csv
import sys
import os
from os.path import abspath, dirname, exists, join
import optparse
import frontmatter
import code_from_gh
import yaml
def read_team_csv(csv_fname):
with open(csv_fname) as fp:
reader = csv.reader(fp)
for row in reader:
yield [r.strip() for r in row]
def add_maybe(d, f, v):
if not v:
if f not in d:
d[f] = None
else:
d[f] = v
def add_maybe_web(d, k, nv):
if nv:
v = d.get(k)
if v is None or v.lower().strip('/') != nv.lower().strip('/'):
d[k] = nv
def main():
# input is teams csv datafile from TBA
# -> https://github.com/the-blue-alliance/the-blue-alliance-data
csv_fname = abspath(sys.argv[1])
max_team = int(sys.argv[2])
mode = sys.argv[3]
if mode not in ['new', 'update']:
print("Error: invalid mode")
return
os.chdir(abspath(join(dirname(__file__), '..')))
cwd = os.getcwd()
for row in read_team_csv(csv_fname):
# this changes on occasion...
number, name, sponsors, l1, l2, l3, website, rookie_year, \
facebook, twitter, youtube, github, instagram, periscope = row
if rookie_year:
rookie_year = int(rookie_year)
number = number[3:]
if int(number) > max_team:
continue
d1 = '%04d' % (int(int(number)/1000)*1000,)
d2 = '%03d' % (int(int(number)/100)*100,)
f = join(cwd, 'frc%s' % d1, '_frc', d2, '%s.md' % number)
if mode == 'new' and exists(f):
continue
if 'firstinspires' in website:
website = ''
if l3:
location = '%s, %s, %s' % (l1, l2, l3)
elif l2:
location = '%s, %s' % (l1, l2)
else:
location = l1
sponsors = [s.strip() for s in sponsors.split('/')]
if sponsors == ['']:
sponsors = None
else:
if '&' in sponsors[-1]:
sN = sponsors[-1].split('&')
del sponsors[-1]
sponsors += [s.strip() for s in sN]
if mode == 'update':
try:
fm = frontmatter.load(f)
except:
print("Error at %s" % f)
raise
reformatted = str(frontmatter.dumps(fm))
if 'team' not in fm.metadata:
raise Exception("Error in %s" % f)
team = fm.metadata['team']
if 'links' not in fm.metadata['team']:
links = OrderedDict()
else:
links = fm.metadata['team']['links']
else:
data = OrderedDict()
team = OrderedDict()
links = OrderedDict()
data['title'] = 'FRC Team %s' % number
data['team'] = team
team['type'] = 'FRC'
team['number'] = int(number)
add_maybe(team, 'name', name)
add_maybe(team, 'rookie_year', rookie_year)
add_maybe(team, 'location', location)
if sponsors and mode != 'update':
team['sponsors'] = sponsors
if 'Github' in links:
links['GitHub'] = links['Github']
del links['Github']
add_maybe_web(links, 'Website', website)
add_maybe_web(links, 'Facebook', facebook)
add_maybe_web(links, 'Twitter', twitter)
add_maybe_web(links, 'YouTube', youtube)
add_maybe_web(links, 'GitHub', github)
add_maybe_web(links, 'Instagram', instagram)
add_maybe_web(links, 'Periscope', periscope)
if mode == 'update':
if links:
fm.metadata['team']['links'] = links
if fm.content.strip() == 'No content has been added for this team':
fm.content = '{% include remove_this_line_and_add_a_paragraph %}'
page = str(frontmatter.dumps(fm))
if reformatted == page:
# don't make gratuitious changes
continue
elif mode == 'new':
if links:
team['links'] = links
page = '---\n%s\n---\n\n{%% include remove_this_line_and_add_a_paragraph %%}\n' % (
yaml.safe_dump(data)
)
# roundtrip through frontmatter to get the formatting consistent
page = frontmatter.dumps(frontmatter.loads(page))
if not exists(dirname(f)):
os.makedirs(dirname(f))
with open(f, 'w') as fp:
fp.write(page)
if __name__ == '__main__':
main()
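# Example invocation (arguments per main() above: csv path, max team number, mode):
#   python genteams.py teams.csv 8000 update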
| [
"[email protected]"
] | |
0bfc3d132f23f8bb46e283bf3c17aac860f485f1 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/errorlog_for_lts_request.py | 7d39c203c27e140395863f4f45c45340b5c45009 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 8,936 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ErrorlogForLtsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'start_time': 'str',
'end_time': 'str',
'level': 'str',
'line_num': 'str',
'limit': 'int',
'search_type': 'str'
}
attribute_map = {
'start_time': 'start_time',
'end_time': 'end_time',
'level': 'level',
'line_num': 'line_num',
'limit': 'limit',
'search_type': 'search_type'
}
def __init__(self, start_time=None, end_time=None, level=None, line_num=None, limit=None, search_type=None):
"""ErrorlogForLtsRequest
The model defined in huaweicloud sdk
        :param start_time: Start time, in the format "yyyy-mm-ddThh:mm:ssZ", where T marks the start of the time portion and Z is the timezone offset (for example, Beijing time appears as +0800).
        :type start_time: str
        :param end_time: End time, in the same "yyyy-mm-ddThh:mm:ssZ" format. Only logs from within the past month can be queried.
        :type end_time: str
        :param level: Log level. Defaults to ALL.
        :type level: str
        :param line_num: Sequence number of a log line. Not needed on the first query; required for subsequent paginated queries, and available from the previous response. line_num must fall between start_time and end_time.
        :type line_num: str
        :param limit: Number of records per page of query results, in the range 1~100. Defaults to 10 when omitted.
        :type limit: int
        :param search_type: Search direction. Defaults to forwards. Used together with line_num: the search runs forwards or backwards starting from line_num.
        :type search_type: str
"""
self._start_time = None
self._end_time = None
self._level = None
self._line_num = None
self._limit = None
self._search_type = None
self.discriminator = None
self.start_time = start_time
self.end_time = end_time
if level is not None:
self.level = level
if line_num is not None:
self.line_num = line_num
if limit is not None:
self.limit = limit
if search_type is not None:
self.search_type = search_type
@property
def start_time(self):
"""Gets the start_time of this ErrorlogForLtsRequest.
        Start time, in the format "yyyy-mm-ddThh:mm:ssZ", where T marks the start of the time portion and Z is the timezone offset (for example, Beijing time appears as +0800).
:return: The start_time of this ErrorlogForLtsRequest.
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this ErrorlogForLtsRequest.
        Start time, in the format "yyyy-mm-ddThh:mm:ssZ", where T marks the start of the time portion and Z is the timezone offset (for example, Beijing time appears as +0800).
:param start_time: The start_time of this ErrorlogForLtsRequest.
:type start_time: str
"""
self._start_time = start_time
@property
def end_time(self):
"""Gets the end_time of this ErrorlogForLtsRequest.
        End time, in the format "yyyy-mm-ddThh:mm:ssZ", where T marks the start of the time portion and Z is the timezone offset (for example, Beijing time appears as +0800). Only logs from within the past month can be queried.
:return: The end_time of this ErrorlogForLtsRequest.
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this ErrorlogForLtsRequest.
        End time, in the format "yyyy-mm-ddThh:mm:ssZ", where T marks the start of the time portion and Z is the timezone offset (for example, Beijing time appears as +0800). Only logs from within the past month can be queried.
:param end_time: The end_time of this ErrorlogForLtsRequest.
:type end_time: str
"""
self._end_time = end_time
@property
def level(self):
"""Gets the level of this ErrorlogForLtsRequest.
        Log level. Defaults to ALL.
:return: The level of this ErrorlogForLtsRequest.
:rtype: str
"""
return self._level
@level.setter
def level(self, level):
"""Sets the level of this ErrorlogForLtsRequest.
        Log level. Defaults to ALL.
:param level: The level of this ErrorlogForLtsRequest.
:type level: str
"""
self._level = level
@property
def line_num(self):
"""Gets the line_num of this ErrorlogForLtsRequest.
        Sequence number of a log line. Not needed on the first query; required for subsequent paginated queries, and available from the previous response. line_num must fall between start_time and end_time.
:return: The line_num of this ErrorlogForLtsRequest.
:rtype: str
"""
return self._line_num
@line_num.setter
def line_num(self, line_num):
"""Sets the line_num of this ErrorlogForLtsRequest.
        Sequence number of a log line. Not needed on the first query; required for subsequent paginated queries, and available from the previous response. line_num must fall between start_time and end_time.
:param line_num: The line_num of this ErrorlogForLtsRequest.
:type line_num: str
"""
self._line_num = line_num
@property
def limit(self):
"""Gets the limit of this ErrorlogForLtsRequest.
        Number of records per page of query results, in the range 1~100. Defaults to 10 when omitted.
:return: The limit of this ErrorlogForLtsRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ErrorlogForLtsRequest.
        Number of records per page of query results, in the range 1~100. Defaults to 10 when omitted.
:param limit: The limit of this ErrorlogForLtsRequest.
:type limit: int
"""
self._limit = limit
@property
def search_type(self):
"""Gets the search_type of this ErrorlogForLtsRequest.
        Search direction. Defaults to forwards. Used together with line_num: the search runs forwards or backwards starting from line_num.
:return: The search_type of this ErrorlogForLtsRequest.
:rtype: str
"""
return self._search_type
@search_type.setter
def search_type(self, search_type):
"""Sets the search_type of this ErrorlogForLtsRequest.
        Search direction. Defaults to forwards. Used together with line_num: the search runs forwards or backwards starting from line_num.
:param search_type: The search_type of this ErrorlogForLtsRequest.
:type search_type: str
"""
self._search_type = search_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ErrorlogForLtsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
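# A minimal usage sketch (hypothetical values; not part of the generated SDK file):
#   req = ErrorlogForLtsRequest(start_time="2023-01-01T00:00:00+0800",
#                               end_time="2023-01-02T00:00:00+0800",
#                               level="ERROR", limit=20)
#   print(req.to_dict())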
| [
"[email protected]"
] | |
c06ba9ae8cbb221882a80e62e7997bda650aa489 | 3a69627c55058a4c0933d16b29d0d7a7c91e172c | /students/management/commands/enroll_reminder.py | 60b687eb6604cecdd298fc70012469af8e5fe88a | [] | no_license | xor0x/educa | bb3647eba80fbabd07cb3604dfb3bb76e3a7fe12 | 6b97146d2e9f412645ccf04b63bfaee246a43b9e | refs/heads/master | 2020-11-28T03:29:37.144939 | 2020-01-05T10:17:50 | 2020-01-05T10:17:50 | 229,693,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,350 | py | import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.mail import send_mass_mail
from django.contrib.auth.models import User
from django.db.models import Count
"""
from django.core import management
management.call_command('enroll_reminder', days=20)
"""
class Command(BaseCommand):
    help = 'Sends an e-mail reminder to users registered more \
        than N days ago who are not yet enrolled in any courses'
def add_arguments(self, parser):
parser.add_argument('--days', dest='days', type=int)
def handle(self, *args, **options):
emails = []
subject = 'Enroll in a course'
date_joined = datetime.date.today() - datetime.timedelta(days=options['days'])
users = User.objects.annotate(course_count=Count('courses_joined'))\
.filter(course_count=0, date_joined__lte=date_joined)
for user in users:
message = f"Dear {user.first_name},\n\n We noticed that you didn't" \
f"enroll in any courses yet. What are you waiting for?"
emails.append((subject,
message,
settings.DEFAULT_FROM_EMAIL,
[user.email]))
send_mass_mail(emails)
        self.stdout.write(f'Sent {len(emails)} reminders')
| [
"[email protected]"
] | |
e08edfe5a814aa3fdc8decd50ddae5f935b47d4f | f462679e25ee5dbae2a761f0222bc547f7b9da65 | /backup/srcPython/srcPython_desk_100119/out_minxss_sma_average.py | 780a1e8e0761b427b46f5edd893a2329c3362ac5 | [
"Apache-2.0"
] | permissive | FengYongQ/spock | f31a2f9cac58fbb1912f8e7b066b5318e0223835 | 08c01c01521429a70b5387e8769558e788f7cd3e | refs/heads/master | 2021-06-13T03:09:23.903196 | 2020-01-25T03:32:41 | 2020-01-25T03:32:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | # Reads the output of minxss_sma_average.py
from read_input_file import *
from read_output_file import *
from orbit_average import *
def out_minxss_sma_average(main_input_filename_list, date_ini):
    # The archived file ends right after this signature (200 bytes in total);
    # `pass` keeps the truncated stub importable.
    pass
| [
"[email protected]"
] |