blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e0dfb630e3c0d0a11d8d97f1a3ea11e4ee039b3f | 9139abff4dfec3c4c0623e42db2d44a866595752 | /test/test_metric.py | 7a707dd862755c51471ed48bea7b3115fd862a22 | [
"MIT"
] | permissive | LorinChen/lagom | 42e7c0c89c356b1cc7c160f12517fa01693e91c5 | 273bb7f5babb1f250f6dba0b5f62c6614f301719 | refs/heads/master | 2020-07-20T17:46:45.648603 | 2019-08-13T13:20:57 | 2019-08-13T13:20:57 | 206,686,173 | 1 | 0 | MIT | 2019-09-06T01:16:27 | 2019-09-06T01:16:24 | null | UTF-8 | Python | false | false | 12,830 | py | import pytest
import numpy as np
import torch
import gym
from gym.wrappers import TimeLimit
from lagom import RandomAgent
from lagom import EpisodeRunner
from lagom import StepRunner
from lagom.utils import numpify
from lagom.envs import make_vec_env
from lagom.envs.wrappers import StepInfo
from lagom.envs.wrappers import VecStepInfo
from lagom.metric import returns
from lagom.metric import bootstrapped_returns
from lagom.metric import td0_target
from lagom.metric import td0_error
from lagom.metric import gae
from lagom.metric import vtrace
from .sanity_env import SanityEnv
@pytest.mark.parametrize('gamma', [0.1, 0.99, 1.0])
def test_returns(gamma):
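    # returns(gamma, rewards) is expected to compute the discounted return-to-go
    # G_t = r_t + gamma * G_{t+1}; the hand-built y1..y6 lists below encode that recurrence.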
assert np.allclose(returns(1.0, [1, 2, 3]), [6, 5, 3])
assert np.allclose(returns(0.1, [1, 2, 3]), [1.23, 2.3, 3])
assert np.allclose(returns(1.0, [1, 2, 3, 4, 5]), [15, 14, 12, 9, 5])
assert np.allclose(returns(0.1, [1, 2, 3, 4, 5]), [1.2345, 2.345, 3.45, 4.5, 5])
assert np.allclose(returns(1.0, [1, 2, 3, 4, 5, 6, 7, 8]), [36, 35, 33, 30, 26, 21, 15, 8])
assert np.allclose(returns(0.1, [1, 2, 3, 4, 5, 6, 7, 8]), [1.2345678, 2.345678, 3.45678, 4.5678, 5.678, 6.78, 7.8, 8])
y1 = [0.1]
y2 = [0.1 + gamma*0.2, 0.2]
y3 = [0.1 + gamma*(0.2 + gamma*0.3),
0.2 + gamma*0.3,
0.3]
y4 = [0.1 + gamma*(0.2 + gamma*(0.3 + gamma*0.4)),
0.2 + gamma*(0.3 + gamma*0.4),
0.3 + gamma*0.4,
0.4]
y5 = [0.1 + gamma*(0.2 + gamma*(0.3 + gamma*(0.4 + gamma*0.5))),
0.2 + gamma*(0.3 + gamma*(0.4 + gamma*0.5)),
0.3 + gamma*(0.4 + gamma*0.5),
0.4 + gamma*0.5,
0.5]
y6 = [0.1 + gamma*(0.2 + gamma*(0.3 + gamma*(0.4 + gamma*(0.5 + gamma*0.6)))),
0.2 + gamma*(0.3 + gamma*(0.4 + gamma*(0.5 + gamma*0.6))),
0.3 + gamma*(0.4 + gamma*(0.5 + gamma*0.6)),
0.4 + gamma*(0.5 + gamma*0.6),
0.5 + gamma*0.6,
0.6]
assert np.allclose(returns(gamma, [0.1]), y1)
assert np.allclose(returns(gamma, [0.1, 0.2]), y2)
assert np.allclose(returns(gamma, [0.1, 0.2, 0.3]), y3)
assert np.allclose(returns(gamma, [0.1, 0.2, 0.3, 0.4]), y4)
assert np.allclose(returns(gamma, [0.1, 0.2, 0.3, 0.4, 0.5]), y5)
assert np.allclose(returns(gamma, [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]), y6)
@pytest.mark.parametrize('gamma', [0.1, 0.99, 1.0])
@pytest.mark.parametrize('last_V', [-3.0, 0.0, 2.0])
def test_bootstrapped_returns(gamma, last_V):
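    # Same recurrence, but the tail is bootstrapped with gamma * last_V; when reach_terminal
    # is True the bootstrap term is masked out (the last_V*0.0 cases below).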
y = [0.1 + gamma*(0.2 + gamma*(0.3 + gamma*(0.4 + gamma*last_V))),
0.2 + gamma*(0.3 + gamma*(0.4 + gamma*last_V)),
0.3 + gamma*(0.4 + gamma*last_V),
0.4 + gamma*last_V]
reach_terminal = False
rewards = [0.1, 0.2, 0.3, 0.4]
assert np.allclose(bootstrapped_returns(gamma, rewards, last_V, reach_terminal), y)
assert np.allclose(bootstrapped_returns(gamma, rewards, torch.tensor(last_V), reach_terminal), y)
y = [0.1 + gamma*(0.2 + gamma*(0.3 + gamma*(0.4 + gamma*last_V*0.0))),
0.2 + gamma*(0.3 + gamma*(0.4 + gamma*last_V*0.0)),
0.3 + gamma*(0.4 + gamma*last_V*0.0),
0.4 + gamma*last_V*0.0]
reach_terminal = True
rewards = [0.1, 0.2, 0.3, 0.4]
assert np.allclose(bootstrapped_returns(gamma, rewards, last_V, reach_terminal), y)
assert np.allclose(bootstrapped_returns(gamma, rewards, torch.tensor(last_V), reach_terminal), y)
y = [0.1 + gamma*(0.2 + gamma*(0.3 + gamma*(0.4 + gamma*(0.5 + gamma*last_V)))),
0.2 + gamma*(0.3 + gamma*(0.4 + gamma*(0.5 + gamma*last_V))),
0.3 + gamma*(0.4 + gamma*(0.5 + gamma*last_V)),
0.4 + gamma*(0.5 + gamma*last_V),
0.5 + gamma*last_V]
reach_terminal = False
rewards = [0.1, 0.2, 0.3, 0.4, 0.5]
assert np.allclose(bootstrapped_returns(gamma, rewards, last_V, reach_terminal), y)
assert np.allclose(bootstrapped_returns(gamma, rewards, torch.tensor(last_V), reach_terminal), y)
y = [0.1 + gamma*(0.2 + gamma*(0.3 + gamma*(0.4 + gamma*(0.5 + gamma*last_V*0.0)))),
0.2 + gamma*(0.3 + gamma*(0.4 + gamma*(0.5 + gamma*last_V*0.0))),
0.3 + gamma*(0.4 + gamma*(0.5 + gamma*last_V*0.0)),
0.4 + gamma*(0.5 + gamma*last_V*0.0),
0.5 + gamma*last_V*0.0]
reach_terminal = True
rewards = [0.1, 0.2, 0.3, 0.4, 0.5]
assert np.allclose(bootstrapped_returns(gamma, rewards, last_V, reach_terminal), y)
assert np.allclose(bootstrapped_returns(gamma, rewards, torch.tensor(last_V), reach_terminal), y)
@pytest.mark.parametrize('gamma', [0.1, 0.99, 1.0])
@pytest.mark.parametrize('last_V', [-3.0, 0.0, 2.0])
def test_td0_target(gamma, last_V):
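    # TD(0) target: r_t + gamma * V_{t+1}, with last_V standing in for the value after the
    # final step (masked to zero when the episode reached a terminal state).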
y = [0.1 + gamma*2,
0.2 + gamma*3,
0.3 + gamma*4,
0.4 + gamma*last_V*0.0]
rewards = [0.1, 0.2, 0.3, 0.4]
Vs = [1, 2, 3, 4]
reach_terminal = True
assert np.allclose(td0_target(gamma, rewards, Vs, last_V, reach_terminal), y)
assert np.allclose(td0_target(gamma, rewards, torch.tensor(Vs), torch.tensor(last_V), reach_terminal), y)
y = [0.1 + gamma*2,
0.2 + gamma*3,
0.3 + gamma*4,
0.4 + gamma*last_V]
rewards = [0.1, 0.2, 0.3, 0.4]
Vs = [1, 2, 3, 4]
reach_terminal = False
assert np.allclose(td0_target(gamma, rewards, Vs, last_V, reach_terminal), y)
assert np.allclose(td0_target(gamma, rewards, torch.tensor(Vs), torch.tensor(last_V), reach_terminal), y)
y = [0.1 + gamma*2,
0.2 + gamma*3,
0.3 + gamma*4,
0.4 + gamma*5,
0.5 + gamma*6,
0.6 + gamma*last_V*0.0]
rewards = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
Vs = [1, 2, 3, 4, 5, 6]
reach_terminal = True
assert np.allclose(td0_target(gamma, rewards, Vs, last_V, reach_terminal), y)
assert np.allclose(td0_target(gamma, rewards, torch.tensor(Vs), torch.tensor(last_V), reach_terminal), y)
y = [0.1 + gamma*2,
0.2 + gamma*3,
0.3 + gamma*4,
0.4 + gamma*5,
0.5 + gamma*6,
0.6 + gamma*last_V]
rewards = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
Vs = [1, 2, 3, 4, 5, 6]
reach_terminal = False
assert np.allclose(td0_target(gamma, rewards, Vs, last_V, reach_terminal), y)
assert np.allclose(td0_target(gamma, rewards, torch.tensor(Vs), torch.tensor(last_V), reach_terminal), y)
@pytest.mark.parametrize('gamma', [0.1, 0.99, 1.0])
@pytest.mark.parametrize('last_V', [-3.0, 0.0, 2.0])
def test_td0_error(gamma, last_V):
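    # TD(0) error: delta_t = r_t + gamma * V_{t+1} - V_t, with the same last_V masking at termination.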
y = [0.1 + gamma*2 - 1,
0.2 + gamma*3 - 2,
0.3 + gamma*4 - 3,
0.4 + gamma*last_V*0.0 - 4]
rewards = [0.1, 0.2, 0.3, 0.4]
Vs = [1, 2, 3, 4]
reach_terminal = True
assert np.allclose(td0_error(gamma, rewards, Vs, last_V, reach_terminal), y)
assert np.allclose(td0_error(gamma, rewards, torch.tensor(Vs), torch.tensor(last_V), reach_terminal), y)
y = [0.1 + gamma*2 - 1,
0.2 + gamma*3 - 2,
0.3 + gamma*4 - 3,
0.4 + gamma*last_V - 4]
rewards = [0.1, 0.2, 0.3, 0.4]
Vs = [1, 2, 3, 4]
reach_terminal = False
assert np.allclose(td0_error(gamma, rewards, Vs, last_V, reach_terminal), y)
assert np.allclose(td0_error(gamma, rewards, torch.tensor(Vs), torch.tensor(last_V), reach_terminal), y)
y = [0.1 + gamma*2 - 1,
0.2 + gamma*3 - 2,
0.3 + gamma*4 - 3,
0.4 + gamma*5 - 4,
0.5 + gamma*6 - 5,
0.6 + gamma*last_V*0.0 - 6]
rewards = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
Vs = [1, 2, 3, 4, 5, 6]
reach_terminal = True
assert np.allclose(td0_error(gamma, rewards, Vs, last_V, reach_terminal), y)
assert np.allclose(td0_error(gamma, rewards, torch.tensor(Vs), torch.tensor(last_V), reach_terminal), y)
y = [0.1 + gamma*2 - 1,
0.2 + gamma*3 - 2,
0.3 + gamma*4 - 3,
0.4 + gamma*5 - 4,
0.5 + gamma*6 - 5,
0.6 + gamma*last_V - 6]
rewards = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
Vs = [1, 2, 3, 4, 5, 6]
reach_terminal = False
assert np.allclose(td0_error(gamma, rewards, Vs, last_V, reach_terminal), y)
assert np.allclose(td0_error(gamma, rewards, torch.tensor(Vs), torch.tensor(last_V), reach_terminal), y)
def test_gae():
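    # GAE: A_t = sum_l (gamma * lam)^l * delta_{t+l} built from TD(0) errors, called below as
    # gae(gamma, lam, rewards, Vs, last_V, reach_terminal). In the first case the deltas are
    # [2.0, 3.0, 0.9], which backward-accumulate to the expected advantages [3.725, 3.45, 0.9].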
rewards = [1, 2, 3]
Vs = [0.1, 1.1, 2.1]
assert np.allclose(gae(1.0, 0.5, rewards, Vs, 10, True),
[3.725, 3.45, 0.9])
assert np.allclose(gae(1.0, 0.5, rewards, torch.tensor(Vs), torch.tensor(10), True),
[3.725, 3.45, 0.9])
assert np.allclose(gae(0.1, 0.2, rewards, Vs, 10, True),
[1.03256, 1.128, 0.9])
assert np.allclose(gae(0.1, 0.2, rewards, torch.tensor(Vs), torch.tensor(10), True),
[1.03256, 1.128, 0.9])
rewards = [1, 2, 3]
Vs = [0.5, 1.5, 2.5]
assert np.allclose(gae(1.0, 0.5, rewards, Vs, 99, True),
[3.625, 3.25, 0.5])
assert np.allclose(gae(1.0, 0.5, rewards, torch.tensor(Vs), torch.tensor(99), True),
[3.625, 3.25, 0.5])
assert np.allclose(gae(0.1, 0.2, rewards, Vs, 99, True),
[0.6652, 0.76, 0.5])
assert np.allclose(gae(0.1, 0.2, rewards, torch.tensor(Vs), torch.tensor(99), True),
[0.6652, 0.76, 0.5])
rewards = [1, 2, 3, 4, 5]
Vs = [0.5, 1.5, 2.5, 3.5, 4.5]
assert np.allclose(gae(1.0, 0.5, rewards, Vs, 20, False),
[6.40625, 8.8125, 11.625, 15.25, 20.5])
assert np.allclose(gae(1.0, 0.5, rewards, torch.tensor(Vs), torch.tensor(20), False),
[6.40625, 8.8125, 11.625, 15.25, 20.5])
assert np.allclose(gae(0.1, 0.2, rewards, Vs, 20, False),
[0.665348, 0.7674, 0.87, 1, 2.5])
assert np.allclose(gae(0.1, 0.2, rewards, torch.tensor(Vs), torch.tensor(20), False),
[0.665348, 0.7674, 0.87, 1, 2.5])
rewards = [1, 2, 3, 4, 5]
Vs = [0.1, 1.1, 2.1, 3.1, 4.1]
assert np.allclose(gae(1.0, 0.5, rewards, Vs, 10, False),
[5.80625, 7.6125, 9.225, 10.45, 10.9])
assert np.allclose(gae(1.0, 0.5, rewards, torch.tensor(Vs), torch.tensor(10), False),
[5.80625, 7.6125, 9.225, 10.45, 10.9])
assert np.allclose(gae(0.1, 0.2, rewards, Vs, 10, False),
[1.03269478, 1.1347393, 1.23696, 1.348, 1.9])
assert np.allclose(gae(0.1, 0.2, rewards, torch.tensor(Vs), torch.tensor(10), False),
[1.03269478, 1.1347393, 1.23696, 1.348, 1.9])
rewards = [1, 2, 3, 4, 5, 6, 7, 8]
Vs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
assert np.allclose(gae(1.0, 0.5, rewards, Vs, 30, True),
[5.84375, 7.6875, 9.375, 10.75, 11.5, 11., 8, 0.])
assert np.allclose(gae(1.0, 0.5, rewards, torch.tensor(Vs), torch.tensor(30), True),
[5.84375, 7.6875, 9.375, 10.75, 11.5, 11., 8, 0.])
assert np.allclose(gae(0.1, 0.2, rewards, Vs, 30, True),
[0.206164098, 0.308204915, 0.410245728, 0.5122864, 0.61432, 0.716, 0.8, 0])
assert np.allclose(gae(0.1, 0.2, rewards, torch.tensor(Vs), torch.tensor(30), True),
[0.206164098, 0.308204915, 0.410245728, 0.5122864, 0.61432, 0.716, 0.8, 0])
@pytest.mark.parametrize('gamma', [0.1, 1.0])
@pytest.mark.parametrize('last_V', [0.3, [0.5]])
@pytest.mark.parametrize('reach_terminal', [True, False])
@pytest.mark.parametrize('clip_rho', [0.5, 1.0])
@pytest.mark.parametrize('clip_pg_rho', [0.3, 1.1])
def test_vtrace(gamma, last_V, reach_terminal, clip_rho, clip_pg_rho):
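    # V-trace: importance ratios rho = exp(target_logprob - behavior_logprob) are clipped by
    # clip_rho for the value targets vs and by clip_pg_rho for the advantages As; the ground
    # truth block below spells out the exact formulas being checked.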
behavior_logprobs = [1, 2, 3]
target_logprobs = [4, 5, 6]
Rs = [7, 8, 9]
Vs = [10, 11, 12]
vs_test, As_test = vtrace(behavior_logprobs, target_logprobs, gamma, Rs, Vs, last_V, reach_terminal, clip_rho, clip_pg_rho)
# ground truth calculation
behavior_logprobs = numpify(behavior_logprobs, np.float32)
target_logprobs = numpify(target_logprobs, np.float32)
Rs = numpify(Rs, np.float32)
Vs = numpify(Vs, np.float32)
last_V = numpify(last_V, np.float32)
rhos = np.exp(target_logprobs - behavior_logprobs)
clipped_rhos = np.minimum(clip_rho, rhos)
cs = np.minimum(1.0, rhos)
deltas = clipped_rhos*td0_error(gamma, Rs, Vs, last_V, reach_terminal)
vs = np.array([Vs[0] + gamma**0*1*deltas[0] + gamma*cs[0]*deltas[1] + gamma**2*cs[0]*cs[1]*deltas[2],
Vs[1] + gamma**0*1*deltas[1] + gamma*cs[1]*deltas[2],
Vs[2] + gamma**0*1*deltas[2]])
vs_next = np.append(vs[1:], (1. - reach_terminal)*last_V)
clipped_pg_rhos = np.minimum(clip_pg_rho, rhos)
As = clipped_pg_rhos*(Rs + gamma*vs_next - Vs)
assert np.allclose(vs, vs_test)
assert np.allclose(As, As_test)
| [
"[email protected]"
] | |
b00094732bf4e992d6d1f015c038435845b72877 | f15d9b6513a053258e1b96fe54d14ee84e92b33a | /wirefox/db/moz_places.py | 561d64e34c1c1363ef89b2f1850d4faf8d44a68b | [] | no_license | amol9/wirefox | 867cbd00c9e250bd91509dfbc1baf751871d8f42 | 3f8561d0c6d01773d510bae337b9348132e3a5c8 | refs/heads/master | 2021-01-13T21:14:12.091991 | 2017-03-13T14:29:14 | 2017-03-13T14:29:14 | 81,830,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py |
from .time_window import TimeWindow
from .firefox_db import FirefoxDB
class MozPlaces:
def __init__(self):
self._firefox_db = None
def query(self, url=None, title=None, start_time=None, end_time=None, period=None):
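        # Build LIKE filters for url/title plus an optional time window; last_visit_date in
        # Firefox's places.sqlite is stored in microseconds since the epoch, hence the
        # timestamp() * 1000000 conversions below.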
if self._firefox_db is None:
self._firefox_db = FirefoxDB()
self._firefox_db.open_places_db()
session = self._firefox_db.session
moz_places = self._firefox_db.get_moz_places()
q = session.query(moz_places)
if url is not None:
q = q.filter(moz_places.url.like('%s'%url))
if title is not None:
q = q.filter(moz_places.title.like('%%%s%%'%title))
if start_time is not None or end_time is not None or period is not None:
tw = TimeWindow(start=start_time, end=end_time, period=period)
q = q.filter(moz_places.last_visit_date >= (tw.start_time.timestamp() * 1000000))
q = q.filter(moz_places.last_visit_date <= (tw.end_time.timestamp() * 1000000))
for r in q:
print(r.url, '|', r.visit_count)
| [
"[email protected]"
] | |
b25ed1e03530488687d2617d36d418c6756cd81b | 0b2d1b4e2b7e8aa9440e616ab2528b13ff5918e5 | /babi/buf.py | e1b0ee3e8e4f097d9fda0e371aac8ceeebe0316b | [
"MIT"
] | permissive | tech-chad/babi | 2b332568460fa78acabb9913fdfe084b3a15fe44 | 144bbb9daf8d879f5f3ceb30a6c20d47ea2f538b | refs/heads/master | 2022-11-02T06:12:43.419506 | 2020-05-27T22:50:30 | 2020-05-27T22:50:30 | 272,864,505 | 0 | 0 | MIT | 2020-06-17T03:00:15 | 2020-06-17T03:00:15 | null | UTF-8 | Python | false | false | 9,033 | py | import bisect
import contextlib
from typing import Callable
from typing import Generator
from typing import Iterator
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Tuple
from babi._types import Protocol
from babi.horizontal_scrolling import line_x
from babi.horizontal_scrolling import scrolled_line
from babi.horizontal_scrolling import wcwidth
from babi.margin import Margin
SetCallback = Callable[['Buf', int, str], None]
DelCallback = Callable[['Buf', int, str], None]
InsCallback = Callable[['Buf', int], None]
def _offsets(s: str, tab_size: int) -> Tuple[int, ...]:
ret = [0]
for c in s:
if c == '\t':
ret.append(ret[-1] + (tab_size - ret[-1] % tab_size))
else:
ret.append(ret[-1] + wcwidth(c))
return tuple(ret)
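# For example, with tab_size=4 and single-width characters, _offsets('a\tb', 4) yields
# (0, 1, 4, 5): the tab advances the display column to the next tab stop.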
class Modification(Protocol):
def __call__(self, buf: 'Buf') -> None: ...
class SetModification(NamedTuple):
idx: int
s: str
def __call__(self, buf: 'Buf') -> None:
buf[self.idx] = self.s
class InsModification(NamedTuple):
idx: int
s: str
def __call__(self, buf: 'Buf') -> None:
buf.insert(self.idx, self.s)
class DelModification(NamedTuple):
idx: int
def __call__(self, buf: 'Buf') -> None:
del buf[self.idx]
class Buf:
def __init__(self, lines: List[str], tab_size: int = 4) -> None:
self._lines = lines
self.tab_size = tab_size
self.file_y = self.y = self._x = self._x_hint = 0
self._set_callbacks: List[SetCallback] = [self._set_cb]
self._del_callbacks: List[DelCallback] = [self._del_cb]
self._ins_callbacks: List[InsCallback] = [self._ins_cb]
self._positions: List[Optional[Tuple[int, ...]]] = []
# read only interface
def __repr__(self) -> str:
return (
f'{type(self).__name__}('
f'{self._lines!r}, x={self.x}, y={self.y}, file_y={self.file_y}'
f')'
)
def __bool__(self) -> bool:
return bool(self._lines)
def __getitem__(self, idx: int) -> str:
return self._lines[idx]
def __iter__(self) -> Iterator[str]:
yield from self._lines
def __len__(self) -> int:
return len(self._lines)
# mutators
def __setitem__(self, idx: int, val: str) -> None:
if idx < 0:
idx %= len(self)
victim = self._lines[idx]
self._lines[idx] = val
for set_callback in self._set_callbacks:
set_callback(self, idx, victim)
def __delitem__(self, idx: int) -> None:
if idx < 0:
idx %= len(self)
victim = self._lines[idx]
del self._lines[idx]
for del_callback in self._del_callbacks:
del_callback(self, idx, victim)
def insert(self, idx: int, val: str) -> None:
if idx < 0:
idx %= len(self)
self._lines.insert(idx, val)
for ins_callback in self._ins_callbacks:
ins_callback(self, idx)
# also mutators, but implemented using above functions
def append(self, val: str) -> None:
self.insert(len(self), val)
def pop(self, idx: int = -1) -> str:
victim = self[idx]
del self[idx]
return victim
def restore_eof_invariant(self) -> None:
"""the file lines will always contain a blank empty string at the end'
to simplify rendering. call this whenever the last line may change
"""
if self[-1] != '':
self.append('')
def set_tab_size(self, tab_size: int) -> None:
self.tab_size = tab_size
self._positions = [None]
# event handling
def add_set_callback(self, cb: SetCallback) -> None:
self._set_callbacks.append(cb)
def remove_set_callback(self, cb: SetCallback) -> None:
self._set_callbacks.remove(cb)
def add_del_callback(self, cb: DelCallback) -> None:
self._del_callbacks.append(cb)
def remove_del_callback(self, cb: DelCallback) -> None:
self._del_callbacks.remove(cb)
def add_ins_callback(self, cb: InsCallback) -> None:
self._ins_callbacks.append(cb)
def remove_ins_callback(self, cb: InsCallback) -> None:
self._ins_callbacks.remove(cb)
@contextlib.contextmanager
def record(self) -> Generator[List[Modification], None, None]:
modifications: List[Modification] = []
def set_cb(buf: 'Buf', idx: int, victim: str) -> None:
modifications.append(SetModification(idx, victim))
def del_cb(buf: 'Buf', idx: int, victim: str) -> None:
modifications.append(InsModification(idx, victim))
def ins_cb(buf: 'Buf', idx: int) -> None:
modifications.append(DelModification(idx))
self.add_set_callback(set_cb)
self.add_del_callback(del_cb)
self.add_ins_callback(ins_cb)
try:
yield modifications
finally:
self.remove_ins_callback(ins_cb)
self.remove_del_callback(del_cb)
self.remove_set_callback(set_cb)
def apply(self, modifications: List[Modification]) -> List[Modification]:
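        # Replays the recorded modifications newest-first; because the replay happens inside a
        # nested record(), the returned list undoes this apply, so record()/apply() give undo/redo.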
with self.record() as ret_modifications:
for modification in reversed(modifications):
modification(self)
return ret_modifications
# position properties
@property
def displayable_count(self) -> int:
return len(self._lines) - self.file_y
@property
def x(self) -> int:
return self._x
@x.setter
def x(self, x: int) -> None:
self._x = x
self._x_hint = self._cursor_x
def _extend_positions(self, idx: int) -> None:
self._positions.extend([None] * (1 + idx - len(self._positions)))
def _set_cb(self, buf: 'Buf', idx: int, victim: str) -> None:
self._extend_positions(idx)
self._positions[idx] = None
def _del_cb(self, buf: 'Buf', idx: int, victim: str) -> None:
self._extend_positions(idx)
del self._positions[idx]
def _ins_cb(self, buf: 'Buf', idx: int) -> None:
self._extend_positions(idx)
self._positions.insert(idx, None)
def line_positions(self, idx: int) -> Tuple[int, ...]:
self._extend_positions(idx)
value = self._positions[idx]
if value is None:
value = _offsets(self._lines[idx], self.tab_size)
self._positions[idx] = value
return value
def line_x(self, margin: Margin) -> int:
return line_x(self._cursor_x, margin.cols)
@property
def _cursor_x(self) -> int:
return self.line_positions(self.y)[self.x]
def cursor_position(self, margin: Margin) -> Tuple[int, int]:
y = self.y - self.file_y + margin.header
x = self._cursor_x - self.line_x(margin)
return y, x
# rendered lines
def rendered_line(self, idx: int, margin: Margin) -> str:
x = self._cursor_x if idx == self.y else 0
expanded = self._lines[idx].expandtabs(self.tab_size)
return scrolled_line(expanded, x, margin.cols)
# movement
def scroll_screen_if_needed(self, margin: Margin) -> None:
# if the `y` is not on screen, make it so
if not (self.file_y <= self.y < self.file_y + margin.body_lines):
self.file_y = max(self.y - margin.body_lines // 2, 0)
def _set_x_after_vertical_movement(self) -> None:
positions = self.line_positions(self.y)
x = bisect.bisect_left(positions, self._x_hint)
x = min(len(self._lines[self.y]), x)
if positions[x] > self._x_hint:
x -= 1
self._x = x
def up(self, margin: Margin) -> None:
if self.y > 0:
self.y -= 1
if self.y < self.file_y:
self.file_y = max(self.file_y - margin.scroll_amount, 0)
self._set_x_after_vertical_movement()
def down(self, margin: Margin) -> None:
if self.y < len(self._lines) - 1:
self.y += 1
if self.y >= self.file_y + margin.body_lines:
self.file_y += margin.scroll_amount
self._set_x_after_vertical_movement()
def right(self, margin: Margin) -> None:
if self.x >= len(self._lines[self.y]):
if self.y < len(self._lines) - 1:
self.down(margin)
self.x = 0
else:
self.x += 1
def left(self, margin: Margin) -> None:
if self.x == 0:
if self.y > 0:
self.up(margin)
self.x = len(self._lines[self.y])
else:
self.x -= 1
# screen movement
def file_up(self, margin: Margin) -> None:
if self.file_y > 0:
self.file_y -= 1
if self.y > self.file_y + margin.body_lines - 1:
self.up(margin)
def file_down(self, margin: Margin) -> None:
if self.file_y < len(self._lines) - 1:
self.file_y += 1
if self.y < self.file_y:
self.down(margin)
| [
"[email protected]"
] | |
a7d0ef36c591c177b20512e93fa42951fb059e14 | e5202e0f36c15b8898920a461a866168fa059947 | /clirad/h2o_trp/band_8/atmpro_trp/cliradlw_1013f91/param.py | ad74245f6dbe533491fcea57312d88984757cf90 | [] | no_license | qAp/analysis_-_new_kdist_param | 653c9873751646f6fa9481544e98ed6065a16155 | 272dc3667030cdb18664108d0bd78fee03736144 | refs/heads/master | 2021-06-11T04:21:35.105924 | 2019-08-04T13:13:07 | 2019-08-04T13:13:07 | 136,108,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | PARAM = {'commitnumber': '1013f91', 'band': [8], 'molecule': {'h2o': 'atmpro'}, 'atmpro': 'trp', 'tsfc': 300}
PARAM_LBLNEW = {'atmpro': 'trp', 'band': '6', 'commitnumber': '5014a19', 'conc': None, 'dv': 0.001, 'klin': 5e-25, 'molecule': 'h2o', 'ng_adju': [0], 'ng_refs': [4], 'nv': 1000, 'option_compute_btable': 0, 'option_compute_ktable': 0, 'option_wgt_flux': 1, 'option_wgt_k': 1, 'ref_pts': [[600, 250]], 'tsfc': 300, 'vmax': 1215, 'vmin': 1100, 'w_diffuse': [[1.66, 1.66, 1.7, 1.8]], 'wgt': [[0.3, 0.45, 0.6, 0.95]]} | [
"[email protected]"
] | |
758eb49de79ccd0c369067efc4283f514df09080 | f77b0f2cc709b9670e6b4dc7145a6ea5368585d2 | /templates/compiled/macros/grids.py | 6c7879946fd4f337eb9525497945a87c107b0aa0 | [] | no_license | sgammon/StonerHub | 45ccac6bd349200bbc75c494002c3ffeb082dcb8 | a81f7fdd2c7118c6cea3c25ef9f53f272d27b0cc | refs/heads/master | 2021-01-20T10:54:47.546251 | 2011-11-07T12:02:20 | 2011-11-07T12:02:20 | 2,664,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | from __future__ import division
from jinja2.runtime import LoopContext, TemplateReference, Macro, Markup, TemplateRuntimeError, missing, concat, escape, markup_join, unicode_join, to_string, identity, TemplateNotFound
def run(environment):
name = 'source/macros/grids.html'
def root(context, environment=environment):
if 0: yield None
blocks = {}
debug_info = ''
return locals() | [
"[email protected]"
] | |
e5735a8778ca5d33468e7d9caf2b4a808fae37fb | bbe0cb13a7d9ba461b2af6a69f18f8e7ef2301fe | /code_processing/Lc2D.1.0e-02_LcSlab.1.0e-02/calc_k_vs_t.py | 772e49ece97cb6b3a37614a19f8f80df774f52ae | [] | no_license | jimsrc/corpad | b52b841f0a1b8ddca98236d4a61a9d6079699aff | 60756729b3cc1206352b95b2038f87b75ac749ef | refs/heads/master | 2021-03-27T10:10:39.756497 | 2018-02-04T18:04:58 | 2018-02-04T18:04:58 | 99,851,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,055 | py | ##
from funcs import *
import os
def generate_k_vs_t(Ek, dir_data):
dir_info = '%s/info' % dir_data
fname_orient = '%s/orientations.in' % dir_info
fname_plas = '%s/plas.in' % dir_info
fname_turb = '%s/turb.in' % dir_info
NPLAS = count_lines_in_file(fname_orient)
# orden del nro max de giroperiodos
order_tmax = int(log10(value(fname_plas, 'nmax_gyroperiods')))
# nro de filas x archivo (nro de puntos q le pedi a la simulacion)
nfil = int(order_tmax*value(fname_plas, 'npoints') + 1)
ncol = 6 # nro de columnas x archivo: (t, x, y, z, mu, err-gamma)
NBs = int(value(fname_plas, 'nro_Bfield_realizations'))
rigidity = value(fname_plas, 'rigidity')
#--------------
Nm = int(value(fname_turb, 'n_modos'))
Bo = value(fname_turb, 'Bunif')
Sig = value(fname_turb, 'sigma_Bo_ratio')
perc_2d = value(fname_turb, 'percent_2d')
perc_slab = value(fname_turb, 'percent_slab')
Lc_2d = value(fname_turb, 'Lc_2d')
Lc_slab = value(fname_turb, 'Lc_slab')
lambda_min = value(fname_turb, 'lambda_min')
lambda_max = value(fname_turb, 'lambda_max')
print " ------> Ek [eV]: %g" % Ek
calc_k_versus_t(dir_data, Ek, Sig, NPLAS, NBs, nfil, ncol, Bo,
Lc_2d, Lc_slab, Nm, perc_slab)
def calc_k_versus_t(dir_data, Ek, Sig, NPLAS, NBs, nfil, ncol, Bo,
Lc_2d, Lc_slab, Nm, perc_slab):
dir_plots = '../../plots'
#dir_data= '../../output/Ek.%1.1eeV/sig%d' % (Ek, Sig)
"""dir_out = '../../post/Ek.%1.1eeV/Nm%03d/slab%1.2f/sig.%1.1e/Lc2D.%1.1e_LcSlab.%1.1e' % (Ek, Nm, perc_slab, Sig, Lc_2d, Lc_slab)
try: os.system('mkdir %s' % dir_out)
except: print ""
"""
dir_out = '../../post'
fname_out = '%s/k_vs_t_Ek.%1.1eeV_Nm%03d_slab%1.2f_sig.%1.1e_Lc2d.%1.1e_LcSlab.%1.1e.dat' % (dir_out, Ek, Nm, perc_slab, Sig, Lc_2d, Lc_slab)
#---------------------
# nok : nro de files q existe Y tienen data
# nbad : nro de files q solicite y no existen
# time : grilla temporal
DATA, time, nok, nbad = load_trajectories(NBs, NPLAS, nfil, ncol, dir_data)
print " nro de plas: ", NPLAS
print " nro de B-realizations: ", NBs
print " nro de ptos por trayectoria: %d\n" % nfil
print " nro de archivos q existe c/data: %d/%d " % (nok, nok+nbad)
print " nro de archivos q pedi y NO existen: %d/%d " % (nbad, nok+nbad)
#---------------------
every = 1 # no en c/tiempo, sino cada 'every'
tt, x2, y2, z2 = sqr_deviations(DATA, time, every)
AUinkm = 1.5e8
AUincm = AUinkm*1e5 # [cm]
r2 = x2 + y2
r2 = r2*AUincm**2 # [cm^2]
x2 = x2*AUincm**2 # [cm^2]
y2 = y2*AUincm**2 # [cm^2]
z2 = z2*AUincm**2 # [cm^2]
wc = calc_omega(Bo, Ek) #4.781066E-01 #4.325188E-01 #Ek=1e8eV #4.735689E-01 # Ek=1e7eV #4.781066E-01 # Ek=1e6eV
print " wc[s-1]: ", wc
tt = tt*wc # [1]
#-------------------
kxx = x2/(2.*tt/wc) # [cm2/s]
kyy = y2/(2.*tt/wc) # [cm2/s]
kzz = z2/(2.*tt/wc) # [cm2/s]
#-- guarda data kxx(t)
data_out = array([tt, kxx, kyy, kzz]).T
data_out = data_out[1:] # el 1er tiempo no lo guardo xq es division por zero 1/2t
print " ---> guardando: %s" % fname_out
print ""
savetxt(fname_out, data_out, fmt='%12.2f')
| [
"[email protected]"
] | |
566ce5e5e8474e086d441fd7c8275b972cf78d88 | 45c170fb0673deece06f3055979ece25c3210380 | /toontown/building/DoorTypes.py | 80898b9b1397fad9906528cf98341b9f7b720109 | [] | no_license | MTTPAM/PublicRelease | 5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f | 825f562d5021c65d40115d64523bb850feff6a98 | refs/heads/master | 2021-07-24T09:48:32.607518 | 2018-11-13T03:17:53 | 2018-11-13T03:17:53 | 119,129,731 | 2 | 6 | null | 2018-11-07T22:10:10 | 2018-01-27T03:43:39 | Python | UTF-8 | Python | false | false | 207 | py | #Embedded file name: toontown.building.DoorTypes
EXT_STANDARD = 1
INT_STANDARD = 2
EXT_HQ = 3
INT_HQ = 4
EXT_HOUSE = 5
INT_HOUSE = 6
EXT_COGHQ = 7
INT_COGHQ = 8
EXT_KS = 9
INT_KS = 10
EXT_ANIM_STANDARD = 11
| [
"[email protected]"
] | |
de4286376cdead9bc0b4496d09485374ed60e6cc | 7202b4cf562fcacf2f684c1985b448b5780c4967 | /itp1/07c.py | 6be9c7cdedee836bbe698a934c291f7e9f692031 | [] | no_license | mskt4440/AOJ | ce523182dbd75e85c1bba43d7d23217711b8e617 | f6d9ca36e77a88ed9ddbeb53340a745bf8cac157 | refs/heads/master | 2021-07-07T00:34:23.034606 | 2020-09-24T02:25:43 | 2020-09-24T02:25:43 | 188,768,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | #
# 7c
#
def main():
r, c = map(int, input().split())
T = []
S = [0 for _ in range(c+1)]
for i in range(r):
C = list(map(int, input().split()))
C.append(sum(C))
T.append(C)
for i in range(r):
for j in range(c+1):
S[j] += T[i][j]
T.append(S)
for i in range(r+1):
print(*T[i])
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ed43ef6c060ebc2daed645b5f334029f9b430d09 | 95d20fe737d711cf92d68130eb59b6aef4435ec2 | /正则表达式/非捕获分组.py | 651491d6f2bb7a136ac85dd6e1b2fef4dc0d1d70 | [] | no_license | CCH21/Python | d11b93851579d85f972828c760a96c5be1f79531 | 33e218810856971f3f1f97a2b8a4c8dce761362e | refs/heads/master | 2022-04-29T11:48:01.816283 | 2022-03-17T11:53:01 | 2022-03-17T11:53:01 | 226,452,057 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | import re
s = 'imgl.jpg, img2.jpg, img3.bmp'
# 捕获分组
p = r'\w+(\.jpg)'
mlist = re.findall(p, s)
print(mlist)
# 非捕获分组
p = r'\w+(?:\.jpg)'
mlist = re.findall(p, s)
print(mlist)
| [
"[email protected]"
] | |
82b049d66f3602dbc6cae01e74fd629f0634ef53 | b4cf3438011c9521561143e677736c611ff19a0c | /boxx/ylsys.py | 7643ca805dfe8ff7b284b931db1a46f85be72c4c | [] | no_license | BUCT-Vision/boxx | 3e5c24af20c06d4943dc04859e6cbfb577fe8a48 | 3d405c9ad744d2ff9f6f5d9efb1e31962474565b | refs/heads/master | 2020-03-18T17:35:18.573106 | 2018-09-18T02:49:10 | 2018-09-18T02:49:10 | 135,037,392 | 2 | 0 | null | 2018-09-18T02:49:11 | 2018-05-27T10:44:44 | Python | UTF-8 | Python | false | false | 3,683 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
A module provide system info and Python Info for boxx
@author: yanglei
"""
import sys, os
from os import environ
def jupyterNotebookOrQtConsole():
env = 'Unknow'
cmd = 'ps -ef'
try:
with os.popen(cmd) as stream:
if not py2:
stream = stream._stream
s = stream.read()
pid = os.getpid()
ls = list(filter(lambda l:'jupyter' in l and str(pid) in l.split(' '), s.split('\n')))
if len(ls) == 1:
l = ls[0]
import re
pa = re.compile(r'kernel-([-a-z0-9]*)\.json')
rs = pa.findall(l)
if len(rs):
r = rs[0]
if len(r)<12:
env = 'qtipython'
else :
env = 'jn'
return env
except:
return env
print(r, env)
pyv = sys.version_info.major
py3 = (pyv == 3)
py2 = (pyv == 2)
linuxYl = sys.platform.startswith('linux')
winYl = sys.platform.startswith('win')
osxYl = sys.platform.startswith('darwin')
import multiprocessing as __module
cpun = __module.cpu_count()
cloud = cpun > 16
if linuxYl or osxYl:
cuda = not os.system('nvcc --version> /dev/null 2>&1')
elif winYl:
import subprocess
try:
cuda = not subprocess.call('nvcc --version', creationflags=0x00000008)
except FileNotFoundError:
cuda = False
usecuda = 'auto' # auto: auto, False: not use
if linuxYl or osxYl:
homeYl = os.getenv('HOME') + '/'
tmpYl = '/tmp/'
elif winYl:
homeYl = os.path.expanduser("~")
tmpYl = os.getenv('TMP') + '\\'
class __TmpboxxWithCall(str):
'''
the tmp dir for boxx
use tmpboxx() to get tmpdir
if not exist then will auto mkdir of boxxTmp in `/tmp`
'''
def __call__(self):
if not os.path.isdir(self):
os.makedirs(self)
return self
tmpboxx = __TmpboxxWithCall(os.path.join(tmpYl,'boxxTmp/'))
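# Calling tmpboxx() creates the boxxTmp directory on first use and returns the path; as a str
# subclass, tmpboxx also behaves like a plain path string.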
class PythonInfo():
'''
    python info
    plt : Bool
        means plotting (matplotlib display) is available
    env :
        one of [cmd, cmdipython, qtipython, spyder, jn]
'''
pid = os.getpid()
gui = 'ipykernel' in sys.modules
cmdipython = 'IPython' in sys.modules and not gui
ipython = cmdipython or gui
spyder = 'spyder' in sys.modules
if gui:
env = 'spyder' if spyder else jupyterNotebookOrQtConsole()
else:
env = 'cmdipython' if ipython else 'cmd'
cmd = not ipython
qtipython = env == 'qtipython'
jn = env == 'jn'
interactive = bool(getattr(sys, 'ps1', sys.flags.interactive))
plt = True
if not gui and linuxYl and 'DISPLAY' not in os.environ :
plt = False
reloadplt = False
def __str__(self):
from boxx import strMethodForDiraAttrs
return strMethodForDiraAttrs(self)
__repr__ = __str__
pyi = PythonInfo()
class SystemInfo():
'''
sys info
'''
pyv = pyv
cpun = cpun
cuda = cuda
tmp = tmpYl
linux = linuxYl
win = winYl
osx = osxYl
os = sys.platform
display = True
if linuxYl:
display = 'DISPLAY' in environ and environ['DISPLAY']
gui = pyi.gui or display
if 0:
@property
def ip(self):
'''
TODO:
'''
return '127.0.0.1'
@property
def user(self):
import getpass
return getpass.getuser()
@property
def host(self):
import platform
return platform.node()
def __str__(self):
from boxx import strMethodForDiraAttrs
return strMethodForDiraAttrs(self)
__repr__ = __str__
sysi = SystemInfo()
| [
"[email protected]"
] | |
05f71de710935c9f61b9d68da2dd6130d14c0aef | 76027f6d013e12ca4fda95957e0cedbef1779def | /leetcode/84-Hard-Largest-Rectangle-In-Histogram/answer.py | 73fea12149d06b2de3f14d2834336d5e705517c6 | [
"Unlicense"
] | permissive | BenDataAnalyst/Practice-Coding-Questions | 79a56617f27a5b2b8d5d9650057a9b0128b9becf | 4c21ab38b75389cfb71f12f995e3860e4cd8641a | refs/heads/master | 2020-07-23T13:31:48.965116 | 2019-09-09T16:23:09 | 2019-09-09T16:23:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,555 | py | #!/usr/bin/python3
#------------------------------------------------------------------------------
# Solution O(n) Stack Solution
#------------------------------------------------------------------------------
class Solution(object):
def largestRectangleArea(self, heights):
"""
:type heights: List[int]
:rtype: int
"""
heights.append(0)
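        # Monotonic stack of indices with non-decreasing heights; the appended 0 sentinel above
        # forces every bar to be popped. Each popped bar's rectangle spans from the new stack
        # top (exclusive) to i (exclusive), so its width is i - stack[-1] - 1.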
stack = [-1]
result = 0
for i in range(len(heights)):
while heights[i] < heights[stack[-1]]:
h = heights[stack.pop()]
w = i - stack[-1] - 1
result = max(result, h * w)
stack.append(i)
return result
#------------------------------------------------------------------------------
# Solution O(n) Kinda DP Solution
#------------------------------------------------------------------------------
class Solution(object):
def largestRectangleArea(self, heights):
"""
:type heights: List[int]
:rtype: int
"""
result = 0
# Left and right will be a cache to hold the number of bars to left and right >= curr height
left = [1 for _ in range(len(heights))]
right = [1 for _ in range(len(heights))]
# Calculate left
for i in range(len(heights)):
l = i-1
# Grow as far left as possible
# We make jumps based on the previously computed left values
while l >= 0:
if heights[l] >= heights[i]:
left[i] += left[l]
l -= left[l]
else:
break
# Calculate right
for i in range(len(heights)):
r = i+1
# Grow as far right as possible
# We make jumps based on the previously computed right values
while r < len(heights):
if heights[r] >= heights[i]:
right[i] += right[r]
r += right[r]
else:
break
# Now we can iterate through all of our possible rectangles
# We calculate our areas with our height * width (left+right)
for i in range(len(heights)):
result = max(result, heights[i] * (left[i] + right[i] - 1))
return result
#------------------------------------------------------------------------------
# Brute Force Solution (O(n^2))
#------------------------------------------------------------------------------
class Solution(object):
def largestRectangleArea(self, heights):
"""
:type heights: List[int]
:rtype: int
"""
result = 0
# Find the max area for each bar i
for i in range(len(heights)):
area = heights[i]
l, r = i-1, i+1
# Grow as far left as possible
while l >= 0:
if heights[l] >= heights[i]:
area += heights[i]
l -= 1
else:
break
# Grow as far right as possible
while r < len(heights):
if heights[r] >= heights[i]:
area += heights[i]
r += 1
else:
break
result = max(result, area)
return result
#------------------------------------------------------------------------------
| [
"[email protected]"
] | |
1523bd6cee28d5286c62ffee8fe3728695519c71 | 7deda84f7a280f5a0ee69b98c6a6e7a2225dab24 | /Manage/migrations/0015_draft_request_user.py | 5f6519727e28bb0c0b16dca2a87445ff181ff05d | [] | no_license | Cornex-Inc/Coffee | 476e30f29412373fb847b2d518331e6c6b9fdbbf | fcd86f20152e2b0905f223ff0e40b1881db634cf | refs/heads/master | 2023-01-13T01:56:52.755527 | 2020-06-08T02:59:18 | 2020-06-08T02:59:18 | 240,187,025 | 0 | 0 | null | 2023-01-05T23:58:52 | 2020-02-13T05:47:41 | Python | UTF-8 | Python | false | false | 401 | py | # Generated by Django 2.1.15 on 2020-05-04 22:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Manage', '0014_auto_20200503_1043'),
]
operations = [
migrations.AddField(
model_name='draft',
name='request_user',
field=models.CharField(default='', max_length=64),
),
]
| [
"[email protected]"
] | |
02d97dde72d76aa6fba06b86768511a2925fe6f3 | d9b53673b899a9b842a42060740b734bf0c63a31 | /leetcode/python/medium/p494_findTargetSumWays.py | 44d6f3cbbfbba1910bfe6c636afae6b5d7481421 | [
"Apache-2.0"
] | permissive | kefirzhang/algorithms | a8d656774b576295625dd663154d264cd6a6a802 | 549e68731d4c05002e35f0499d4f7744f5c63979 | refs/heads/master | 2021-06-13T13:05:40.851704 | 2021-04-02T07:37:59 | 2021-04-02T07:37:59 | 173,903,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | class Solution:
def __init__(self):
self.helper = {}
def findTargetSumWays(self, nums, S: int) -> int:
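        # Top-down recursion over the prefix nums[:-1]; the memo key (len(nums), S) caches how
        # many ways the current prefix can be signed to sum to S.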
if not nums:
return 0
if self.helper.__contains__(tuple([len(nums), S])):
return self.helper[tuple([len(nums), S])]
if len(nums) == 1:
if S == nums[0] and S == -nums[0]:
self.helper[tuple([len(nums), S])] = 2
return 2
elif S == nums[0] or S == -nums[0]:
self.helper[tuple([len(nums), S])] = 1
return 1
else:
self.helper[tuple([len(nums), S])] = 0
return 0
num = self.findTargetSumWays(nums[:-1], S + nums[-1]) + self.findTargetSumWays(nums[:-1], S - nums[-1])
self.helper[tuple([len(nums), S])] = num
return num
slu = Solution()
print(slu.findTargetSumWays([2, 20, 24, 38, 44, 21, 45, 48, 30, 48, 14, 9, 21, 10, 46, 46, 12, 48, 12, 38], 48))
| [
"[email protected]"
] | |
9454fedfe20dd2763d21f24cf97f9d757233075e | ea5a2999512a58f45a899d2dd2fa812dc83ef73b | /an_huiV3/AnHui.py | 0be7b33debb675ba76cf85f6109809f9b9969624 | [] | no_license | nanqianbeiquan/GsClawlerV2 | f3190968818ae9d108c247527ca3e717da2fc8a8 | 884de1f94dad63e6b256592309a9425b7423e66f | refs/heads/master | 2021-07-22T13:44:31.148097 | 2017-11-02T07:02:35 | 2017-11-02T07:02:35 | 109,227,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81,648 | py | # coding=utf-8
import requests
import os
import uuid
import urllib
from urllib import quote
import PackageTool
from PIL import Image
from bs4 import BeautifulSoup
import json
import re
from gs.KafkaAPI import KafkaAPI
import datetime
from requests.exceptions import RequestException
import sys
import PyV8
import random
import subprocess
import time
from Tables_dict import *
from gs.Searcher import Searcher
from gs.Searcher import get_args
from gs.ProxyConf import *
from gs.TimeUtils import *
from gs.ProxyConf import ProxyConf, key1 as app_key
from requests.exceptions import ReadTimeout
from requests.exceptions import ConnectTimeout
from requests.exceptions import ProxyError
from requests.exceptions import ConnectionError
from requests.exceptions import ChunkedEncodingError
from gs.MyException import StatusCodeException
class AnHui(Searcher):
json_result = {}
pattern = re.compile("\s")
save_tag_a = True
flag = True
lock_id = 0
cur_time = None
cur_mc = None
cur_zch = None
entName = None
entId = None
entNo = None
creditt = None
credit_ticket = None
iframe_src = {}
def __init__(self, use_proxy=True):
super(AnHui, self).__init__(use_proxy=use_proxy)
# self.session = requests.session()
# self.session.proxies = {'http': '123.56.238.200:8123', 'https': '123.56.238.200:8123'}
# self.session.proxies = {'http': '121.28.134.29:80'}
self.headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
"Connection": "keep-alive",
"Content-type": "application/x-www-form-urlencoded",
"Host": "www.ahcredit.gov.cn",
# "Referer": "http://www.ahcredit.gov.cn/search.jspx",
"Referer": "http://www.ahcredit.gov.cn/",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0"
}
# self.cur_time = '%d' % (time.time() * 1000)
self.get_credit_ticket()
self.json_result = {} # json输出结果
self.iframe_name = {'qyjbqk': u'基本信息', 'tzrczxx': u'股东信息', 'qybgxx': u'变更信息',
'qybaxxzyryxx': u'主要人员信息', 'qybaxxfgsxx': u'分支机构信息', 'qybaxxqsxx': u'清算信息',
'gqczxx': u'股权出质登记信息', 'dcdyxx': u'动产抵押登记信息', 'jyycxx':u'经营异常信息',
'yzwfxx': u'严重违法信息', 'xzcfxx': u'行政处罚信息', 'ccjcxx':u'抽查检查信息'}
self.domain = 'http://www.ahcredit.gov.cn'
# self.add_proxy(app_key)
self.set_config()
self.log_name = self.topic + "_" + str(uuid.uuid1())
def set_config(self):
# headers = {}
# rt = self.get_request('http://1212.ip138.com/ic.asp', headers=headers)
# rt.encoding = 'gb2312'
# print rt.text
self.plugin_path = sys.path[0] + r'\..\an_hui\ocr\pinyin.bat'
self.group = 'Crawler' # 正式
self.kafka = KafkaAPI("GSCrawlerResult") # 正式
# self.group = 'CrawlerTest' # 测试
# self.kafka = KafkaAPI("GSCrawlerTest") # 测试
self.topic = 'GsSrc34'
self.province = u'安徽省'
self.kafka.init_producer()
# try:
# self.go_cookies()
# except AttributeError:
# pass
def download_yzm(self):
pass
# self.lock_id = self.proxy_config.get_lock_id()
# self.cur_time = '%d' % (time.time() * 1000)
# params = {'currentTimeMillis': self.cur_time}
# image_url = 'http://www.ahcredit.gov.cn/validateCode.jspx?type=2'
# r = self.get_request(url=image_url, params={})
# # print r.headers
# # print r.status_code,r.text
# yzm_path = os.path.join(sys.path[0], str(random.random())[2:]+'.jpg')
# with open(yzm_path, 'wb') as f:
# for chunk in r.iter_content(chunk_size=1024):
# if chunk: # filter out keep-alive new chunks
# f.write(chunk)
# f.flush()
# f.close()
# return yzm_path
def get_credit_ticket(self):
# r = self.get_request('http://qyxy.baic.gov.cn/gjjbj/gjjQueryCreditAction!toIndex.dhtml')
# print 'credit_headers',r.headers
# soup = BeautifulSoup(r.text, 'lxml')
# # print soup
# self.credit_ticket = soup.select('input#credit_ticket')[0].attrs['value']
pass
def go_cookies(self):
url = 'http://www.ahcredit.gov.cn/search.jspx#'
r = self.get_request(url=url)
r.encoding = 'utf-8'
set_cookie = [r.headers['Set-Cookie']]
soup = BeautifulSoup(r.text, 'lxml')
script = soup.select('script')[0].text
script = script[len('eval(')+1:-1]
# print 'script', script
ctxt = PyV8.JSContext()
ctxt.enter()
res = ctxt.eval(script)
# print 'eval_after', res
res = res.replace('if(findDimensions()) {} else ', '')
res = res.replace('window.location=dynamicurl', '')
res = res.replace('document.cookie = cookieString; var confirm = QWERTASDFGXYSF()', 'res=cookieString; var confirm = QWERTASDFGXYSF()')
res = res.replace("document.cookie = cookieString;", "res = res+', '+cookieString;return res")
# print 'res', res
js_res_text = ctxt.eval(res)
# print 'dealt_JSresult', js_res_text
set_cookie.extend(js_res_text.split(', '))
# print set_cookie
for x in set_cookie:
y = x.split(';')[0]
idx_1 = y.index('=')
name = y[:idx_1]
value = y[idx_1+1:]
self.session.cookies.set(name=name, value=value, domain='www.ahcredit.gov.cn', path='/')
def get_tag_a_from_db(self, keyword):
return None
def save_tag_a_to_db(self, keyword):
pass
def get_the_mc_or_code(self, keyword):
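        # Returns False when the keyword looks like a 15- or 18-character registration or
        # unified credit code (mostly alphanumeric), True when it should be treated as a company name.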
if keyword:
if len(keyword) == 15 or len(keyword) == 18:
cnt = 0
for i in keyword:
if i in 'abcdefghijklmnopqrstuvwxyz1234567890':
cnt += 1
if cnt > 10:
return False
else:
return True
else:
self.info(u'输入keyword有误')
return True
def get_tag_a_from_page(self, keyword, ac=0):
return self.get_tag_a_from_page0(keyword)
def get_tag_a_from_page0(self, keyword):
self.flag = self.get_the_mc_or_code(keyword)
for t in range(50):
# time.sleep(3)
# print u'验证码识别中...第%s次' %(t+1)
self.info(u'验证码识别中...第%s次' %(t+1))
self.today = str(datetime.date.today()).replace('-', '')
# yzm = self.get_yzm()
# print 'yzm', yzm
# url = 'http://www.ahcredit.gov.cn/searchList.jspx'
# params = {'checkNo': yzm, 'entName': keyword}
url = 'http://www.ahcredit.gov.cn/queryListData.jspx'
params = {'currentPageIndex': 1, 'entName': keyword, 'searchType': 1, }
# print 'params:', params
# r = self.post_request(url=url, params=params)
# print 'r.headers',r.headers
r = self.post_request(url=url, params=params)
r.encoding = 'utf-8'
# print '**************************', r.text
soup = BeautifulSoup(r.text, 'html5lib')
# print 'soup:', soup, r.status_code
# print '*******cpn_request_ok?:', soup.select('.list')[0], 'next_siblings', soup.select('.list')[0].find_next_sibling()
# tgr = soup.find(id='alert_win').find(id='MzImgExpPwd').get('alt')
# print '*************', soup.select('#gggscpnametext')[0]
# if u'请开启JavaScript并刷新该页' in soup.text:
# print u'cookie失效,更新cookie' # 安徽360特色
# self.go_cookies()
#
if r.status_code == 200:
# print '*'*100
if not soup.text.strip():
# print u'***验证码识别通过***no_result***'
self.info(u'***验证码识别通过***no_result***')
break
if soup.find(id='gggscpnametext'):
# print 'r.headers', r.headers
# print u'**********验证码识别通过***安徽*********' #, soup.find(class_='list')
self.info(u'**********验证码识别通过***安徽*********')
if soup.find(id='gggscpnametext').text.strip() != u'':
return soup.select('#gggscpnametext')
break
return None
def get_search_args(self, tag_a, keyword):
# print 'tag_a', tag_a # 不是连接地址tagA
if len(tag_a) > 1:
for ta in tag_a:
cm = ta.find_all('p')[0].text.strip().split('\n')[0]
# print 'cmpame', cm
self.save_company_name_to_db(cm)
tag_a = tag_a[0]
name = tag_a.find_all('p')[0].text.strip().split('\n')[0] # name为公司查询结果名;keyword为查询前数据库公司名
# name_link = tag_a.find('a').get('href')
# mainID = re.search(r'(?<=id=).*',name_link).group()
mainID = tag_a.find_all('p')[0].find_all('span')[-1].get('class')[0]
code = tag_a.find_all('p')[1].find_all('span')[0].text.strip().replace(' ', '').split(u':')[1] # 注册号
# tagA = self.domain + name_link # 验证码通过后链接地址
# print '+++++++', name, '##', code, 'mainID:', mainID
self.mainID = mainID # 安徽有分页情况可能用到
self.cur_mc = name.replace('(', u'(').replace(')', u')').strip()
self.cur_zch = code
# self.tagA = tagA # 安徽三大参数,公司名称name,注册号code, 跳过验证码的地址tagA
self.xydm_if = ''
self.zch_if = ''
if len(code) == 18:
self.xydm_if = code
else:
self.zch_if = code
# print u'公司名(name)cur_mc: %s, 注册号(code)cur_zch: %s, 链接地址tagA: %s' % (name, code, tagA), 'mainID:', mainID
if self.flag:
if self.cur_mc == keyword:
# print 'same'
self.info(u'查询结果一致')
return 1
else:
# print 'insane'
self.info(u'查询结果不一致')
self.save_company_name_to_db(self.cur_mc)
return 0
else:
self.info(self.cur_mc)
return 1
def parse_detail(self):
"""
        Parse the company detail pages and collect every section
:param kwargs:
:return:
"""
# print '****************HIHI************************'
# r = self.get_request(self.tagA)
# print r.text
# bs = BeautifulSoup(r.text, 'html5lib')
# bd = bs.find(class_='dConBox')
# print '#hei_long_soup', bd, '**'
# print '&&&&&&&&&&&&&&&&&&&&&&&', len(bs.select('#dConBox div iframe')), bs.select('#dConBox div iframe')
self.get_ji_ben()
# print 'jb_step_json', self.json_result
self.get_gu_dong()
# print 'gd_step_json', self.json_result
self.get_bian_geng()
# print 'bg_step_json', self.json_result
self.get_zhu_yao_ren_yuan()
self.get_fen_zhi_ji_gou()
self.get_qing_suan()
self.get_dong_chan_di_ya()
self.get_gu_quan_chu_zhi()
self.get_xing_zheng_chu_fa()
self.get_jing_ying_yi_chang()
self.get_yan_zhong_wei_fa()
self.get_chou_cha_jian_cha()
# self.get_nian_bao()
# print 'the_last_json_result', len(self.json_result), self.json_result
json_go = json.dumps(self.json_result, ensure_ascii=False)
# print 'the_last_json_result:', len(self.json_result), get_cur_time(), json_go
def get_ji_ben(self):
"""
        Query basic registration information
        :return: basic information result
"""
json_list = []
family = 'Registered_Info'
table_id = '01'
self.json_result[family] = []
url = 'http://www.ahcredit.gov.cn/business/YYZZ.jspx?id='+self.mainID
# print 'jiben_url', url
# r = self.get_request(url=url, params={})
# r.encoding = 'gbk'
r = self.get_request_302(url=url, params={})
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, 'html5lib')
# soup = bd
# print '*******ji_ben*******', soup
tr_element_list = soup.find_all('tr')#(".//*[@id='jbxx']/table/tbody/tr")
values = {}
for tr_element in tr_element_list:
# th_element_list = tr_element.find_all('th')
td_element_list = tr_element.find_all('td')
for td in td_element_list:
if td.text.strip():
td_list = td.text.replace(u'·', '').replace(u' ', '').strip().replace(u' ', '').split(u':',1)
col = td_list[0].strip()
val = td_list[1].strip()
# print col, val
col = jiben_column_dict[col]
values[col] = val
# if len(th_element_list) == len(td_element_list):
# col_nums = len(th_element_list)
# for i in range(col_nums):
# col_dec = th_element_list[i].text.strip()
# val = td_element_list[i].text.strip()
# if col_dec != u'':
# col = jiben_column_dict[col_dec]
# values[col] = val
values['rowkey'] = '%s_%s_%s_' % (self.cur_mc, table_id, self.cur_zch)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':tyshxy_code'] = self.xydm_if
values[family + ':zch'] = self.zch_if
values[family + ':lastupdatetime'] = get_cur_time()
values[family + ':province'] = u'安徽省'
json_list.append(values)
self.json_result[family] = json_list
# print 'jiben_values', values
def get_gu_dong(self):
"""
        Query shareholder information
:param param_pripid:
:param param_type:
:return:
"""
family = 'Shareholder_Info'
table_id = '04'
# self.json_result[family] = []
json_list = []
json_dict = {}
url = 'http://www.ahcredit.gov.cn/business/GDCZ.jspx?id='+self.mainID
# print 'gudongurl', url
r = self.get_request(url=url)
soup = BeautifulSoup(r.text, 'html5lib')
# soup = aa
# try:
# url = soup.find(id='invDiv').text # 此处url不是连接地址,是判断内容是否为空的参数
# except:
# url = ''
# print 'gudong_url', self.cur_time, url
# print '******gudong_soup******', soup
if soup.text.strip():
try:
title = soup.find('span').text.strip()
except:
title = ''
return
# print 'title:', title
soup = soup.find(id='paging')
# print '*******gudong*******', soup
# print 'gu_dong_turn_page', turn_page
# print 'body_tr',len(soup.select('#table2 tr a')),soup.select('#table2 tr a')
# print 'gd_tr1',soup.select('#tr1')
tr_num = soup.find_all(class_='detailsList')
if len(tr_num) >= 2:
gd_th = soup.find_all(class_='detailsList')[0].find_all('tr')[0].find_all('th')
iftr = soup.find_all(class_='detailsList')[1].find_all('tr')
# if len(iftr) > 0:
cnt = 1
thn = len(gd_th)
if thn == 4:
family = 'Partner_Info'
elif thn == 6:
family = 'DIC_Info'
elif thn == 2:
family = 'Investor_Info'
else:
family = 'Shareholder_Info'
# print 'len(th):', thn
for i in range(len(iftr)):
gd_td = iftr[i].find_all('td')
for j in range(len(gd_th)):
th = gd_th[j].text.strip()
td = gd_td[j].text.strip()
if td == u'查看':
td = gd_td[j].a.get('onclick')
# print 'gudong', td
td = re.search(r'(?<=[(]).*(?=[)])', td).group().strip("'")
detail_url = 'http://www.ahcredit.gov.cn/queryInvDetailAction.jspx?invId='+td
# print 'detail_url', detail_url
td = detail_url
self.get_gu_dong_detail(detail_url, json_dict)
# self.load_func(td) 抓取详情页内容方法,见cnt分页内容
if title == u'主管部门(出资人)信息':
json_dict[DICInfo_column_dict[th]] = td
elif thn == 4:
json_dict[hehuoren_column_dict[th]] = td
elif thn == 6:
json_dict[DICInfo_column_dict[th]] = td
elif thn == 2:
json_dict[touziren_column_dict[th]] = td
else:
json_dict[gudong_column_dict[th]] = td
json_dict['rowkey'] = '%s_%s_%s_%s%d' % (self.cur_mc, table_id, self.cur_zch, self.today, cnt)
json_dict[family + ':registrationno'] = self.cur_zch
json_dict[family + ':enterprisename'] = self.cur_mc
json_dict[family + ':id'] = str(cnt)
json_list.append(json_dict)
json_dict = {}
cnt += 1
turn_pageo = soup.find_all('div', recursive=False)[1].ul.find_all('li')[1].text.strip()[1:-1] #判断是否有分页,需要post分页地址,暂不处理
# print 'gudong_turn_page:', turn_pageo
turn_page = int(turn_pageo)
# 股东分页情况处理
if turn_page > 1:
# print '*'*1000
# print 'len_gudong_page', turn_page
for p in range(2, turn_page+1):
# link = 'http://www.ahcredit.gov.cn/QueryInvList.jspx?pno='+str(p)+'&mainId='+self.mainID
fkurl = 'http://www.ahcredit.gov.cn/business/QueryInvList.jspx?pno='+str(p)+'&order=0&mainId='+self.mainID
# print '***********gudongfenyelink******************', link
url = fkurl
r = self.get_request(url=url, params={})
# r.encoding = 'gbk'
soup = BeautifulSoup(r.text, 'html5lib')
# print '*******gudong**fenye*****',soup
# gd_th = soup.find_all(class_='detailsList')[0].find_all('tr')[1].find_all('th')
iftr = soup.find_all(class_='detailsList')[0].find_all('tr')[1:]
# print 'pp', p
for i in range(len(iftr)):
gd_td = iftr[i].find_all('td')
# print 'mm', i
for j in range(len(gd_th)):
th = gd_th[j].text.strip()
td = gd_td[j].text.strip()
if td == u'查看':
td = gd_td[j].a.get('onclick')
# print 'gudong', td
td = re.search(r'(?<=[(]).*(?=[)])', td).group().strip("'")
detail_url = self.domain+td
# print 'detail_url', detail_url
td = detail_url
self.get_gu_dong_detail(detail_url, json_dict)
# self.load_func(td) 抓取详情页内容方法,见cnt分页内容
if title == u'主管部门(出资人)信息':
json_dict[DICInfo_column_dict[th]] = td
elif thn == 4:
json_dict[hehuoren_column_dict[th]] = td
elif thn == 6:
json_dict[DICInfo_column_dict[th]] = td
elif thn == 2:
json_dict[touziren_column_dict[th]] = td
else:
json_dict[gudong_column_dict[th]] = td
json_dict['rowkey'] = '%s_%s_%s_%s%d' % (self.cur_mc, table_id, self.cur_zch, self.today, cnt)
json_dict[family + ':registrationno'] = self.cur_zch
json_dict[family + ':enterprisename'] = self.cur_mc
json_dict[family + ':id'] = str(cnt)
json_list.append(json_dict)
json_dict = {}
cnt += 1
if json_list:
self.json_result[family] = json_list
# print '-,-**gudong_json_list', len(json_list), json_list
def get_gu_dong_detail(self, url, values):
"""
        Query shareholder details
:param param_pripid:
:param param_invid:
:return:
"""
family = 'Shareholder_Info'
table_id = '04'
# print 'gudong_detail_url',url
r = self.get_request_302(url=url, params={})
# r.encoding = 'gbk'
soup = BeautifulSoup(r.text, 'html5lib')
# print '***__****gudong_detail*******',soup
detail_tb_list = soup.find_all(class_='detailsList')
# detail_th_list = ['subscripted_capital','actualpaid_capital','subscripted_method','subscripted_amount','subscripted_time','actualpaid_method','actualpaid_amount','actualpaid_time']
# detail_th_new_list = [family+':'+x for x in detail_th_list]
# print 'detail_th_new_list', detail_th_new_list
n = 0
for tr_ele in detail_tb_list:
tr_ele_list = tr_ele.find_all('tr')
if n == 0:
for tr in tr_ele_list[1:]:
col = tr.th.text
val = tr.td.text
# print 'gddetails', col, val
values[gudong_column_dict[col]] = val
else:
th_list = tr_ele_list[0].find_all('th')
if len(tr_ele_list) == 1:
for c in range(len(th_list)):
col = th_list[c].text.strip()
val = u''
values[gudong_column_dict[col]] = val
if len(tr_ele_list) > 1:
for tr in tr_ele_list[1:]:
td_list = tr.find_all('td')
for c in range(len(th_list)):
col = th_list[c].text.strip()
val = td_list[c].text.strip()
# print col,val
values[gudong_column_dict[col]] = val
n += 1
# print 'gdl_values',len(values),values
def get_bian_geng(self):
"""
        Query change records
:param param_pripid:
:param param_type:
:return:
"""
family = 'Changed_Announcement'
table_id = '05'
# self.json_result[family] = []
json_list = []
json_dict = {}
url = 'http://www.ahcredit.gov.cn/business/BGXX.jspx?id='+self.mainID
# print 'biangeng_url', url
r = self.get_request_302(url=url, params={})
# r.encoding = 'gbk'
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, 'html5lib')
# print '*******biangeng*******',soup
try:
tr_num = len(soup.find_all(class_='detailsList'))
except:
tr_num = 0
if tr_num > 1:
gd_th = soup.find_all(class_='detailsList')[0].find_all('th')
# print 'th_previous',cc.find(id='altDiv').find_previous_sibling().text
iftr = soup.find_all(class_='detailsList')[1].find_all('tr')
if len(iftr) > 0:
cnt = 1
for i in range(len(iftr)):
gd_td = iftr[i].find_all('td')
for j in range(len(gd_th)):
th = gd_th[j].text.strip()
td = gd_td[j].text.strip()
# print i,j,th,td
json_dict[biangeng_column_dict[th]] = td
json_dict['rowkey'] = '%s_%s_%s_%s%d' % (self.cur_mc, table_id, self.cur_zch, self.today, cnt)
json_dict[family + ':registrationno'] = self.cur_zch
json_dict[family + ':enterprisename'] = self.cur_mc
json_dict[family + ':id'] = str(cnt)
json_list.append(json_dict)
json_dict = {}
cnt += 1
turn_pageo = soup.find(id='altDiv2').find_all('div', recursive=False)[1].ul.find_all('li')[1].text.strip()[1:-1] #判断是否有分页,需要post分页地址,暂不处理
# print 'biangeng_turn_page:', turn_pageo
turn_page = int(turn_pageo)
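                # Pages 2..N are fetched one by one through QueryAltList.jspx, reusing
                # the header row (gd_th) parsed from the first page.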
if turn_page > 1:
# print '*3'*1000
# sys.exit()
# print 'biangeng_page_splitter***************'
for p in range(2, turn_page+1):
# bgurl = 'http://www.ahcredit.gov.cn/QueryAltList.jspx?pno='+str(p)+'&mainId='+self.mainID
fkurl = 'http://www.ahcredit.gov.cn/business/QueryAltList.jspx?pno='+str(p)+'&order=0&mainId='+self.mainID
# print 'biangeng_fen_ye_link', fkurl
rc = self.get_request_302(url=fkurl)
soup = BeautifulSoup(rc.text, 'lxml')
# print 'biangeng_turn_soup', soup
# gd_th = soup.find_all(class_='detailsList')[0].find_all('tr')[1].find_all('th')
iftr = soup.find_all(class_='detailsList')[1].find_all('tr')
for i in range(len(iftr)):
gd_td = soup.find_all(class_='detailsList')[1].find_all('tr')[i].find_all('td')
for j in range(len(gd_th)):
th = gd_th[j].text.strip()
td = gd_td[j].text.strip()
# print i,j,th,td
json_dict[biangeng_column_dict[th]] = td
# print '****************json_dict',json_dict
json_dict['rowkey'] = '%s_%s_%s_%s%d' % (self.cur_mc, table_id, self.cur_zch, self.today, cnt)
json_dict[family + ':registrationno'] = self.cur_zch
json_dict[family + ':enterprisename'] = self.cur_mc
json_dict[family + ':id'] = str(cnt)
json_list.append(json_dict)
json_dict = {}
cnt += 1
if json_list:
self.json_result[family] = json_list
# print '-,-**biangeng_json_list****', len(json_list), json_list
    def get_detail(self, sop):  # only used for Beijing change-detail pages; not needed for other provinces for now
row_data = []
# tables=self.driver.find_elements_by_xpath("//*[@id='tableIdStyle']/tbody")
tables=sop.find_all(id='tableIdStyle')
for t in tables:
time.sleep(1)
trs = t.find_all("tr")
bt = trs[0].text
ths = trs[1].find_all("th")
for tr in trs[2:]:
tds = tr.find_all("td")
col_nums = len(ths)
for j in range(col_nums):
col = ths[j].text.strip().replace('\n','')
td = tds[j]
val = td.text.strip()
row = col+u':'+val
# print 'row',row
row_data.append(row)
if u'变更前' in bt:
self.bgq = u';'.join(row_data)
# print 'bgq',self.bgq
elif u'变更后' in bt:
self.bgh = u';'.join(row_data)
# print 'bgh',self.bgh
row_data = []
    def get_zhu_yao_ren_yuan(self):
        """
        Query key personnel (zhuyaorenyuan) information.
        :return:
        """
family = 'KeyPerson_Info'
table_id = '06'
# self.json_result[family] = []
json_list = []
values = {}
url = 'http://www.ahcredit.gov.cn/business/ZYRY.jspx?id='+self.mainID
# print 'zhuyaorenyuan_url', url
r = self.get_request_302(url=url, params={})
# r.encoding = 'gbk'
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, 'html5lib')
# print '*******zhuyaorenyuan*******', soup
try:
tr_num = len(soup.find_all(class_='keyPerInfo'))
except:
tr_num = 0
# print 'zyry()*)(', tr_num
if tr_num > 0:
            soup = soup.find_all(class_='keyPerInfo')  # one element per listed person
# print '*******zhuyaorenyuan*******',soup
cnt = 1
for t in range(tr_num):
pson = soup[t].find_all('p')
if len(pson):
name = pson[0].text.strip()
posn = pson[1].text.strip()
# print '******', t, 'name:', name, 'position:', posn
values[zhuyaorenyuan_column_dict[u'姓名']] = name
values[zhuyaorenyuan_column_dict[u'职务']] = posn
values['rowkey'] = '%s_%s_%s_%s%d' % (self.cur_mc, table_id, self.cur_zch, self.today, cnt)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':id'] = str(cnt)
json_list.append(values)
values = {}
cnt += 1
if json_list:
# print 'zhuyaorenyuan_jsonlist:', json_list
self.json_result[family] = json_list
    def get_fen_zhi_ji_gou(self):
        """
        Query branch office (fenzhijigou) information.
        :return:
        """
family = 'Branches'
table_id = '08'
# self.json_result[family] = []
url = 'http://www.ahcredit.gov.cn/business/FZJG.jspx?id='+self.mainID
# print 'fenzhijigou_url', url
r = self.get_request_302(url=url, params={})
# r.encoding = 'gbk'
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, 'html5lib')
# print '*******fenzhijigou*******', soup
try:
tr_num = len(soup.find_all('p'))
except:
tr_num = 0
# print 'fenzhijigou:', tr_num
if tr_num > 1:
soup = soup.find(class_='xxwk')
# print '*******fenzhijigou*******', soup
values = {}
json_list = []
if soup.text.strip():
tr_element_list = soup.find_all(class_='fenzhixinxin')
idn = 1
for tr_element in tr_element_list:
if tr_element.text == u'':
# print 'fenzhijigou_boom_breaker'
break
td_element_list = tr_element.find_all("p")
values[fenzhijigou_column_dict[u'名称']] = td_element_list[0].text.strip()
values[fenzhijigou_column_dict[u'注册号']] = td_element_list[1].text.strip().replace(u'·', '').split(u':')[1]
values[fenzhijigou_column_dict[u'登记机关']] = td_element_list[2].text.strip().replace(u'·', '').split(u':')[1]
values['rowkey'] = '%s_%s_%s_%s%d' % (self.cur_mc, table_id, self.cur_zch, self.today, idn)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':id'] = str(idn)
json_list.append(values)
# json_fenzhijigou=json.dumps(values,ensure_ascii=False)
# print 'json_fenzhijigou',json_fenzhijigou
values = {}
idn += 1
if json_list:
self.json_result[family] = json_list
# print '-,-**fenzhijigou_json_list', len(json_list), json_list
    def get_qing_suan(self):
        """
        Query liquidation (qingsuan) information.
        :return:
        """
family = 'liquidation_Information'
table_id = '09'
# self.json_result[family] = []
values = {}
json_list = []
url = 'http://www.ahcredit.gov.cn/business/QSXX.jspx?id='+self.mainID
# print 'qingsuan_url:', url
r = self.get_request(url=url, params={})
# r.encoding = 'gbk'
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, 'html5lib')
# print 'qingsuan', soup
if u'清算信息' in soup.text:
table = soup.find_all(class_='details')[0]
tr_list = table.find_all('tr')
try:
                fzr = tr_list[0]  # row with the liquidation supervisor (清算负责人)
                cy = tr_list[1]  # row with the liquidation committee members (清算组成员)
fzrtd = fzr.td.text.strip()
cytd = cy.td.text.strip()
# print '****qingsuanyisen**'
if fzrtd or cytd:
# print u'清算有内容'
self.info(u'清算有内容')
values[qingsuan_column_dict[u'清算负责人']] = fzrtd
values[qingsuan_column_dict[u'清算组成员']] = cytd
values['rowkey'] = '%s_%s_%s_%s_' % (self.cur_mc, table_id, self.cur_zch, self.today)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
json_list.append(values)
if json_list:
# print 'qingsuan', json_list
self.json_result[family] = json_list
except:
return
    def get_dong_chan_di_ya(self):
        """
        Query chattel mortgage (dongchandiya) information.
        :return:
        """
family = 'Chattel_Mortgage'
table_id = '11'
# self.json_result[family] = []
values = {}
json_list = []
url = 'http://www.ahcredit.gov.cn/business/DCDY.jspx?id='+self.mainID
# print 'dcdyurl:', url
try:
r = self.get_request(url=url, params={})
except:
r = self.get_request_302(url=url, params={})
# r.encoding = 'gbk'
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, 'html5lib')
# print '*******dongchandiya*******', soup
try:
tr_num = len(soup.find_all('p'))
except:
tr_num = 0
# print 'fenzhijigou:', tr_num
if tr_num:
soup = soup.find(id='mortDiv2')
row_cnt = len(soup.find_all(class_="detailsList")[1].find_all('tr'))
if row_cnt > 0:
# print 'come_on_bb_not_OK'
tr_element_list = soup.find_all(class_="detailsList")[1].find_all('tr')
th_element_list = soup.find_all(class_="detailsList")[0].find_all('tr')[0].find_all('th')
idn = 1
for tr_element in tr_element_list:
td_element_list = tr_element.find_all('td')
col_nums = len(th_element_list)
# print '*****', col_nums
for j in range(col_nums):
col_dec = th_element_list[j].text.strip().replace('\n','')
col = dongchandiyadengji_column_dict[col_dec]
td = td_element_list[j]
val = td.text.strip()
if val == u'查看':
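                            # A '查看' (view) cell only carries a JS onclick handler; the record id
                            # inside its parentheses is extracted and turned into a direct detail URL.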
mex = td.a.get('onclick')
td = re.search(r'(?<=[(]).*(?=[)])', mex).group().replace("'", "")
# urlsample = 'http://www.ahcredit.gov.cn/business/mortInfoDetail.jspx?id=400000000122041524'
# link = self.domain+td
link = 'http://www.ahcredit.gov.cn/business/mortInfoDetail.jspx?id='+td
# print u'动产抵押详情', link
values[col] = link
else:
values[col] = val
# values['RegistrationNo']=self.cur_code
# values['EnterpriseName']=self.org_name
# values['rowkey'] = values['EnterpriseName']+'_11_'+ values['RegistrationNo']+'_'+str(id)
values['rowkey'] = '%s_%s_%s_%s%d' % (self.cur_mc, table_id, self.cur_zch, self.today, idn)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':id'] = str(idn)
json_list.append(values)
# json_dongchandiyadengji=json.dumps(values,ensure_ascii=False)
# print 'json_dongchandiyadengji',json_dongchandiyadengji
values = {}
idn += 1
if json_list:
# print '-,-**dongchandiya_json_list',len(json_list),json_list
self.json_result[family] = json_list
    def get_gu_quan_chu_zhi(self):
        """
        Query equity pledge (guquanchuzhi) information.
        :return:
        """
family = 'Equity_Pledge'
table_id = '12'
# self.json_result[family] = []
json_list = []
values = {}
url = 'http://www.ahcredit.gov.cn/business/GQCZ.jspx?id='+self.mainID
# print 'gqczurl:', url
try:
r = self.get_request(url=url, params={})
except:
r = self.get_request_302(url=url, params={})
# r.encoding = 'gbk'
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, 'html5lib')
# print '*******guquanchuzhi*******', soup
try:
tr_num = len(soup.find_all('p'))
except:
tr_num = 0
# print 'fenzhijigou:', tr_num
if tr_num:
# print '*******guquanchuzhi*******',soup
soup = soup.find(id='pledgeDiv2')
table_element = soup.find_all(class_="detailsList")
row_cnt = len(soup.find_all(class_="detailsList")[1].find_all('tr'))
if row_cnt > 0:
tr_element_list = soup.find_all(class_="detailsList")[1].find_all('tr')
th_element_list = soup.find_all(class_="detailsList")[0].find_all('tr')[0].find_all('th')
idn = 1
for tr_element in tr_element_list:
td_element_list = tr_element.find_all('td')
col_nums = len(th_element_list)
for j in range(col_nums):
col_dec = th_element_list[j].text.strip().replace('\n','')
# print 'col_dec',col_dec
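                        # The '证照/证件号码' header occurs twice (pledgor and pledgee), so the
                        # second occurrence is mapped to a separate output column.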
if col_dec == u'证照/证件号码' and th_element_list[j-1].text.strip().replace('\n','') == u'出质人':
# print '**',col_dec
col = guquanchuzhidengji_column_dict[col_dec]
elif col_dec == u'证照/证件号码' and th_element_list[j-1].text.strip().replace('\n','') == u'质权人':
# print '***',col_dec
col = guquanchuzhidengji_column_dict[u'证照/证件号码1']
else:
col = guquanchuzhidengji_column_dict[col_dec]
td = td_element_list[j]
val = td.text.strip()
if val == u'查看':
mex = td.a.get('onclick')
td = re.search(r'(?<=[(]).*(?=[)])', mex).group().replace("'", "")
# link = self.domain+td
link = 'http://www.ahcredit.gov.cn/business/altPleInfo.jspx?pleId='+td
# print 'gqcz_link', link
values[col] = link
# print u'股权出质详情', link
else:
values[col] = val
# values['RegistrationNo']=self.cur_code
# values['EnterpriseName']=self.org_name
# values['rowkey'] = values['EnterpriseName']+'_12_'+ values['RegistrationNo']+'_'+str(id)
values['rowkey'] = '%s_%s_%s_%s%d' % (self.cur_mc, table_id, self.cur_zch, self.today, idn)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':id'] = str(idn)
json_list.append(values)
# json_guquanchuzhidengji=json.dumps(values,ensure_ascii=False)
# print 'json_guquanchuzhidengji',json_guquanchuzhidengji
values = {}
idn += 1
if len(table_element) == 3:
turn_page = table_element[2].find_all('a')
# if len(turn_page) > 1:
# print u'股权出质有分页'
if json_list:
self.json_result[family] = json_list
# print '-,-**guquanchuzhi_json_list**',len(json_list),json_list
    def get_xing_zheng_chu_fa(self):
        """
        Query administrative penalty (xingzhengchufa) information.
        :return:
        """
family = 'Administrative_Penalty'
table_id = '13'
# self.json_result[family] = []
values = {}
json_list = []
url = 'http://www.ahcredit.gov.cn/business/XZCF.jspx?id='+self.mainID
# print 'xzcfurl:', url
try:
r = self.get_request(url=url, params={})
except:
r = self.get_request_302(url=url, params={})
# r.encoding = 'gbk'
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, 'html5lib')
# print '*******xingzhengchufa*******', soup
try:
tr_num = len(soup.find_all('p'))
except:
tr_num = 0
# print 'xingzhengchufa:', tr_num
if tr_num:
# print '*******xingzhengchufa*******',soup
soup = soup.find(id='punDiv2')
table_element = soup.find_all(class_='detailsList')
row_cnt = len(soup.find_all(class_="detailsList")[1].find_all('tr'))
if row_cnt > 0:
tr_element_list = soup.find_all(class_="detailsList")[1].find_all('tr')
th_element_list = soup.find_all(class_="detailsList")[0].find_all('tr')[0].find_all('th')
idn = 1
for tr_element in tr_element_list:
td_element_list = tr_element.find_all('td')
col_nums = len(th_element_list)
for j in range(col_nums):
col_dec = th_element_list[j].text.strip().replace('\n','')
col = xingzhengchufa_column_dict[col_dec]
td = td_element_list[j]
val = td.text.strip()
if val == u'查看':
mex = td.a.get('onclick')
td = re.search(r'(?<=[(]).*(?=[)])', mex).group().replace("'", "")
# val = self.domain+td
val = 'http://www.ahcredit.gov.cn/business/punishInfoDetail.jspx?id='+td
# print 'xingzhengchufa__val', val
values[col] = val
# values['RegistrationNo']=self.cur_code
# values['EnterpriseName']=self.org_name
# values['rowkey'] = values['EnterpriseName']+'_13_'+ values['RegistrationNo']+'_'+str(id)
values['rowkey'] = '%s_%s_%s_%s%d' % (self.cur_mc, table_id, self.cur_zch, self.today, idn)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':id'] = str(idn)
json_list.append(values)
# json_xingzhengchufa=json.dumps(values,ensure_ascii=False)
# print 'json_xingzhengchufa',json_xingzhengchufa
values = {}
idn += 1
if len(table_element) == 3:
turn_page = table_element[2].find_all('a')
# if len(turn_page) > 1:
# print u'行政处罚有分页'
if json_list:
self.json_result[family] = json_list
# print '-,-**xingzhengchufa_jsonlist***', len(json_list), json_list
    def get_jing_ying_yi_chang(self):
        """
        Query abnormal-operation (jingyingyichang) records.
        :return:
        """
family = 'Business_Abnormal'
table_id = '14'
# self.json_result[family] = []
values = {}
json_list = []
url = 'http://www.ahcredit.gov.cn/business/JYYC.jspx?id='+self.mainID
# print 'jyycurl:', url
try:
r = self.get_request(url=url, params={})
except:
r = self.get_request_302(url=url, params={})
# r.encoding = 'gbk'
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, 'html5lib')
# print '*******jingyingyichang*******', soup
try:
tr_num = len(soup.find_all('p'))
except:
tr_num = 0
# print 'jingyingyichang:', tr_num
if tr_num:
soup = soup.find(id='excDiv2')
table_element = soup.find_all(class_='detailsList')
row_cnt = len(soup.find_all(class_="detailsList")[1].find_all('tr'))
if row_cnt > 0:
idn = 1
tr_element_list = soup.find_all(class_="detailsList")[1].find_all('tr')
th_element_list = soup.find_all(class_="detailsList")[0].find_all('tr')[0].find_all('th')
for tr_element in tr_element_list:
td_element_list = tr_element.find_all('td')
col_nums = len(th_element_list)
for j in range(col_nums):
col_dec = th_element_list[j].text.strip().replace('\n','')
# print 'col_dec',col_dec
col = jingyingyichang_column_dict[col_dec]
td = td_element_list[j]
val = td.text.strip().replace('\t','').replace('\n','')
values[col] = val
# print 'iii',col,val
# values['RegistrationNo']=self.cur_code
# values['EnterpriseName']=self.org_name
# values['rowkey'] = values['EnterpriseName']+'_14_'+ values['RegistrationNo']+'_'+str(id)
values['rowkey'] = '%s_%s_%s_%s%d' % (self.cur_mc, table_id, self.cur_zch, self.today, idn)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':id'] = str(idn)
json_list.append(values)
# json_jingyingyichang=json.dumps(values,ensure_ascii=False)
# print 'json_jingyingyichang',json_jingyingyichang
values = {}
idn += 1
if len(table_element) == 3:
turn_page = table_element[2].find_all('a')
# if len(turn_page) > 1:
# print u'经营异常有分页'
if json_list:
self.json_result[family] = json_list
# print '-,-**jingyingyichang',json_list
    def get_yan_zhong_wei_fa(self):
        """
        Query serious violation (yanzhongweifa) records.
        :return:
        """
family = 'Serious_Violations'
table_id = '15'
# self.json_result[family] = []
values = {}
json_list = []
url = 'http://www.ahcredit.gov.cn/business/YZWF.jspx?id='+self.mainID
# print 'yzwfurl:', url
try:
r = self.get_request(url=url, params={})
except:
r = self.get_request_302(url=url, params={})
# r.encoding = 'gbk'
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, 'html5lib')
# print '*******yanzhongweifa*******', soup
try:
tr_num = len(soup.find_all('p'))
except:
tr_num = 0
# print 'yanzhongweifa:', tr_num
if tr_num:
soup = soup.find(id='serillDiv2')
table_element = soup.find_all(class_='detailsList')
row_cnt = len(soup.find_all(class_="detailsList")[1].find_all('tr'))
if row_cnt > 0:
tr_element_list = soup.find_all(class_="detailsList")[1].find_all('tr')
th_element_list = soup.find_all(class_="detailsList")[0].find_all('tr')[0].find_all('th')
idn = 1
for tr_element in tr_element_list:
td_element_list = tr_element.find_all('td')
col_nums = len(th_element_list)
for j in range(col_nums):
col_dec = th_element_list[j].text.strip().replace('\n','')
col = yanzhongweifa_column_dict[col_dec]
td = td_element_list[j]
val = td.text.strip()
values[col]=val
# values['RegistrationNo']=self.cur_code
# values['EnterpriseName']=self.org_name
# values['rowkey'] = values['EnterpriseName']+'_15_'+ values['RegistrationNo']+'_'+str(id)
values['rowkey'] = '%s_%s_%s_%s%d' % (self.cur_mc, table_id, self.cur_zch, self.today, idn)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':id'] = str(idn)
json_list.append(values)
# json_yanzhongweifa=json.dumps(values,ensure_ascii=False)
# print 'json_yanzhongweifa',json_yanzhongweifa
values = {}
idn += 1
if len(table_element) == 3:
turn_page = table_element[2].find_all('a')
# if len(turn_page) > 1:
# print u'严重违法有分页'
if json_list:
self.json_result[family] = json_list
# print '-,-**yanzhongweifa_json_list', len(json_list), json_list
    def get_chou_cha_jian_cha(self):
        """
        Query spot-check / random inspection (chouchajiancha) records.
        :return:
        """
family = 'Spot_Check'
table_id = '16'
# self.json_result[family] = []
values = {}
json_list = []
url = 'http://www.ahcredit.gov.cn/business/CCJC.jspx?id='+self.mainID
# print 'ccjcurl:', url
try:
r = self.get_request(url=url, params={})
except:
r = self.get_request_302(url=url, params={})
# r.encoding = 'gbk'
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, 'html5lib')
# print '*******chouchajiancha*******', soup
try:
tr_num = len(soup.find_all('p'))
except:
tr_num = 0
# print 'chouchajiancha:', tr_num
if tr_num:
soup = soup.find(id='spotCheck2')
try:
row_cnt = len(soup.find_all(class_='detailsList')[1].find_all('tr'))
except:
row_cnt = 0
# print 'ccjc_row_cnt',row_cnt
if row_cnt > 0:
# print '*****mmmm****'
table_element = soup.find_all(class_='detailsList')
tr_element_list = soup.find_all(class_='detailsList')[1].find_all('tr')
th_element_list = soup.find_all(class_='detailsList')[0].find_all('tr')[0].find_all('th')
idn = 1
for tr_element in tr_element_list:
td_element_list = tr_element.find_all('td')
col_nums = len(th_element_list)
for j in range(col_nums):
col_dec = th_element_list[j].text.strip().replace('\n','')
col = chouchajiancha_column_dict[col_dec]
td = td_element_list[j]
val = td.text.strip()
values[col] = val
# values['RegistrationNo']=self.cur_code
# values['EnterpriseName']=self.org_name
# values['rowkey'] = values['EnterpriseName']+'_16_'+ values['RegistrationNo']+'_'+str(id)
values['rowkey'] = '%s_%s_%s_%s%d' % (self.cur_mc, table_id, self.cur_zch, self.today, idn)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':id'] = str(idn)
json_list.append(values)
# json_chouchajiancha=json.dumps(values,ensure_ascii=False)
# print 'json_chouchajiancha',json_chouchajiancha
values = {}
idn += 1
if len(table_element) == 3:
turn_page = table_element[2].find_all('a')
# if len(turn_page) > 1:
# print u'抽查检查有分页'
if json_list:
self.json_result[family] = json_list
# print '-,-**chouchajiancha', len(json_list), json_list
def get_nian_bao(self):
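        """
        Query annual report (nianbao) information: parse the most recent year first,
        then walk the remaining years and dispatch every report section to the
        matching load_nianbao* helper.
        :return:
        """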
        dic = {}  # maps report year -> yearId used by the site
        self.nbjb_list = []  # annual report: basic info
        self.nbzczk_list = []  # annual report: asset status
self.nbdwdb_list = []
self.nbgdcz_list = []
self.nbgqbg_list = []
self.nbwz_list = []
self.nbxg_list = []
self.nbdwtz_list = []
url = 'http://www.ahcredit.gov.cn/yearExm/QYNBXX.jspx?ram='+str(random.random())+'&id='+self.mainID
# print 'nianbaourl:', url
try:
r = self.get_request(url=url)
except:
self.info(u'该企业暂无年度报告信息')
return
soup = BeautifulSoup(r.text, 'html5lib')
soup = soup.find(class_='panel_state_content')
# print '****niaobao_soup****', soup
div_list = soup.find_all('div', recursive=False)
year_opt = div_list[0].select('option')[1]
nball = div_list[0].select('option')[1:]
for yb in nball:
yr = yb.text.strip()[:4]
yid = yb.get('value').strip()
# print '**', yr, '*', yid
dic[yr] = yid
# for y in year_opt:
# print 'year', year_opt
self.y = year_opt.text.strip()[:4]
# print 'y', self.y
cnt = 0
for div in div_list[1:]:
cnt += 1
dn = div.find_all('span')[0].text.strip()
# print cnt, dn
if dn == u'基本信息':
self.load_nianbaojiben(div)
elif dn == u'网站或网店信息':
self.load_nianbaowangzhan(div)
elif dn == u'股东及出资信息':
self.load_nianbaogudongchuzi(div)
elif dn == u'对外投资信息':
self.load_nianbaoduiwaitouzi(div)
            elif dn == u'行政许可情况':  # administrative licensing; shows up on individual-business (个体工商户) reports, e.g. 濉溪县界沟鸿星尔克专卖
                self.load_nianbaoxingzhengxuke(div)
            elif dn == u'资产状况信息':  # asset status; shows up on individual-business reports
                self.load_nianbaozichangeti(div)
            elif dn == u'分支机构情况':  # branch offices; shows up on individual-business reports, e.g. 青阳县五梅经济林农民专业合作社
                self.load_nianbaofenzhijigou(div)
            elif dn == u'生产经营情况':  # production/operation status; unclear how it differs from the enterprise asset-status section, e.g. 安徽修正堂药房连锁经营有限公司阜南县任庙店
                self.load_nianbaoshengchanjingying(div)
elif dn == u'企业资产状况信息':
self.load_nianbaozichanzhuangkuang(div)
elif dn == u'对外提供保证担保信息':
self.load_nianbaoduiwaidanbao(div)
elif dn == u'股权变更信息':
self.load_nianbaoguquanbiangeng(div)
elif dn == u'修改记录':
self.load_nianbaoxiugai(div)
else:
# print u'未知区域div,看看是什么', dn
self.info(u'未知区域div,看看是什么'+ dn)
# print 'dic_before:', dic
if len(dic) > 1:
dic.pop(self.y)
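            # The most recent year was already parsed above, so drop it and only
            # re-request the remaining years by their yearId.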
# print 'dic_after:', dic
for y in dic.keys():
urln = 'http://www.ahcredit.gov.cn/yearExm/QYNBXX.jspx?ram='+str(random.random())+'&id='+self.mainID+'&yearId='+dic[y]
# print 'url', y, urln
self.y = y
try:
r = self.get_request(url=urln)
except:
self.info(u'该企业年度报告信息无法打开')
break
soup = BeautifulSoup(r.text, 'html5lib')
soup = soup.find(class_='panel_state_content')
# print '****niaobao_soup****', soup
div_list = soup.find_all('div', recursive=False)
year_opt = div_list[0].select('option')[1]
nball = div_list[0].select('option')[1:]
cnt = 0
for div in div_list[1:]:
cnt += 1
dn = div.find_all('span')[0].text.strip()
# print cnt, dn
if dn == u'基本信息':
self.load_nianbaojiben(div)
elif dn == u'网站或网店信息':
self.load_nianbaowangzhan(div)
elif dn == u'股东及出资信息':
self.load_nianbaogudongchuzi(div)
elif dn == u'对外投资信息':
self.load_nianbaoduiwaitouzi(div)
                elif dn == u'行政许可情况':  # administrative licensing; shows up on individual-business (个体工商户) reports, e.g. 濉溪县界沟鸿星尔克专卖
                    self.load_nianbaoxingzhengxuke(div)
                elif dn == u'资产状况信息':  # asset status; shows up on individual-business reports
                    self.load_nianbaozichangeti(div)
                elif dn == u'分支机构情况':  # branch offices; shows up on individual-business reports, e.g. 青阳县五梅经济林农民专业合作社
                    self.load_nianbaofenzhijigou(div)
                elif dn == u'生产经营情况':  # production/operation status; unclear how it differs from the enterprise asset-status section, e.g. 安徽修正堂药房连锁经营有限公司阜南县任庙店
                    self.load_nianbaoshengchanjingying(div)
elif dn == u'企业资产状况信息':
self.load_nianbaozichanzhuangkuang(div)
elif dn == u'对外提供保证担保信息':
self.load_nianbaoduiwaidanbao(div)
elif dn == u'股权变更信息':
self.load_nianbaoguquanbiangeng(div)
elif dn == u'修改记录':
self.load_nianbaoxiugai(div)
else:
# print u'未知区域div,看看是什么', dn
self.info(u'未知区域div,看看是什么' + dn)
        if self.nbjb_list:
            self.json_result['report_base'] = self.nbjb_list  # annual report: basic info
        if self.nbzczk_list:
            self.json_result['industry_status'] = self.nbzczk_list  # annual report: asset status
if self.nbdwdb_list:
self.json_result['guarantee'] = self.nbdwdb_list
if self.nbgdcz_list:
self.json_result['enterprise_shareholder'] = self.nbgdcz_list
if self.nbgqbg_list:
self.json_result['equity_transfer'] = self.nbgqbg_list
if self.nbwz_list:
self.json_result['web_site'] = self.nbwz_list
if self.nbxg_list:
self.json_result['modify'] = self.nbxg_list
if self.nbdwtz_list:
self.json_result['investment'] = self.nbdwtz_list
def load_nianbaojiben(self, soup):
family = 'report_base'
table_id = '40'
tr_element_list = soup.find_all('tr')#(".//*[@id='jbxx']/table/tbody/tr")
values = {}
json_list = []
for tr_element in tr_element_list[1:]:
# th_element_list = tr_element.find_all('th')
td_element_list = tr_element.find_all('td')
for td in td_element_list:
if td.text.strip():
td_list = td.text.replace(u'·', '').replace(u' ', '').strip().replace(u' ', '').split(u':',1)
col = td_list[0].strip()
val = td_list[1].strip()
# print col, val
col = qiyenianbaojiben_column_dict[col]
values[col] = val
values['rowkey'] = '%s_%s_%s_' %(self.cur_mc, self.y, table_id)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
json_list.append(values)
self.nbjb_list.append(values)
# if json_list:
# # print 'nianbaojibenxinxi', json_list
# self.json_result[family] = json_list
def load_nianbaowangzhan(self, soup):
family = 'web_site'
table_id = '41'
values = {}
json_list = []
try:
tr_num = len(soup.find_all(class_='detailsList'))
except:
tr_num = 0
# print 'lentr', tr_num
if tr_num > 1:
gd_th = soup.find_all(class_='detailsList')[0].find_all('th')
# print 'th_previous',cc.find(id='altDiv').find_previous_sibling().text
try:
iftr = soup.find_all(class_='detailsList')[1].find_all('tr')
except:
iftr = []
if len(iftr) > 0:
cnt = 1
for i in range(len(iftr)):
if iftr[i].text.strip():
gd_td = iftr[i].find_all('td')
for j in range(len(gd_th)):
th = gd_th[j].text.strip()
td = gd_td[j].text.strip()
# print i,j,th,td
values[qiyenianbaowangzhan_column_dict[th]] = td
values['rowkey'] = '%s_%s_%s_%d' %(self.cur_mc, self.y, table_id, cnt)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':id'] = str(cnt)
json_list.append(values)
self.nbwz_list.append(values)
values = {}
cnt += 1
# if json_list:
# # print 'nianbaowangzhan', json_list
# self.json_result[family] = json_list
def load_nianbaogudongchuzi(self, soup):
family = 'enterprise_shareholder'
table_id = '42'
values = {}
json_list = []
try:
tr_num = len(soup.find_all(class_='detailsList'))
except:
tr_num = 0
if tr_num > 1:
gd_th = soup.find_all(class_='detailsList')[0].find_all('th')
# print 'th_previous',cc.find(id='altDiv').find_previous_sibling().text
try:
iftr = soup.find_all(class_='detailsList')[1].find_all('tr')
except:
iftr = []
if len(iftr) > 0:
cnt = 1
for i in range(len(iftr)):
if iftr[i].text.strip():
gd_td = iftr[i].find_all('td')
for j in range(len(gd_th)):
th = gd_th[j].text.strip()
td = gd_td[j].text.strip()
# print i,j,th,td
values[qiyenianbaogudong_column_dict[th]] = td
values['rowkey'] = '%s_%s_%s_%d' %(self.cur_mc, self.y, table_id, cnt)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':id'] = str(cnt)
json_list.append(values)
self.nbgdcz_list.append(values)
values = {}
cnt += 1
# if json_list:
# # print 'nianbaogudongchuzi', json_list
# self.json_result[family] = json_list
    def load_nianbaoxingzhengxuke(self, soup):
        # individual-business annual report: administrative licensing -- column mapping not defined yet
        pass
    def load_nianbaozichangeti(self, soup):
        # individual-business annual report: asset status -- column mapping not defined yet
        pass
    def load_nianbaofenzhijigou(self, soup):
        # individual-business annual report: branch offices -- column mapping not defined yet
        pass
    def load_nianbaoshengchanjingying(self, soup):
        # annual report: production / business operation section -- not parsed yet
        pass
def load_nianbaoduiwaitouzi(self, soup):
family = 'investment'
table_id = '47'
values = {}
json_list = []
try:
tr_num = len(soup.find_all(class_='detailsList'))
except:
tr_num = 0
if tr_num > 1:
gd_th = soup.find_all(class_='detailsList')[0].find_all('th')
# print 'th_previous',cc.find(id='altDiv').find_previous_sibling().text
try:
iftr = soup.find_all(class_='detailsList')[1].find_all('tr')
except:
iftr = []
if len(iftr) > 0:
cnt = 1
for i in range(len(iftr)):
if iftr[i].text.strip():
gd_td = iftr[i].find_all('td')
for j in range(len(gd_th)):
th = gd_th[j].text.strip()
td = gd_td[j].text.strip()
# print i,j,th,td
values[qiyenianbaoduiwaitouzi_column_dict[th]] = td
values['rowkey'] = '%s_%s_%s_%d' %(self.cur_mc, self.y, table_id, cnt)
values[family + ':id'] = str(cnt)
json_list.append(values)
self.nbdwtz_list.append(values)
values = {}
cnt += 1
# if json_list:
# # print 'nianbaoduiwaitouzi', json_list
# self.json_result[family] = json_list
def load_nianbaozichanzhuangkuang(self, soup):
family = 'industry_status'
table_id = '43'
tr_element_list = soup.find_all("tr")
values = {}
json_list = []
for tr_element in tr_element_list[1:]:
# th_element_list = tr_element.find_all('th')
td_element_list = tr_element.find_all('td')
if len(td_element_list) > 0:
col_nums = len(td_element_list)
for i in range(col_nums/2):
col = td_element_list[i*2].get_text().strip().replace('\n','')
val = td_element_list[i*2+1].get_text().strip().replace('\n','')
if col != u'':
values[qiyenianbaozichanzhuangkuang_column_dict[col]] = val
# print col,val
values['rowkey'] = '%s_%s_%s_' %(self.cur_mc, self.y, table_id)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
json_list.append(values)
self.nbzczk_list.append(values)
# if json_list:
# # print 'json_nianbaozichan', json_list
# self.json_result[family] = json_list
def load_nianbaoduiwaidanbao(self, soup):
family = 'guarantee'
table_id = '44'
values = {}
json_list = []
try:
tr_num = len(soup.find_all(class_='detailsList'))
except:
tr_num = 0
if tr_num > 1:
gd_th = soup.find_all(class_='detailsList')[0].find_all('th')
# print 'th_previous',cc.find(id='altDiv').find_previous_sibling().text
try:
iftr = soup.find_all(class_='detailsList')[1].find_all('tr')
except:
iftr = []
if len(iftr) > 0:
cnt = 1
for i in range(len(iftr)):
if iftr[i].text.strip():
gd_td = iftr[i].find_all('td')
for j in range(len(gd_th)):
th = gd_th[j].text.strip()
td = gd_td[j].text.strip()
# print i,j,th,td
values[qiyenianbaoduiwaidanbao_column_dict[th]] = td
values['rowkey'] = '%s_%s_%s_%d' %(self.cur_mc, self.y, table_id, cnt)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':id'] = str(cnt)
json_list.append(values)
self.nbdwdb_list.append(values)
values = {}
cnt += 1
# if json_list:
# # print 'nianbaoduiwaidanbao', json_list
# self.json_result[family] = json_list
def load_nianbaoguquanbiangeng(self, soup):
family = 'equity_transfer'
table_id = '45'
values = {}
json_list = []
try:
tr_num = len(soup.find_all(class_='detailsList'))
except:
tr_num = 0
if tr_num > 1:
gd_th = soup.find_all(class_='detailsList')[0].find_all('th')
# print 'th_previous',cc.find(id='altDiv').find_previous_sibling().text
try:
iftr = soup.find_all(class_='detailsList')[1].find_all('tr')
except:
iftr = []
if len(iftr) > 0:
cnt = 1
for i in range(len(iftr)):
if iftr[i].text.strip():
gd_td = iftr[i].find_all('td')
for j in range(len(gd_th)):
th = gd_th[j].text.strip()
td = gd_td[j].text.strip()
# print i,j,th,td
values[qiyenianbaoguquanbiangeng_column_dict[th]] = td
values['rowkey'] = '%s_%s_%s_%d' %(self.cur_mc, self.y, table_id, cnt)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':id'] = str(cnt)
json_list.append(values)
self.nbgqbg_list.append(values)
values = {}
cnt += 1
# if json_list:
# # print 'nianbaoguquanbiangeng', json_list
# self.json_result[family] = json_list
def load_nianbaoxiugai(self, soup):
family = 'modify'
table_id = '46'
values = {}
json_list = []
try:
tr_num = len(soup.find_all(class_='detailsList'))
except:
tr_num = 0
if tr_num > 1:
gd_th = soup.find_all(class_='detailsList')[0].find_all('th')
# print 'th_previous',cc.find(id='altDiv').find_previous_sibling().text
try:
iftr = soup.find_all(class_='detailsList')[1].find_all('tr')
except:
iftr = []
if len(iftr) > 0:
cnt = 1
for i in range(len(iftr)):
if iftr[i].text.strip():
gd_td = iftr[i].find_all('td')
for j in range(len(gd_th)):
th = gd_th[j].text.strip()
td = gd_td[j].text.strip()
# print i,j,th,td
values[qiyenianbaoxiugaijilu_column_dict[th]] = td
values['rowkey'] = '%s_%s_%s_%d' %(self.cur_mc, self.y, table_id, cnt)
values[family + ':registrationno'] = self.cur_zch
values[family + ':enterprisename'] = self.cur_mc
values[family + ':id'] = str(cnt)
json_list.append(values)
self.nbxg_list.append(values)
values = {}
cnt += 1
# if json_list:
# # print 'nianbaoxiugai', json_list
# self.json_result[family] = json_list
    def get_request_302(self, url, t=0, **kwargs):
        """
        Send a GET request, handling 302 redirects manually.
        :param url: request URL
        :param t: current retry count
        :return: the final Response object
        """
try:
self.get_lock_id()
# print self.lock_id
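            # Redirects are handled manually (allow_redirects=False): on a 3xx response
            # the site returns a token in the Set-Cookie header, which gets appended to
            # the original URL before retrying.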
for i in range(10):
if self.use_proxy:
self.headers['Proxy-Authorization'] = self.proxy_config.get_auth_header(lock_id=self.lock_id, release_id=self.release_id)
r = self.session.get(url=url, headers=self.headers, allow_redirects=False, timeout=self.timeout, **kwargs)
if r.status_code != 200:
if 300 <= r.status_code < 400:
self.release_id = '0'
# print '302a', url,r.headers
if i == 0:
urlo = url
protocal, addr = urllib.splittype(url)
# print 'protocal', protocal, 'addr', addr
# url = protocal + '://' + urllib.splithost(addr)[0] + r.headers['Location']
url = urlo + '&' + r.headers['Set-Cookie']
# print '302b', url
continue
elif self.province in (u'浙江省', u'北京市') and r.status_code == 504:
del self.session
self.session = requests.session()
self.session.proxies = self.proxy_config.get_proxy()
raise Exception(u'504错误')
elif r.status_code == 403:
if self.use_proxy:
if self.lock_id != '0':
self.proxy_config.release_lock_id(self.lock_id)
self.lock_id = self.proxy_config.get_lock_id()
self.release_id = self.lock_id
else:
raise Exception(u'IP被封')
raise StatusCodeException(u'错误的响应代码 -> %d' % r.status_code)
else:
if self.release_id != '0':
self.release_id = '0'
return r
except (ChunkedEncodingError, StatusCodeException, ReadTimeout, ConnectTimeout, ProxyError, ConnectionError) as e:
if t == 5:
raise e
else:
return self.get_request_302(url, t+1, **kwargs)
    def post_request(self, url, t=0, **kwargs):
        """
        Send a POST request, with proxy support, IP locking and retry logic.
        :param url: request URL
        :param t: current retry count
        :return: the Response object
        """
try:
if 'timeout' not in kwargs:
kwargs['timeout'] = self.timeout
if 'headers' not in kwargs:
kwargs['headers'] = self.headers
if self.use_proxy:
kwargs['headers']['Proxy-Authorization'] = self.proxy_config.get_auth_header(lock_id=self.lock_id, release_id=self.release_id)
r = self.session.post(url=url, **kwargs)
# print r.status_code,r.headers,r.text
if r.status_code != 200:
self.info(u'错误的响应代码 -> %d\n%s' % (r.status_code, url))
if self.province in (u'浙江省', u'北京市') and r.status_code == 504:
del self.session
self.session = requests.session()
self.session.proxies = self.proxy_config.get_proxy()
raise Exception(u'504错误')
if r.status_code == 403:
if self.use_proxy:
if self.lock_id != '0':
self.proxy_config.release_lock_id(self.lock_id)
self.lock_id = self.proxy_config.get_lock_id()
self.release_id = self.lock_id
else:
raise Exception(u'IP被封')
raise StatusCodeException(u'错误的响应代码 -> %d\n%s' % (r.status_code, url))
else:
if self.release_id != '0':
self.release_id = '0'
return r
except (ChunkedEncodingError, StatusCodeException, ReadTimeout, ConnectTimeout, ProxyError, ConnectionError) as e:
if t == 15:
raise e
else:
return self.post_request(url, t+1, **kwargs)
def get_args():
args = dict()
for arg in sys.argv:
kv = arg.split('=')
if kv[0] == 'companyName':
args['companyName'] = kv[1].decode(sys.stdin.encoding, 'ignore')
elif kv[0] == 'taskId':
            args['taskId'] = kv[1].decode(sys.stdin.encoding, 'ignore')
elif kv[0] == 'accountId':
args['accountId'] = kv[1].decode(sys.stdin.encoding, 'ignore')
return args
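# Expected command-line form (an assumption based on the parsing above):
#   python <this_script>.py companyName=<企业名称> taskId=<task id> accountId=<account id>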
if __name__ == '__main__':
args_dict = get_args()
searcher = AnHui()
searcher.submit_search_request(u"安徽海德石油化工有限公司") # 安徽省茶叶公司蚌埠市批发站")#安徽海德石油化工有限公司") #
#安徽古酒业有限公司")#安徽华隆塑料有限责任公司")#安徽宝庭门业有限公司")
#合肥浍溪新能源汽车技术有限公司")#颍上县艺之家装饰工程有限公司")#霍山县工艺厂")#芜湖歌斐证捷投资中心(有限合伙)")
#阜阳市物资回收总公司第一经营部")#肥西合宴福酒店(普通合伙)")#合肥钢铁集团有限公司")#泾县月亮湾吊车租赁部")#亳州市元一置业有限公司")芜湖县城北友谊印刷厂
# searcher.get_tag_a_from_page(u"银川塔木金商贸有限公司")
# searcher.parse_detail(1)
# rst = searcher.parse_detail(1)
# print 'rst',rst
# searcher.get_credit_ticket()
# print searcher.credit_ticket
# print json.dumps(args_dict, ensure_ascii=False)
# searcher = LiaoNing()
# searcher.submit_search_request(keyword=args_dict['companyName'], account_id=args_dict['accountId'], task_id=args_dict['taskId'])
# p, t = '210200000011992092800017', '6210'
# p, t = '21060200002200908053570X', '1151'
# print searcher.get_gu_dong_detail('210200000011992092800017', '754198044')
# pattern = re.compile("\s")
# print pattern.sub('', '12 434 5')
| [
"[email protected]"
] | |
74dc0695aadeb764d830515c0fcfee9c3c7fce09 | 5f4e13201d4c5b7edc8dbbda289380682a187bec | /deps/scikit-image/doc/examples/filters/plot_deconvolution.py | e934221396c76735c9652da0a75a70f6b0ab65a3 | [
"MIT",
"BSD-3-Clause"
] | permissive | intellivoid/CoffeeHousePy | 92f4fb344de757837c3d3da05cb5513e90408039 | 57c453625239f28da88b88ddd0ae5f1ecdd4de3c | refs/heads/master | 2023-02-23T14:32:01.606630 | 2021-01-28T02:57:10 | 2021-01-28T02:57:10 | 324,419,067 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | """
=====================
Image Deconvolution
=====================
In this example, we deconvolve an image using Richardson-Lucy
deconvolution algorithm ([1]_, [2]_).
The algorithm is based on a PSF (Point Spread Function),
where PSF is described as the impulse response of the
optical system. The blurred image is sharpened through a number of
iterations, which needs to be hand-tuned.
.. [1] William Hadley Richardson, "Bayesian-Based Iterative
Method of Image Restoration",
J. Opt. Soc. Am. A 27, 1593-1607 (1972), :DOI:`10.1364/JOSA.62.000055`
.. [2] https://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import convolve2d as conv2
from skimage import color, data, restoration
astro = color.rgb2gray(data.astronaut())
psf = np.ones((5, 5)) / 25
astro = conv2(astro, psf, 'same')
# Add Noise to Image
astro_noisy = astro.copy()
astro_noisy += (np.random.poisson(lam=25, size=astro.shape) - 10) / 255.
# Restore Image using Richardson-Lucy algorithm
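# Richardson-Lucy update (standard form): u_{t+1} = u_t * conv(d / conv(u_t, psf), psf_mirror),
# where conv() is convolution, d is the observed blurry image and psf_mirror is the flipped PSF;
# restoration.richardson_lucy below simply iterates this update.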
deconvolved_RL = restoration.richardson_lucy(astro_noisy, psf, iterations=30)
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(8, 5))
plt.gray()
for a in (ax[0], ax[1], ax[2]):
a.axis('off')
ax[0].imshow(astro)
ax[0].set_title('Original Data')
ax[1].imshow(astro_noisy)
ax[1].set_title('Noisy data')
ax[2].imshow(deconvolved_RL, vmin=astro_noisy.min(), vmax=astro_noisy.max())
ax[2].set_title('Restoration using\nRichardson-Lucy')
fig.subplots_adjust(wspace=0.02, hspace=0.2,
top=0.9, bottom=0.05, left=0, right=1)
plt.show()
| [
"[email protected]"
] | |
739a057d4d441fd18cf2d851c70c964bdf3dd74e | 9f1b8a1ada57198e2a06d88ddcdc0eda0c683df7 | /submission - Homework1/ALEXANDER T KOHLROSER_2240_assignsubmission_file_HW1/HW1/index_nested_list.py | ca01168c4a25471ab1064be8afb0e0ea61dca486 | [] | no_license | sendurr/spring-grading | 90dfdced6327ddfb5c311ae8f42ae1a582768b63 | 2cc280ee3e0fba02e95b6e9f45ad7e13bc7fad54 | refs/heads/master | 2020-04-15T17:42:10.781884 | 2016-08-29T20:38:17 | 2016-08-29T20:38:17 | 50,084,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | q = [['a','b','c'],['d','e','f'],['g','h',]]
print q[0][0]
#calls the top-left element of the grid ('a')
print q[1]
#calls the middle row
print q[2][1]
#calls the second column of the third row
print q[-1][-2]
print("q[-1][-2] is equivalent to q[2][0]: -1 wraps around to the last row and -2 to the second entry from the end of that row, so this also prints 'g'.")
#ends the code | [
"[email protected]"
] | |
60ef1583477da2f9e5ba9f8242678d5c50047692 | 7da12f580db1c2d4e5e9d3348476c83a6c901c15 | /shop/admin.py | a9189a8cde6f32a5393b8ba54541cfd01c3228c4 | [] | no_license | aeksei/shop_step9 | 10967756f58f2d001804efca77c3f2da3dbaa616 | 80a0dc4d468a025863db2f32d54cae7b087b4a4c | refs/heads/master | 2022-09-29T19:57:43.324752 | 2020-04-22T17:10:16 | 2020-04-22T17:10:16 | 257,964,572 | 0 | 0 | null | 2020-06-04T17:40:33 | 2020-04-22T16:55:52 | CSS | UTF-8 | Python | false | false | 116 | py | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Products)
| [
"[email protected]"
] | |
8b307a46ca1af76374ddabfb9bce3c7ff2c5d912 | bf80f309b6deb240be5fa82428ccffaedcb7ecf1 | /contrib/devtools/update-translations.py | a65cd759c1fa4b440ea27665cd95756623258d97 | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | danxb827/vektorcoin | b4677be9320de92a514d40eac213f51d1bc5e106 | be2a7adba6d00d1e378c969c77ea90daeecef9f7 | refs/heads/main | 2023-03-06T11:49:18.688978 | 2021-02-09T14:54:10 | 2021-02-09T14:54:10 | 336,577,132 | 1 | 1 | MIT | 2021-02-09T14:54:12 | 2021-02-06T16:05:36 | null | UTF-8 | Python | false | false | 8,441 | py | #!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'vektorcoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
# Regexp to check for VEKTORCOIN addresses
ADDRESS_REGEXP = re.compile('([13]|bc1)[a-zA-Z0-9]{30,}')
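# Note: this pattern is inherited from upstream Bitcoin and matches legacy base58 addresses
# (leading '1' or '3') and bech32 addresses (leading 'bc1'); whether VEKTORCOIN uses the same
# prefixes is an assumption carried over from upstream.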
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
sys.exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
sys.exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
specifiers.append(s[percent+1])
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def contains_vektorcoin_addr(text, errors):
if text != None and ADDRESS_REGEXP.search(text) != None:
errors.append('Translation "%s" contains a vektorcoin address. This will be removed.' % (text))
return True
return False
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus) and not contains_vektorcoin_addr(translation, errors)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| [
"[email protected]"
] | |
de7b13d92631a345de71ca8e1d4f3dc2b0aafc2c | c4599b387df99da05475dc3e464c6df25511919f | /tensorflow/python/ops/nn_batchnorm_test.py | 3292a7bfd7401541a469eb317ce0b6ec0fa5475c | [
"Apache-2.0"
] | permissive | agrawalnishant/tensorflow | 0ca19b303d1d474febe4e895b4dc558c864706f7 | cce87661db51b78fb406525285c9402be44e019b | refs/heads/master | 2022-12-09T10:53:51.852543 | 2022-08-27T19:36:50 | 2022-08-27T19:36:50 | 60,673,664 | 2 | 9 | Apache-2.0 | 2022-11-26T16:19:03 | 2016-06-08T06:20:26 | C++ | UTF-8 | Python | false | false | 23,420 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops
class BatchNormalizationTest(tf.test.TestCase):
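  """Checks the deprecated V1 batch-norm op, its backward-compatible wrapper and
  tf.nn.batch_normalization (V2) against a NumPy reference, including gradients,
  for all combinations of the scale/shift flags."""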
def _npBatchNorm(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
y = (x - m) / np.sqrt(v + epsilon)
y = y * gamma if scale_after_normalization else y
return y + beta if shift_after_normalization else y
def _opsBatchNorm(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
y = (x - m) * tf.rsqrt(v + epsilon)
if scale_after_normalization:
y = gamma * y
return y + beta if shift_after_normalization else y
def _tfBatchNormV1(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization):
"""Original implementation."""
# _batch_norm_with_global_normalization is deprecated in v9
tf.get_default_graph().graph_def_versions.producer = 8
# pylint: disable=protected-access
return gen_nn_ops._batch_norm_with_global_normalization(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
# pylint: enable=protected-access
def _tfBatchNormV1BW(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization):
"""Re-implementation of the original kernel for backward compatibility."""
return tf.nn.batch_norm_with_global_normalization(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
def _tfBatchNormV2(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
"""New implementation."""
return tf.nn.batch_normalization(
x, m, v, beta if shift_after_normalization else None,
gamma if scale_after_normalization else None, epsilon)
def testBatchNorm(self):
x_shape = [3, 5, 4, 2]
param_shape = [2]
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = tf.constant(x_val, name="x")
m = tf.constant(m_val, name="m")
v = tf.constant(v_val, name="v")
beta = tf.constant(beta_val, name="beta")
gamma = tf.constant(gamma_val, name="gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn2 = self._tfBatchNormV2(
x, m, v, beta, gamma, epsilon, scale_after_normalization,
shift_after_normalization)
bn1bw = self._tfBatchNormV1BW(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
bn1 = self._tfBatchNormV1(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
on = self._opsBatchNorm(
x, m, v, beta, gamma, epsilon, scale_after_normalization,
shift_after_normalization)
np_bn = self._npBatchNorm(
x_val, m_val, v_val, beta_val, gamma_val, epsilon,
scale_after_normalization, shift_after_normalization)
tf_bn_v2, tf_bn_v1bw, tf_bn_v1, ops_bn = sess.run(
[bn2, bn1bw, bn1, on])
self.assertAllClose(np_bn, ops_bn, atol=0.00001)
self.assertAllClose(np_bn, tf_bn_v2, atol=0.00001)
self.assertAllClose(tf_bn_v2, ops_bn, atol=0.00001)
# shift_after_normalization=False is not supported in v1.
if shift_after_normalization:
self.assertAllClose(np_bn, tf_bn_v1bw, atol=0.00001)
self.assertAllClose(np_bn, tf_bn_v1, atol=0.00001)
self.assertAllClose(tf_bn_v1, ops_bn, atol=0.00001)
self.assertAllClose(tf_bn_v1bw, ops_bn, atol=0.00001)
def _testBatchNormGradient(self, param_index, tag, scale_after_normalization,
shift_after_normalization, version,
err_tolerance=1e-11):
x_shape = [3, 5, 4, 5]
param_shape = [5]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
m_val = np.random.random_sample(param_shape).astype(np.float64)
v_val = np.random.random_sample(param_shape).astype(np.float64)
beta_val = np.random.random_sample(param_shape).astype(np.float64)
gamma_val = np.random.random_sample(param_shape).astype(np.float64)
with self.test_session():
x = tf.constant(x_val, name="x")
m = tf.constant(m_val, name="m")
v = tf.constant(v_val, name="v")
beta = tf.constant(beta_val, name="beta")
gamma = tf.constant(gamma_val, name="gamma")
epsilon = 0.001
if version == 1:
output = self._tfBatchNormV1(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
elif version == 2:
output = self._tfBatchNormV2(
x, m, v, beta, gamma, epsilon, scale_after_normalization,
shift_after_normalization)
else:
print("Invalid version", version)
raise ValueError()
all_params = [x, m, v, beta, gamma]
all_shapes = [x_shape, param_shape, param_shape, param_shape, param_shape]
err = tf.test.compute_gradient_error(
all_params[param_index], all_shapes[param_index], output, x_shape)
print("Batch normalization v%d %s gradient %s scale and %s shift err = " %
(version, tag, "with" if scale_after_normalization else "without",
"with" if shift_after_normalization else "without"),
err)
self.assertLess(err, err_tolerance)
def _testBatchNormGradientInAllNeedConfigs(
self, param_index, tag, err_tolerance=1e-11):
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
# shift_after_normalization=False is not supported in version 1.
for v in ([1, 2] if shift_after_normalization else [2]):
self._testBatchNormGradient(
param_index, tag, scale_after_normalization,
shift_after_normalization, v, err_tolerance)
def testBatchNormInputGradient(self):
self._testBatchNormGradientInAllNeedConfigs(0, "x")
def testBatchNormMeanGradient(self):
self._testBatchNormGradientInAllNeedConfigs(1, "mean")
def testBatchNormVarianceGradient(self):
self._testBatchNormGradientInAllNeedConfigs(2, "variance",
err_tolerance=1e-03)
def testBatchNormBetaGradient(self):
# Since beta does not exist when scale_after_normalization=False, we only
# test for scale_after_normalization=True.
for scale_after_normalization in [True, False]:
for v in [1, 2]:
self._testBatchNormGradient(3, "beta", scale_after_normalization, True,
v)
def testBatchNormGammaGradient(self):
# If scale_after_normalization is False, backprop for gamma in v1
# will be 0. In version 2 of the API, if scale_after_normalization is False,
# gamma is not used at all, and the gradient is None, which displeases the
# gradient checker.
for scale_after_normalization in [True, False]:
self._testBatchNormGradient(4, "gamma", scale_after_normalization, True,
1)
for shift_after_normalization in [True, False]:
self._testBatchNormGradient(4, "gamma", True, shift_after_normalization,
2)
def testBatchNormGradImpl(self):
x_shape = [7, 5, 4, 6]
param_shape = [6]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
backprop_val = np.random.random_sample(x_shape).astype(np.float32)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
x = tf.constant(x_val, name="x")
m = tf.constant(m_val, name="m")
v = tf.constant(v_val, name="v")
beta = tf.constant(beta_val, name="beta")
gamma = tf.constant(gamma_val, name="gamma")
backprop = tf.constant(backprop_val, name="backprop")
epsilon = 0.001
for scale_after_normalization in [True, False]:
# _batch_norm_with_global_normalization_grad is deprecated in v9
tf.get_default_graph().graph_def_versions.producer = 8
grad = gen_nn_ops._batch_norm_with_global_normalization_grad(
x, m, v, gamma, backprop, epsilon, scale_after_normalization)
dx, dm, dv, db, dg = grad
self.assertEqual(grad.dx, dx)
self.assertEqual(grad.dm, dm)
self.assertEqual(grad.dv, dv)
self.assertEqual(grad.db, db)
self.assertEqual(grad.dg, dg)
on = self._opsBatchNorm(
x, m, v, beta, gamma, epsilon, scale_after_normalization, True)
odx, odm, odv, odb, odg = tf.gradients(
[on], [x, m, v, beta, gamma], [backprop])
if scale_after_normalization:
all_grads = sess.run([dx, dm, dv, db, dg, odx, odm, odv, odb, odg])
to_check = ["dx", "dm", "dv", "db", "dg"]
else:
all_grads = sess.run([dx, dm, dv, db, odx, odm, odv, odb])
to_check = ["dx", "dm", "dv", "db"]
for i, _ in enumerate(to_check):
self.assertAllClose(
all_grads[i + len(to_check)], all_grads[i], atol=0.000001)
def testBatchNormKeepDims(self):
"""Test for tf.nn.moments(..., keep_dims=True / False).
Make sure that parameters with shape (1, 1, 1, depth) yield the same
result as parameters with shape (depth)
"""
x_shape = (3, 5, 4, 2)
param_shape = (2)
keep_dims_param_shape = (1, 1, 1, 2)
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = tf.constant(x_val, name="x")
m = tf.constant(m_val, name="m")
v = tf.constant(v_val, name="v")
beta = tf.constant(beta_val, name="beta")
gamma = tf.constant(gamma_val, name="gamma")
keep_dims_m = tf.reshape(m, keep_dims_param_shape, name="keep_dims_m")
keep_dims_v = tf.reshape(v, keep_dims_param_shape, name="keep_dims_v")
keep_dims_beta = tf.reshape(
beta, keep_dims_param_shape, name="keep_dims_beta")
keep_dims_gamma = tf.reshape(
gamma, keep_dims_param_shape, name="keep_dims_gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn = self._tfBatchNormV2(
x, m, v, beta, gamma, epsilon, scale_after_normalization,
shift_after_normalization)
keep_dims_bn = self._tfBatchNormV2(
x, keep_dims_m, keep_dims_v, keep_dims_beta,
keep_dims_gamma, epsilon, scale_after_normalization,
shift_after_normalization)
tf_batch_norm, keep_dims_tf_batch_norm = sess.run(
[bn, keep_dims_bn])
self.assertEquals(x_shape, tf_batch_norm.shape)
self.assertEquals(x_shape, keep_dims_tf_batch_norm.shape)
self.assertAllClose(
tf_batch_norm, keep_dims_tf_batch_norm, atol=0.000001)
def _testBatchNormArbitraryShapes(self, x_shape, param_shape, atol=0.0001):
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = tf.constant(x_val, name="x")
m = tf.constant(m_val, name="m")
v = tf.constant(v_val, name="v")
beta = tf.constant(beta_val, name="beta")
gamma = tf.constant(gamma_val, name="gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn = self._tfBatchNormV2(
x, m, v, beta, gamma, epsilon, scale_after_normalization,
shift_after_normalization)
np_batch_norm = self._npBatchNorm(
x_val, m_val, v_val, beta_val, gamma_val, epsilon,
scale_after_normalization, shift_after_normalization)
[tf_batch_norm] = sess.run([bn])
self.assertEquals(x_shape, np_batch_norm.shape)
self.assertEquals(x_shape, tf_batch_norm.shape)
self.assertAllClose(np_batch_norm, tf_batch_norm, atol=atol)
def testBatchNormArbitraryShapes(self):
"""Test for a variety of shapes and moments.
Batch normalization is expected to work regardless of the position and
dimensionality of the 'depth' axis/axes.
"""
self._testBatchNormArbitraryShapes((3, 3), (1, 3))
self._testBatchNormArbitraryShapes((3, 3), (3, 1))
self._testBatchNormArbitraryShapes((3, 2, 4, 5), (1, 2, 1, 1))
self._testBatchNormArbitraryShapes((2, 3, 2, 4, 5), (1, 1, 1, 4, 5),
atol=0.005)
class SufficientStatisticsTest(tf.test.TestCase):
def _npSuffStats(self, x, axes, shift, keep_dims):
axis = tuple(axes)
if shift is not None:
m_ss = np.sum(x - shift, axis=axis, keepdims=keep_dims)
v_ss = np.sum((x - shift) * (x - shift), axis=axis, keepdims=keep_dims)
else:
m_ss = np.sum(x, axis=axis, keepdims=keep_dims)
v_ss = np.sum(x * x, axis=axis, keepdims=keep_dims)
count = 1.0
for d in xrange(x.ndim):
if d in set(axes):
count *= x.shape[d]
    if not keep_dims and shift is not None and np.ndim(shift) > 0:
      shift = np.squeeze(shift, axis=axis)
return count, m_ss, v_ss, shift
def _opSuffStats(self, x, axes, shift, keep_dims):
return tf.nn.sufficient_statistics(x, axes, shift, keep_dims)
def _testSuffStats(self, x_shape, axes, shift, keep_dims, has_shape):
x_val = np.random.random_sample(x_shape).astype(np.float32)
np_c, np_m, np_v, np_s = self._npSuffStats(x_val, axes, shift, keep_dims)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
if has_shape:
x = tf.constant(x_val, name="x")
x.set_shape(x_shape)
op_c, op_m, op_v, op_s = self._opSuffStats(x, axes, shift, keep_dims)
if shift:
tf_c, tf_m, tf_v, tf_s = sess.run([op_c, op_m, op_v, op_s])
else:
tf_c, tf_m, tf_v = sess.run([op_c, op_m, op_v])
else:
x = tf.placeholder(dtype=tf.float32,
shape=[None] * len(x_shape),
name="x")
op_c, op_m, op_v, op_s = self._opSuffStats(x, axes, shift, keep_dims)
if shift:
tf_c, tf_m, tf_v, tf_s = sess.run(
[op_c, op_m, op_v, op_s],
feed_dict={x: x_val})
else:
tf_c, tf_m, tf_v = sess.run(
[op_c, op_m, op_v],
feed_dict={x: x_val})
self.assertAllClose(np_c, tf_c, atol=0.000001)
self.assertAllClose(np_m, tf_m, atol=0.000001)
self.assertAllClose(np_v, tf_v, atol=0.000001)
if shift:
self.assertAllClose(np_s, tf_s, atol=0.000001)
def testSuffStats(self):
for has_shape in [True, False]:
for keep_dims in [True, False]:
for shift in [None, 1.0]:
self._testSuffStats([2, 3], [1], shift, keep_dims, has_shape)
self._testSuffStats([2, 3], [0], shift, keep_dims, has_shape)
self._testSuffStats([1, 2, 3], [0, 2], shift, keep_dims, has_shape)
class NormalizeMomentsTest(tf.test.TestCase):
def _npNormalizeMoments(self, counts, mean_ss, variance_ss, shift):
mean = mean_ss / counts
variance = variance_ss / counts - mean * mean
if shift is not None:
mean += shift
return mean, variance
def _opNormalizeMoments(self, counts, mean_ss, variance_ss, shift):
return tf.nn.normalize_moments(counts, mean_ss, variance_ss, shift)
def _testNormalizeMoments(self, shape, shift):
counts = np.ones([1]).astype(np.float32)
mean_ss = np.random.random_sample(shape).astype(np.float32)
variance_ss = np.random.random_sample(shape).astype(np.float32)
variance_ss *= variance_ss
if shift:
shift_v = np.random.random_sample(shape).astype(np.float32)
else:
shift_v = None
npm, npv = self._npNormalizeMoments(counts, mean_ss, variance_ss, shift_v)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
tf_counts = tf.constant(counts, name="counts")
tf_mean_ss = tf.constant(mean_ss, name="mean_ss")
tf_variance_ss = tf.constant(variance_ss, name="variance_ss")
if shift:
tf_shift_v = tf.constant(shift_v, name="shift")
else:
tf_shift_v = None
opm, opv = self._opNormalizeMoments(tf_counts, tf_mean_ss,
tf_variance_ss, tf_shift_v)
tfm, tfv = sess.run([opm, opv])
self.assertAllClose(npm, tfm, atol=0.000001)
self.assertAllClose(npv, tfv, atol=0.000001)
def testNormalizeMoments(self):
for shift in [None, 4.0]:
self._testNormalizeMoments([3], shift)
self._testNormalizeMoments([2, 3], shift)
class MomentsTest(tf.test.TestCase):
def RunMomentTestWithDynamicShape(self, shape, axes, keep_dims):
with self.test_session():
# shape = [batch, width, height, depth]
assert len(shape) == 4
x_numpy = np.random.normal(size=shape).astype(np.float32)
x = tf.placeholder(tf.float32, shape=[None] * len(shape))
mean, var = tf.nn.moments(x, axes, keep_dims=keep_dims)
num_elements = np.prod([shape[i] for i in axes])
ax = tuple(axes)
expected_mean = np.sum(
x_numpy, axis=ax, keepdims=keep_dims) / num_elements
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = np.sum(
np.multiply(x_numpy, x_numpy),
axis=ax,
keepdims=keep_dims) / num_elements
expected_variance = expected_x_squared - expected_mean_squared
# Check that the moments are correct.
self.assertAllClose(expected_mean, mean.eval(feed_dict={x: x_numpy}))
self.assertAllClose(expected_variance, var.eval(feed_dict={x: x_numpy}))
def RunMomentTest(self, shape, axes, keep_dims):
with self.test_session():
# shape = [batch, width, height, depth]
assert len(shape) == 4
x_numpy = np.random.normal(size=shape).astype(np.float32)
x = tf.constant(x_numpy)
mean, var = tf.nn.moments(x, axes, keep_dims=keep_dims)
num_elements = np.prod([shape[i] for i in axes])
ax = tuple(axes)
expected_mean = np.sum(
x_numpy, axis=ax, keepdims=keep_dims) / num_elements
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = np.sum(
np.multiply(x_numpy, x_numpy),
axis=ax,
keepdims=keep_dims) / num_elements
expected_variance = expected_x_squared - expected_mean_squared
# Check that the moments are correct.
self.assertAllClose(expected_mean, mean.eval())
self.assertAllClose(expected_variance, var.eval())
def testBasic(self):
for keep_dims in [False, True]:
self.RunMomentTest(shape=[2, 3, 5, 4], axes=[0], keep_dims=keep_dims)
self.RunMomentTestWithDynamicShape(
shape=[2, 3, 5, 4], axes=[0], keep_dims=keep_dims)
def testGlobalNormalization(self):
for keep_dims in [False, True]:
self.RunMomentTest(
shape=[2, 3, 5, 4], axes=[0, 1, 2], keep_dims=keep_dims)
self.RunMomentTestWithDynamicShape(
shape=[2, 3, 5, 4], axes=[0, 1, 2], keep_dims=keep_dims)
def testAxes(self):
for keep_dims in [False, True]:
self.RunMomentTest(
shape=[2, 3, 5, 4], axes=[1, 2, 3], keep_dims=keep_dims)
self.RunMomentTestWithDynamicShape(
shape=[2, 3, 5, 4], axes=[1, 2, 3], keep_dims=keep_dims)
def _testGlobalGradient(self, from_y="mean"):
with self.test_session():
x_shape = [3, 5, 4, 2]
x_val = np.random.random_sample(x_shape).astype(np.float64)
x = tf.constant(x_val)
x.set_shape(x_shape)
axes = [0, 1, 2]
y_shape = [2] # Depth of x
out_mean, out_var = tf.nn.moments(x, axes)
if from_y == "mean":
y = out_mean
elif from_y == "var":
y = out_var
err = tf.test.compute_gradient_error(x, x_shape, y, y_shape)
print("Moments %s gradient err = %g" % (from_y, err))
self.assertLess(err, 1e-11)
def testMeanGlobalGradient(self):
self._testGlobalGradient(from_y="mean")
def testVarGlobalGradient(self):
self._testGlobalGradient(from_y="var")
def testOutputNamesNoKeep(self):
"""Make sure the output names are stable."""
with self.test_session():
mean, var = tf.nn.moments(tf.constant([1]), [0], keep_dims=False)
self.assertEquals(mean.op.name, "moments/normalize/mean")
self.assertEquals(var.op.name, "moments/normalize/variance")
def testOutputNamesKeep(self):
"""Make sure the output names are stable."""
with self.test_session():
mean, var = tf.nn.moments(tf.constant([1]), [0], keep_dims=True)
self.assertEquals(mean.op.name, "moments/normalize/mean")
self.assertEquals(var.op.name, "moments/normalize/variance")
if __name__ == "__main__":
tf.test.main()
| [
"[email protected]"
] | |
fc884806482e9cabe64688601017f54570459f99 | 515a97129ce1b2b8eecca4b2087fde8985b82d5b | /Code-Scraps/old_modules/SpiceGif/gif/Thumbs_Up.py | cd3625bd689d898907897679c0cf84a12536f03c | [] | no_license | SpiceBot/scraps | 3ad6e81ac75e2b6a684fea64eb7e75477b0f4f63 | 90125e1397b57ac87cae5f3e506363aa04ddffdc | refs/heads/master | 2020-05-02T21:51:01.297114 | 2019-03-28T15:38:28 | 2019-03-28T15:38:28 | 178,232,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,676 | py | #!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals, absolute_import, print_function, division
import sopel.module
import urllib2
import json
from BeautifulSoup import BeautifulSoup
import random
import sys
import os
moduledir = os.path.dirname(__file__)
shareddir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(shareddir)
from BotShared import *
gifshareddir = os.path.dirname(os.path.dirname(__file__))
sys.path.append(gifshareddir)
from GifShared import *
@sopel.module.commands('thumbsup')
def mainfunction(bot, trigger):
enablestatus, triggerargsarray, botcom, instigator = spicebot_prerun(bot, trigger, trigger.group(1))
if not enablestatus:
# IF "&&" is in the full input, it is treated as multiple commands, and is split
commands_array = spicemanip(bot, triggerargsarray, "split_&&")
if commands_array == []:
commands_array = [[]]
for command_split_partial in commands_array:
triggerargsarray_part = spicemanip(bot, command_split_partial, 'create')
execute_main(bot, trigger, triggerargsarray_part, botcom, instigator)
def execute_main(bot, trigger, triggerargsarray, botcom, instigator):
gif = getGif(bot, {"query": "thumbs up"})
instigator = trigger.nick
responsemsg = [' a thumbs up.', ' a pat on the back.', ' a sarcastic smile.', ' a high five.']
if not gif["error"]:
osd(bot, trigger.sender, 'say', "%s Result (#%s): %s" % (gif['gifapi'].title(), gif['returnnum'], gif['returnurl']))
else:
osd(bot, trigger.sender, 'action', 'gives ' + instigator + random.choice(responsemsg))
| [
"[email protected]"
] | |
afa189e5c781d2c121d42e882f1e8fcfd10bc8f1 | 30f8722391c3403a147a7a45a56013693141bb52 | /configs/htc/htc_hrnetv2p_w48_20e_kaggle_pku_no_semantic_translation_wudi.py | b3c55b456aff7ca11641bbe802d66234b3a7689c | [
"Apache-2.0"
] | permissive | fsxy1063200037/Kaggle_PKU_Baidu | 2bfbe1e04ae031b6f7856067824e7f03c31ca847 | 7ee39b60be25558539ac47d80a2f89f39698b4a6 | refs/heads/master | 2022-04-12T21:02:36.780457 | 2020-04-07T09:01:50 | 2020-04-07T09:01:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,316 | py | # model settings
model = dict(
type='HybridTaskCascade',
num_stages=3,
interleaved=True,
mask_info_flow=True,
car_cls_info_flow=False,
backbone=dict(
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,)),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(48, 96)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(48, 96, 192)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(48, 96, 192, 384)))),
neck=dict(
type='HRFPN',
in_channels=[48, 96, 192, 384],
out_channels=256),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=81,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
with_semantic_loss=False,
with_car_cls_rot=True,
with_translation=True,
# This is DI WU's customised model
semantic_fusion=('bbox', 'mask', 'car_cls_rot'),
car_cls_rot_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
car_cls_rot_head=dict(
type='SharedCarClsRotHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=14,
num_classes=34, # There are total 34 car classes
reg_class_agnostic=True,
loss_car_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_quaternion=dict(type='L1', beta=1.0, loss_weight=1.0)),
translation_head=dict(
type='SharedTranslationHead',
in_channels_bboxes=4,
in_channels_carclsrot=1024,
fc_out_channels=100,
num_translation_reg=3,
bbox_relative=False, # if bbox_relative=False, then it requires training/test input the same
translation_bboxes_regression=False, # If set to True, we will have a SSD like offset regression
bboxes_regression=dict(type='maxIoU', iou_thresh=0.1),
#bboxes_regression=dict(type='allIoU', iou_thresh=0.1), # This is only effective during test
loss_translation=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
bayesian_weight_learning=True, # If set to true, the loss weight coefficient will be updated.
)
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25],
car_cls_weight=1.0,
rot_weight=10.,
translation_weight=10.,
)
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100,
mask_thr_binary=0.5),
keep_all_stages=False,
)
# dataset settings
dataset_type = 'KagglePKUDataset'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# Add albumentation transform
# albu_train_transforms = [
# dict(type='RandomBrightnessContrast', brightness_limit=0.2, contrast_limit=0.5, p=0.2),
# dict(type='GaussianBlur', blur_limit=20, p=0.1),
# dict(type='GaussNoise', var_limit=(10, 80.), p=0.1),
# dict(
# type='OneOf',
# transforms=[
# dict(
# type='RGBShift',
# r_shift_limit=30,
# g_shift_limit=30,
# b_shift_limit=30,
# p=0.2),
# dict(
# type='HueSaturationValue',
# hue_shift_limit=20,
# sat_shift_limit=20,
# val_shift_limit=20,
# p=0.1)
# ],
# p=0.1),
# ]
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True,
with_carcls_rot=True, with_translation=True, with_camera_rot=True),
dict(type='CameraRotation'),
dict(type='CropBottom', bottom_half=1480),
dict(type='Resize', img_scale=(1664, 576), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
# dict(
# type='Albu',
# transforms=albu_train_transforms,
# update_pad_shape=False,
# skip_img_without_anno=True),
dict(type='DefaultFormatBundle'),
dict(type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks',
'carlabels', 'quaternion_semispheres', 'translations',
'scale_factor']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='CropBottom', bottom_half=1480),
#dict(type='CropCentreResize', top=50, bottom=100, left=25, right=50),
#dict(type='CropCentreResize', top=100, bottom=250, left=50, right=100),
dict(
type='MultiScaleFlipAug',
img_scale=(1664, 576), # (576, 1600, 3)
flip=False, # test pipelines doest not need this
transforms=[
dict(type='Resize', img_scale=(1664, 576), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.), # We always want to have this flip_ratio=1.0 for test
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
# data_root = '/data/Kaggle/pku-autonomous-driving/'
data_root = '/data/Kaggle/ApolloScape_3D_car/train/'
data = dict(
imgs_per_gpu=1,
workers_per_gpu=2,
train=dict(
type=dataset_type,
data_root=data_root,
#ann_file='/data/cyh/kaggle/kaggle_apollo_combine_6692.json',
# ann_file=data_root + 'apollo_kaggle_combined_6725_wudi.json',
ann_file='/data/Kaggle/kaggle_apollo_combined_6691_origin.json', # 6691 means the final cleaned data
img_prefix=data_root + 'train_images/',
pipeline=train_pipeline,
rotation_augmenation=True),
val=dict(
type=dataset_type,
data_root=data_root,
ann_file='/data/Kaggle/pku-autonomous-driving/validation.csv',
img_prefix='/data/Kaggle/pku-autonomous-driving/validation_images/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
ann_file="",
#ann_file='/data/Kaggle/ApolloScape_3D_car/train/split/validation-list.txt',
#img_prefix='/data/Kaggle/pku-autonomous-driving/validation_images_RandomBrightnessContrast', # valid variation
#img_prefix='/data/Kaggle/pku-autonomous-driving/validation_images_RGBShift', # valid variation
#img_prefix='/data/Kaggle/pku-autonomous-driving/validation_images_JpegCompression', # valid variation
#img_prefix='/data/Kaggle/pku-autonomous-driving/validation_images_GaussianBlur', # valid variation
#img_prefix='/data/Kaggle/pku-autonomous-driving/validation_images_GaussNoise', # valid variation
#img_prefix='/data/Kaggle/pku-autonomous-driving/validation_images_RandomContrast', # valid variation
#img_prefix='/data/Kaggle/pku-autonomous-driving/validation_images_HueSaturationValue', # valid variation
#img_prefix='/data/Kaggle/pku-autonomous-driving/validation_images_CLAHE', # valid variation
#img_prefix='/data/Kaggle/pku-autonomous-driving/validation_images', # We create 400 validation images
#img_prefix='/data/Kaggle/pku-autonomous-driving/test_images',
#img_prefix='/data/Kaggle/ApolloScape_3D_car/train/images',
img_prefix='/data/Kaggle/ApolloScape_3D_car/3d-car-understanding-test/test/images',
pipeline=test_pipeline))
evaluation = dict(
conf_thresh=0.1,
interval=1,
)
# optimizer
optimizer = dict(type='Adam', lr=0.0003) # We increase the learning rate to 3e-4 (It is supposed to be the best practice)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[80, 180])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 200
#dist_params = dict(backend='nccl')
dist_params = dict(backend='nccl', init_method="tcp://127.0.0.1:8001")
log_level = 'INFO'
work_dir = '/data/Kaggle/wudi_data/'
load_from = None
#load_from = '/data/Kaggle/mmdet_pretrained_weights/trimmed_htc_hrnetv2p_w48_20e_kaggle_pku.pth'
#load_from = '/data/Kaggle/wudi_data/Jan07-20-00-59/epoch_5.pth'
#load_from = '/data/Kaggle/checkpoints/all_cwxe99_3070100flip05resumme93Dec29-16-28-48_trimmed_translation.pth'
#load_from = '/data/Kaggle/wudi_data/Jan18-19-45/epoch_116.pth'
resume_from = '/data/Kaggle/checkpoints/all_cwxe99_3070100flip05resumme93Dec29-16-28-48/epoch_100.pth'
#load_from = '/data/Kaggle/wudi_data/Jan29-00-02/epoch_261.pth'
#resume_from = None
workflow = [('train', 1)]
# postprocessing flags here
pkl_postprocessing_restore_xyz = True # Use YYJ post processing
write_submission = True
valid_eval = False # evaluate validation set at the end
| [
"[email protected]"
] | |
3b5c2be55f423005b409da55eb4cb003a822b22f | 84c9a6fb5e18741f14a55d0d737e2a556383770d | /venv/Lib/site-packages/w3af/plugins/infrastructure/server_header.py | 9b95285e772beba1b79e247989486a937a290603 | [] | no_license | AravindChan96/Vulcan | 638a1db2f84df08bc50dd76c7f142014d529fbec | 5548a6f36f04108ac1a6ed8e707930f9821f0bd9 | refs/heads/master | 2022-11-05T15:05:54.224578 | 2020-06-19T20:44:14 | 2020-06-19T20:44:14 | 273,396,348 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,343 | py | """
server_header.py
Copyright 2006 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from threading import RLock
import w3af.core.controllers.output_manager as om
import w3af.core.data.kb.knowledge_base as kb
from w3af.core.controllers.plugins.infrastructure_plugin import InfrastructurePlugin
from w3af.core.data.url.helpers import is_no_content_response
from w3af.core.data.kb.info import Info
class server_header(InfrastructurePlugin):
"""
Identify the server type based on the server header.
:author: Andres Riancho ([email protected])
"""
def __init__(self):
InfrastructurePlugin.__init__(self)
# Internal variables
self._server_headers = set()
self._x_powered = set()
self._lock = RLock()
def discover(self, fuzzable_request, debugging_id):
"""
Nothing strange, just do a GET request to the url and save the server headers
to the kb. A smarter way to check the server type is with the hmap plugin.
:param debugging_id: A unique identifier for this call to discover()
:param fuzzable_request: A fuzzable_request instance that contains
(among other things) the URL to test.
"""
response = self._uri_opener.GET(fuzzable_request.get_url(), cache=True)
with self._lock:
self._check_server_header(fuzzable_request, response)
self._check_x_power(fuzzable_request, response)
def _check_server_header(self, fuzzable_request, response):
"""
HTTP GET and analyze response for server header
"""
if is_no_content_response(response):
#
# UrlOpenerProxy(), a helper class used by most plugins, will
# generate 204 HTTP responses for HTTP requests that fail.
# This makes plugins have less error handling code (try/except),
# and looks like this in the scan log:
#
# Generated 204 "No Content" response (id:2131)
#
# The problem is that in some strange cases, like this plugin,
# the 204 response will trigger a false positive. Because of
# that I had to add this if statement to completely ignore
# the HTTP responses with 204 status code
#
return
server, header_name = response.get_headers().iget('server')
if server in self._server_headers:
return
self._server_headers.add(server)
if server:
desc = 'The server header for the remote web server is: "%s".'
desc %= server
i = Info('Server header', desc, response.id, self.get_name())
i['server'] = server
i.add_to_highlight(header_name + ':')
om.out.information(i.get_desc())
# Save the results in the KB so the user can look at it
kb.kb.append(self, 'server', i)
# Also save this for easy internal use
# other plugins can use this information
kb.kb.raw_write(self, 'server_string', server)
else:
# strange !
desc = ('The remote HTTP Server omitted the "server" header in'
' its response.')
i = Info('Omitted server header', desc, response.id,
self.get_name())
om.out.information(i.get_desc())
# Save the results in the KB so that other plugins can use this
# information
kb.kb.append(self, 'omitted_server_header', i)
# Also save this for easy internal use
# other plugins can use this information
kb.kb.raw_write(self, 'server_string', '')
def _check_x_power(self, fuzzable_request, response):
"""
Analyze X-Powered-By header.
"""
for header_name in response.get_headers().keys():
for needle in ['ASPNET', 'POWERED']:
if needle in header_name.upper():
powered_by = response.get_headers()[header_name]
if powered_by in self._x_powered:
return
self._x_powered.add(powered_by)
desc = 'The %s header for the target HTTP server is "%s".'
desc %= (header_name, powered_by)
i = Info('Powered-by header', desc, response.id, self.get_name())
i['powered_by'] = powered_by
i.add_to_highlight(header_name + ':')
om.out.information(i.get_desc())
# Save the results in the KB so that other plugins can
# use this information. Before knowing that some servers
# may return more than one poweredby header I had:
#
# kb.kb.raw_write( self , 'powered_by' , powered_by )
#
# But I have seen an IIS server with PHP that returns
# both the ASP.NET and the PHP headers
kb.kb.append(self, 'powered_by', i)
# Save the list to the KB
kb.kb.raw_write(self, 'powered_by_string', list(powered_by))
def get_long_desc(self):
"""
:return: A DETAILED description of the plugin functions and features.
"""
return """
This plugin GETs the server header and saves the result to the
knowledge base.
Nothing strange, just do a GET request to the url and save the server
headers to the kb. A smarter way to check the server type is with the
hmap plugin.
"""
| [
"[email protected]"
] | |
4b83674f41dfe0e01b5e0deb3e30fb897f25bc5a | 977f1105b3a475055631689523b1dcef951c7f72 | /py_numpy_1.py | 2110065b1ebc86effcb568fa5a0643b2def8ba8a | [] | no_license | pbarton666/PES_Python_examples_and_solutions | 2f973296796d91a5c8c28000002b996ef143ebb2 | 94fc2c8101a6e654a3ab67b39d1878b9d9f6aa74 | refs/heads/master | 2021-01-10T22:45:27.446831 | 2017-05-14T17:11:52 | 2017-05-14T17:11:52 | 70,357,916 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,327 | py | #py_numpy_1.py
"""a snake-charming application"""
from PIL import Image
import numpy as np
import os
idir =os.getcwd()
iname= 'eastern_coral_1_clean.png'# 'white_snake.PNG'
saveas='new_snake.PNG'
#sets up an array for pixel processing
white=np.array([255,255,255,0]) #r, g, b, a
transparent = np.array([0, 0, 0, 0])
background = white
#open the image and convert it
raw_image = Image.open(iname)
raw_image.show()
converted_image = raw_image.convert('RGBA')
raw_image.close()
h, w = converted_image.size
converted_histo=converted_image.histogram()
converted_colors=converted_image.getcolors(w*h)
#dump the data into a numpy array and split the channels "bands"
data = np.array(converted_image) # h * w * 4 array (rgba)
r, g, b, a = data.T
#this sets the masking condition and replaces the background color
replace = (r == background[0]) & (g == background[1]) & (b == background[2])
data[replace.T] = (0,0,0,0)
#generate a new image, grab some stats, and save it.
new_image = Image.fromarray(data, 'RGBA')
h, w = new_image.size
new_histo=new_image.histogram()
new_colors=new_image.getcolors(w*h) #a list of tuples [count (rgba), ...]
new_image.save(saveas)
recovered_image = Image.open(saveas)
h, w = recovered_image.size
#we've successfully 'masked out' and replaced the background
new_image.show()
recovered_histo=recovered_image.histogram()
recovered_colors=recovered_image.getcolors(w*h) #a list of tuples [count (rgba), ...]
#but we can do more...
#strategy: make a list of color bins we expect to find. These will have pixel ranges
# that are human-friendly e.g., 'brownish', 'gold'. Each spec within the bin can be
# additively applied to a mask - functionally reducing the color palette.
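#A minimal sketch of that color-bin idea (illustrative only and not called below;
#the helper name quantize_to_bins and the bin ranges are assumptions, not part of the
#original script). Each bin maps a human-friendly colour range to one replacement RGBA value.
def quantize_to_bins(pixels, bins):
    """pixels: H x W x 4 uint8 RGBA array; bins: list of (low_rgb, high_rgb, replacement_rgba)."""
    out = pixels.copy()
    for low, high, replacement in bins:
        #boolean mask of pixels whose RGB values fall inside this bin's range
        in_bin = np.all((pixels[..., :3] >= low) & (pixels[..., :3] <= high), axis=-1)
        out[in_bin] = replacement
    return out
#example_bins = [((60, 30, 0), (140, 90, 60), (100, 60, 30, 255)),      # 'brownish'
#                ((180, 140, 0), (255, 215, 130), (218, 165, 32, 255))] # 'gold'
#data = quantize_to_bins(data, example_bins)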
reduced_image = recovered_image.convert('P', palette=Image.ADAPTIVE, colors=10)
reduc1 = reduced_image = recovered_image.convert('P', palette=Image.ADAPTIVE, colors=10)
reduc2 = reduc1.convert('RGB') #turns it to rgb
#save the image in a couple formats
reduc_fn = 'scratch.BMP'
reduc2.save(reduc_fn)
reduced_histo=reduced_image.histogram()
reduced_colors=reduced_image.getcolors(w*h) #a list of tuples [count (rgba), ...]
reduced_image.save(saveas+'reduced.BMP')
#now show them
recovered_image.show()
reduced_image.show()
recovered_image.close() | [
"[email protected]"
] | |
dca9fddc3b23f660445b7dfdc4fa69e6a6bfd984 | 56abd8f94a511ae0d163161cb2f5e0a91d4b8bed | /datahub/event/migrations/0014_update_permissions_django_21.py | c27dc191d9543f8aa33c9c96a70980890b259390 | [
"MIT"
] | permissive | cgsunkel/data-hub-api | 994c58bd975d902bf2bc44b415a5892919ff4539 | a92faabf73fb93b5bfd94fd465eafc3e29aa6d8e | refs/heads/develop | 2023-05-31T22:35:56.344904 | 2021-06-30T11:23:06 | 2021-06-30T11:23:06 | 303,947,456 | 0 | 0 | MIT | 2021-06-30T10:34:50 | 2020-10-14T08:14:46 | Python | UTF-8 | Python | false | false | 381 | py | # Generated by Django 2.0.8 on 2018-08-03 14:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('event', '0013_add_default_id_for_metadata'),
('core', '0003_rename_read_permissions'),
]
operations = [
migrations.AlterModelOptions(
name='event',
options={},
),
]
| [
"[email protected]"
] | |
80c5c7cf5342a44c5852c7740bd5710c955ced36 | 6b201605227f11880c1d32c9cad300f6e29ff4ae | /Python/Buch_Python3_Das_umfassende_Praxisbuch/Kapitel_07_Sequenzen_Mengen_und_Generatoren/08_chapter_07_repetition_questions.py | 0c221875dd13afd534b86ec96cd3820a2eaca0c7 | [
"MIT"
] | permissive | Apop85/Scripts | e2e8e6ed0c0da08a4d7c895aa366c9305197137b | 467c34e59f2708f2d2f8bb369c36fd782d365e8b | refs/heads/master | 2022-12-08T08:11:04.566376 | 2022-05-13T13:17:04 | 2022-05-13T13:17:04 | 164,251,836 | 0 | 0 | MIT | 2022-12-08T01:50:22 | 2019-01-05T21:16:45 | Python | UTF-8 | Python | false | false | 2,887 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
###
# File: 08_chapter_07_repetition_questions.py
# Project: Kapitel_07_Sequenzen_Mengen_und_Generatoren
# Created Date: Tuesday 05.03.2019, 16:17
# Author: Apop85
# -----
# Last Modified: Tuesday 05.03.2019, 16:43
# -----
# Copyright (c) 2019 Apop85
# This software is published under the MIT license.
# Check http://www.opensource.org/licenses/MIT for further informations
# -----
# Description:
###
import re
def output(title, string):
max_length=80
max_delta=20
string+=' '*max_length
print('╔'+'═'*max_length+'╗')
print('║'+title.center(max_length).upper()+'║')
print('╠'+'═'*max_length+'╣')
search_pattern=re.compile(r'(.{'+str(max_length-max_delta-10)+r','+str(max_length-10)+r'}[^\w"])')
reg_lines=search_pattern.findall(string)
for line in reg_lines:
print('║ '+line+' '*(max_length-len(line)-1)+'║')
print('╚'+'═'*max_length+'╝')
input()
output('Exercise 1','The initial list is ["mond","stoff","treib","raum","schiff"]. What is the output of the following statements?')
output('Statement 1','print(liste[0])')
output('Solution to statement 1:','The first item of the list is printed: "mond"')
output('Statement 2','print(liste[2]+liste[1])')
output('Solution to statement 2:','The third and the second item of the list are concatenated: "treibstoff"')
output('Statement 3','print(liste[-2]+liste[-1])')
output('Solution to statement 3:','The second-to-last and the last item of the list are concatenated: "raumschiff"')
output('Statement 4','for wort in liste: if wort[0] == "s": print(wort)')
output('Solution to statement 4:','All items of the list that begin with an "s" are printed: "stoff", "schiff"')
output('Statement 5','for wort in liste: print(wort[1])')
output('Solution to statement 5:','The second letter of every item in the list is printed: o,t,r,a,c')
output('Statement 6','liste=liste+["gestein"]')
output('Solution to statement 6:','Appends another item with the content "gestein" to the list: ["mond","stoff","treib","raum","schiff", "gestein"]')
output('Statement 7','print(liste[0]+liste[-1])')
output('Solution to statement 7:','The first and the last item of the list are concatenated: "mondgestein"')
output('Exercise 2','What value does each of the list objects s1, s2 and s3 have after the following statements:')
output('Statement 1','s1 = [1]: s1=[1,s1]: s1=[1,s1]')
output('Solution to statement 1','s1=[1,[1,[1]]]')
output('Statement 2','A=["Haus","Garten"]: B=["bau","tier","pflanze"]: s2=[i+j for i in A for j in B]')
output('Solution to statement 2','"Hausbau", "Haustier", "Hauspflanze", "Gartenbau", "Gartentier", "Gartenpflanze"')
output('Statement 3','A=[1,2,3,4]: B=[2,3,4,5]: s3=[i for i in A+B if (i not in A) or (i not in B)]')
output('Solution to statement 3','s3 contains all numbers that do not occur in both lists: 1,5')
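# Illustrative check of the answers above (an addition, not part of the book text):
# running the statements confirms each printed solution.
if __name__ == '__main__':
    liste = ["mond", "stoff", "treib", "raum", "schiff"]
    assert liste[0] == "mond"
    assert liste[2] + liste[1] == "treibstoff"
    assert liste[-2] + liste[-1] == "raumschiff"
    assert [wort for wort in liste if wort[0] == "s"] == ["stoff", "schiff"]
    assert [wort[1] for wort in liste] == ["o", "t", "r", "a", "c"]
    liste = liste + ["gestein"]
    assert liste[0] + liste[-1] == "mondgestein"
    s1 = [1]; s1 = [1, s1]; s1 = [1, s1]
    assert s1 == [1, [1, [1]]]
    A = ["Haus", "Garten"]; B = ["bau", "tier", "pflanze"]
    s2 = [i + j for i in A for j in B]
    assert s2 == ["Hausbau", "Haustier", "Hauspflanze", "Gartenbau", "Gartentier", "Gartenpflanze"]
    A = [1, 2, 3, 4]; B = [2, 3, 4, 5]
    s3 = [i for i in A + B if (i not in A) or (i not in B)]
    assert s3 == [1, 5]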
| [
"[email protected]"
] | |
de11b77370f05d31f929a0e89ae8518b594ead80 | 8d47af9482444b07b52cf44cebcaf4b992df4d09 | /agents/14_MinimumPRB/14.py | 5f38c96b71c0088d5b0857b5e87d87b1363e01c9 | [] | no_license | w0lv3r1nix/retro-agents | f4dbce2db558c880b161062796e5397be65bdd10 | c7f93a737dc6c6fc5d8343c099e14bd2bc97aaf1 | refs/heads/master | 2020-08-01T01:19:41.660018 | 2018-06-13T04:28:09 | 2018-06-13T04:28:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,842 | py | #!/usr/bin/env python
"""
Train an agent on Sonic using an open source Rainbow DQN
implementation.
"""
import tensorflow as tf
from anyrl.algos import DQN
from anyrl.envs import BatchedGymEnv
from anyrl.envs.wrappers import BatchedFrameStack
from anyrl.models import rainbow_models
from anyrl.rollouts import BatchedPlayer, PrioritizedReplayBuffer, NStepPlayer
from anyrl.spaces import gym_space_vectorizer
import gym_remote.exceptions as gre
from sonic_util import AllowBacktracking, make_env
from MinimumPRB import MinimumPRB
def main():
"""Run DQN until the environment throws an exception."""
env = AllowBacktracking(make_env(stack=False, scale_rew=False))
env = BatchedFrameStack(BatchedGymEnv([[env]]), num_images=4, concat=False)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # pylint: disable=E1101
with tf.Session(config=config) as sess:
dqn = DQN(*rainbow_models(sess,
env.action_space.n,
gym_space_vectorizer(env.observation_space),
min_val=-200,
max_val=200))
player = NStepPlayer(BatchedPlayer(env, dqn.online_net), 3)
optimize = dqn.optimize(learning_rate=1e-4)
sess.run(tf.global_variables_initializer())
dqn.train(num_steps=2000000, # Make sure an exception arrives before we stop.
player=player,
replay_buffer=MinimumPRB(500000, 0.5, 0.4, epsilon=0.1),
optimize_op=optimize,
train_interval=1,
target_interval=8192,
batch_size=32,
min_buffer_size=20000)
if __name__ == '__main__':
try:
main()
except gre.GymRemoteError as exc:
print('exception', exc)
| [
"[email protected]"
] | |
205a1f4bb79dfbfe132609918fb50ee0c8ed7da2 | 461cbe14775be116ea001ec36b8b9b4deb2f77bc | /lesson1.6_step7.py | 42f6193f82db311382de80a482bc63b4c18cc740 | [] | no_license | Adoyan-Grigor/stepik-auto-tests-course | 898a653062cfa4bdf484a363b956ed2004ef0629 | 406b1498362538ebec27118083c3de5a94898140 | refs/heads/master | 2023-05-10T18:25:09.085013 | 2021-06-02T13:16:35 | 2021-06-02T13:16:35 | 369,514,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | #!/usr/bin/python3
from selenium import webdriver
import time
try:
browser = webdriver.Chrome()
browser.get("http://suninjuly.github.io/huge_form.html")
elements = browser.find_elements_by_css_selector('[type="text"]')
for element in elements:
element.send_keys("Мой ответ")
button = browser.find_element_by_css_selector("button.btn")
button.click()
finally:
    # give ourselves 30 seconds to copy the code from the page
time.sleep(30)
    # close the browser after all the actions
browser.quit()
| [
"[email protected]"
] | |
aba19dd9fae018990c2a4be8d819f2689788d268 | a0fb29f99a852089193e4cc9a11e7263dc3f8b5f | /mayan/apps/documents/tests/test_document_version_models.py | b19f865060f817d3028cfe3263bdeaa1716f406d | [
"Apache-2.0"
] | permissive | ikang9712/Mayan-EDMS | 0e22a944d63657cea59c78023b604a01a622b52a | d6e57e27a89805329fe0c5582caa8e17882d94e6 | refs/heads/master | 2023-07-28T19:41:55.269513 | 2021-09-07T14:16:14 | 2021-09-07T14:16:14 | 402,884,683 | 1 | 0 | NOASSERTION | 2021-09-03T20:00:09 | 2021-09-03T20:00:09 | null | UTF-8 | Python | false | false | 2,826 | py | from ..literals import (
DOCUMENT_FILE_ACTION_PAGES_NEW, DOCUMENT_FILE_ACTION_PAGES_APPEND,
DOCUMENT_FILE_ACTION_PAGES_KEEP
)
from .base import GenericDocumentTestCase
class DocumentVersionTestCase(GenericDocumentTestCase):
def test_version_new_file_new_pages(self):
test_document_version_page_content_objects = self.test_document_version.page_content_objects
self.assertEqual(self.test_document.versions.count(), 1)
self._upload_test_document_file(action=DOCUMENT_FILE_ACTION_PAGES_NEW)
self.assertEqual(self.test_document.versions.count(), 2)
self.assertNotEqual(
self.test_document_version.page_content_objects,
test_document_version_page_content_objects
)
self.assertEqual(
self.test_document_version.page_content_objects,
list(self.test_document.file_latest.pages.all())
)
def test_version_new_version_keep_pages(self):
test_document_version_page_content_objects = self.test_document_version.page_content_objects
self.assertEqual(self.test_document.versions.count(), 1)
self._upload_test_document_file(action=DOCUMENT_FILE_ACTION_PAGES_KEEP)
self.assertEqual(self.test_document.versions.count(), 1)
self.assertEqual(
self.test_document_version.page_content_objects,
test_document_version_page_content_objects
)
self.assertNotEqual(
self.test_document_version.page_content_objects,
list(self.test_document.file_latest.pages.all())
)
def test_version_new_file_append_pages(self):
test_document_version_page_content_objects = self.test_document_version.page_content_objects
self.assertEqual(self.test_document.versions.count(), 1)
self.assertEqual(self.test_document.files.count(), 1)
self._upload_test_document_file(action=DOCUMENT_FILE_ACTION_PAGES_APPEND)
self.assertEqual(self.test_document.files.count(), 2)
self.assertEqual(self.test_document.versions.count(), 2)
test_document_version_expected_page_content_objects = list(
self.test_document.files.first().pages.all()
)
test_document_version_expected_page_content_objects.extend(
list(
self.test_document.files.last().pages.all()
)
)
self.assertNotEqual(
self.test_document_version.page_content_objects,
test_document_version_page_content_objects
)
self.assertEqual(
self.test_document_version.page_content_objects,
test_document_version_expected_page_content_objects
)
def test_method_get_absolute_url(self):
self.assertTrue(self.test_document.version_active.get_absolute_url())
| [
"[email protected]"
] | |
c4722abdfbd81b2b5a9a7eff8b02d361d255c7af | 8a29f983b122602ef960d8c1f6fc6451569ed2d2 | /te_discovery/conservation/extract_conservation_classes.py | 166c8a4c459770ec2d21599a94d48d05cbd58982 | [
"MIT"
] | permissive | oaxiom/hesc_lincrna | a4832841b49f2b9b0da6bf8a169857550a0e8797 | 7a87d426bba93a027794b6bea36f1ae61d5d205b | refs/heads/master | 2022-05-08T04:16:57.760960 | 2022-04-02T00:17:57 | 2022-04-02T00:17:57 | 187,936,591 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,718 | py | import sys, os, itertools
from collections import Counter
import numpy as np
import matplotlib.pyplot as plot
import matplotlib.tri as tri
from glbase3 import *
import shared_conservation
# collect three things:
# 1. The total PhyloP score of the transcript
# 2. score for TE-containing bits
# 3. score for non-TE containing bits;
dfam = genelist('../dfam/dfam_annotation.tsv', format={'force_tsv': True, 'name': 0, 'type': 3, 'subtype': 4})
dfam_dict = {}
for te in dfam:
dfam_dict[te['name']] = '{0}:{1}:{2}'.format(te['type'], te['subtype'], te['name'])
transcripts = glload('../te_transcripts/transcript_table_merged.mapped.glb')
gl = glload('phyloP_conservation_table.glb')
print(gl)
t = 0.25
not_counted = 0
both_conserved = []
te_conserved = []
lncrna_conserved = []
for item in gl:
if item['phyloP_tes'] > t and item['phyloP_nottes'] > t:
both_conserved.append(item)
elif item['phyloP_tes'] > t:
te_conserved.append(item)
elif item['phyloP_nottes'] > t:
lncrna_conserved.append(item)
else:
not_counted += 1
print('Not counted : {0:,}'.format(not_counted))
print('Both conserved : {0:,}'.format(len(both_conserved)))
print('TE conserved : {0:,}'.format(len(te_conserved)))
print('lncRNA conserved: {0:,}'.format(len(lncrna_conserved)))
print('Total TE-containing transcripts: {0:,}'.format(len(transcripts)))
gl = genelist()
gl.load_list(both_conserved)
both_conserved = gl
gl = genelist()
gl.load_list(te_conserved)
te_conserved = gl
gl = genelist()
gl.load_list(lncrna_conserved)
lncrna_conserved = gl
all_data = {'both_conserved': both_conserved.map(genelist=transcripts, key='transcript_id'),
'te_conserved': te_conserved.map(genelist=transcripts, key='transcript_id'),
'lncrna_conserved': lncrna_conserved.map(genelist=transcripts, key='transcript_id')
}
for k in all_data:
# convert to a list of doms:
doms = []
for t in all_data[k]:
doms += [dfam_dict[d['dom']] for d in t['doms']]
c = Counter(doms)
c = c.most_common(20)#.items()
print(c)
vals = [i[1] for i in c]
labs = [i[0] for i in c]
vals.reverse()
labs.reverse()
fig = plot.figure(figsize=[2,2])
fig.subplots_adjust(left=0.5, top=0.97)
ax = fig.add_subplot(111)
ys = np.arange(len(vals))
ax.barh(ys, vals)
ax.set_xlabel('Number of TE domains')
ax.set_yticks(ys)
ax.set_yticklabels(labs)
[t.set_fontsize(6) for t in ax.get_yticklabels()]
[t.set_fontsize(6) for t in ax.get_xticklabels()]
#for y, p, x in zip(ys, percs, num_hits):
# ax.text(x+4, y, s='{0} ({1:.1f}%)'.format(x, p), va='center', fontsize=6)
fig.savefig('class_summary-{0}.pdf'.format(k))
| [
"[email protected]"
] | |
7ae56f768a8f140ecb1394772602106519a9ee18 | eb38089224f1c2598f6ba17a28756bb040d4975a | /{{ cookiecutter.appname }}/program.py | 04dd0d1ae5812276dd17999afe12ac501fb3280d | [] | no_license | zooba/sqlazure-cookiecutter-demo | b21afe1775547ca3ecc014396f53fb12170c3df9 | eee1aed495cafc32d29ff5e335f4cf3963d1f7ba | refs/heads/master | 2021-06-08T10:02:56.428740 | 2016-11-04T22:40:58 | 2016-11-04T22:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | import pyodbc
CONNECTION_STRING = "{{cookiecutter.connection_string}}"
connection = pyodbc.connect(CONNECTION_STRING)
# Read all
cursor = connection.cursor()
cursor.execute(
"select LastName, Count(*) as 'Members' "
"from {{cookiecutter.namespace}}.{{cookiecutter.table}} "
"group by LastName "
"having Count(*) > 3 "
"order by 'Members' DESC")
row = cursor.fetchall()
print('Family Name | Members')
print('-------------------+--------')
print('\n'.join('{0[0]:<19}|{0[1]:>8}'.format(r) for r in row))
| [
"[email protected]"
] | |
d7c6e469c589e6a81c8cab720f7504fcc6b98f5c | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_quota_coupons_request.py | 32bac674877d7d2748d85e8b59d9ef77c75969ef | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,159 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListQuotaCouponsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'body': 'QueryCouponQuotasReqExt'
}
attribute_map = {
'body': 'body'
}
def __init__(self, body=None):
"""ListQuotaCouponsRequest
The model defined in huaweicloud sdk
:param body: Body of the ListQuotaCouponsRequest
:type body: :class:`huaweicloudsdkbss.v2.QueryCouponQuotasReqExt`
"""
self._body = None
self.discriminator = None
if body is not None:
self.body = body
@property
def body(self):
"""Gets the body of this ListQuotaCouponsRequest.
:return: The body of this ListQuotaCouponsRequest.
:rtype: :class:`huaweicloudsdkbss.v2.QueryCouponQuotasReqExt`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this ListQuotaCouponsRequest.
:param body: The body of this ListQuotaCouponsRequest.
:type body: :class:`huaweicloudsdkbss.v2.QueryCouponQuotasReqExt`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListQuotaCouponsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
41fa742a05fa6098767800003e945f5fca7db849 | b7ba98a9038f977f1efcbda5da2a41ae08e602ae | /exercises/21_jinja2/task_21_1c.py | d015a0565f2d71e065ba18dfbd812d00edb77cff | [] | no_license | Stanleygoo/pyneng-examples-exercises | f7914bc21aca119c7423af373a8b17b2917ea675 | c27b608ac954149d841c0a53f1108a6100295544 | refs/heads/master | 2021-05-11T15:35:11.043546 | 2018-01-12T06:42:04 | 2018-01-12T06:42:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,721 | py | # -*- coding: utf-8 -*-
'''
Task 21.1c
Rework the generate_cfg_from_template function from task 21.1, 21.1a or 21.1b:
* add automatic detection of the different formats of the data file
* the same data parameter must be used for passing the different data types
The following formats must be supported:
* YAML - files with the yml or yaml extension
* JSON - files with the json extension
* Python dictionary
If the data type could not be determined, print the error_message text (move the message text into the body of the function), stop the function and return None.
Check the function with the templates/for.txt template and this data:
* data_files/for.yml
* data_files/for.json
* the data_dict dictionary
'''
error_message = '''
Could not determine the data format.
Files with the .json, .yml or .yaml extension and Python dictionaries are supported
'''
data_dict = {'vlans': {
10: 'Marketing',
20: 'Voice',
30: 'Management'},
'ospf': [{'network': '10.0.1.0 0.0.0.255', 'area': 0},
{'network': '10.0.2.0 0.0.0.255', 'area': 2},
{'network': '10.1.1.0 0.0.0.255', 'area': 0}],
'id': 3,
'name': 'R3'}
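# A minimal sketch of one possible solution (an illustration, not the course's reference
# answer; it assumes jinja2 and PyYAML are installed and that the function takes the
# template path as its first argument).
import json
import os
import yaml
from jinja2 import Environment, FileSystemLoader
def generate_cfg_from_template(template, data):
    if isinstance(data, dict):
        data_dict = data
    elif isinstance(data, str) and data.endswith('.json'):
        with open(data) as f:
            data_dict = json.load(f)
    elif isinstance(data, str) and data.endswith(('.yml', '.yaml')):
        with open(data) as f:
            data_dict = yaml.safe_load(f)
    else:
        print(error_message)
        return None
    env = Environment(loader=FileSystemLoader(os.path.dirname(template) or '.'),
                      trim_blocks=True)
    return env.get_template(os.path.basename(template)).render(data_dict)
# Example usage:
# print(generate_cfg_from_template('templates/for.txt', 'data_files/for.yml'))
# print(generate_cfg_from_template('templates/for.txt', data_dict))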
| [
"[email protected]"
] | |
5778b74123229319dcbed1fffaeff1f0615fd64d | f4103057ccbe0c08ec51626cdd0c016acd446be9 | /tensorflow_ReinForcementLearning/Dueling_Double_DQN/DuelingDoubleDQN.py | 2fe050cc3bcf811ed260a73d7dd78486fb175870 | [] | no_license | leehongpyo/Tensorflow_Advanced_Tutorials | e7cb0fe84e65f90fefa430893083b36981f587ec | 1c76d35504b090a223294bc6648325dfe49a0e26 | refs/heads/master | 2020-04-29T11:00:50.485795 | 2019-03-16T14:24:13 | 2019-03-16T14:24:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,261 | py | import glob
import os
import shutil
import time
import cv2
import gym
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tqdm import tqdm
class ReplayMemory(object):
def __init__(self, maxlen, batch_size, with_replacement):
self.maxlen = maxlen
        self.buffer = np.empty(shape=maxlen, dtype=np.object) # important that dtype is np.object (each slot holds one whole transition)
self.index = 0
self.length = 0
self.batch_size = batch_size
self.with_replacement = with_replacement
self.sequence_state = []
def remember(self, sequence_state, action, reward, next_state, gamestate):
self.sequence_state = np.concatenate((sequence_state[:, :, 1:], next_state[:, :, np.newaxis]), axis=-1)
self.buffer[self.index] = [sequence_state, action, reward, self.sequence_state, gamestate]
self.length = min(self.length + 1, self.maxlen)
self.index = (self.index + 1) % self.maxlen
@property
def next_sequence_state(self):
return self.sequence_state
@property
def sample(self):
if self.with_replacement:
indices = np.random.randint(self.length, size=self.batch_size)
else:
indices = np.random.permutation(self.length)[:self.batch_size]
return self.buffer[indices]
class model(object):
def __init__(self,
model_name="BreakoutDeterministic-v4",
training_display=(True, 100000),
training_step=200000000,
training_start_point=10000,
training_interval=4,
rememorystackNum=500000,
save_step=10000,
copy_step=10000,
framesize=4,
learning_rate=0.00025,
momentum=0.95,
egreedy_max=1,
egreedy_min=0.1,
egreedy_step=1000000,
discount_factor=0.99,
batch_size=32,
with_replacement=True,
only_draw_graph=False,
SaveGameMovie=True,
doubleQ=True,
dueling=True):
        # Create the game environment
        if model_name == "BreakoutDeterministic-v4":
            print("\n<<< ""{}"" game environment >>>".format(model_name))
        elif model_name == "PongDeterministic-v4":
            print("\n<<< ""{}"" game environment >>>".format(model_name))
        else:
            print("<<< Unable to run >>>")
            print(
                "<<< The current _data_preprocessing function is written only for the ""BreakoutDeterministic-v4"" and ""PongDeterministic-v4"" game environments. >>>")
            print("<<< To run the ""{}"" game environment, the _data_preprocessing function needs to be modified. >>>".format(model_name))
            print("<<< Please adapt the preprocessing function to the ""{}"" game environment and then add that environment to the conditional above. >>>".format(
                model_name, model_name))
            exit(0)
self.game_name = model_name
self.model_name = model_name + "_IC" + str(framesize) # IC -> Input Channel
if doubleQ:
self.model_name = self.model_name + "DDQN"
if dueling:
self.model_name = self.model_name + "Dueling"
self.env = gym.make(self.game_name) # train , test
self.display = training_display[0]
self.display_step = training_display[1]
self.SaveGameMovie = SaveGameMovie
        # Training hyperparameters
self.framesize = framesize
self.training_step = training_step
self.training_start_point = training_start_point
self.training_interval = training_interval
self.learning_rate = learning_rate
self.momentum = momentum
self.egreedy_min = egreedy_min
self.egreedy_max = egreedy_max
self.egreedy_step = egreedy_step
self.discount_factor = discount_factor
self.batch_size = batch_size
self.save_step = save_step
self.copy_step = copy_step
self.only_draw_graph = only_draw_graph
        # Replay memory
        self.stacked_state = [] # variable used to hold a stack of consecutive observations
self.with_replacement = with_replacement
self.rememorystackNum = rememorystackNum
        # Whether to use Double Q
        self.doubleQ = doubleQ
        # Whether to use Dueling
self.dueling = dueling
self.RM = ReplayMemory(maxlen=self.rememorystackNum, batch_size=self.batch_size,
with_replacement=self.with_replacement)
        # Build the DQN computation graph
self._build_graph()
def __repr__(self):
print("{} With Dueling Double DQN".format(self.model_name))
@property
def _action_space_number(self):
return self.env.action_space.n
@property
def _sample_memories(self):
        # state, action, reward, next state, game-continuation flag
cols = [[], [], [], [], []]
for memory in self.RM.sample:
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
state = cols[0]
action = cols[1]
        reward = cols[2].reshape(-1, 1) # reshape to the expected (batch, 1) shape
next_state = cols[3]
        gamestate = cols[4].reshape(-1, 1) # reshape to the expected (batch, 1) shape
return state, action, reward, next_state, gamestate
def _epsilon_greedy(self, Qvalue, step):
        # off-policy element
        # Anneal epsilon from 1.0 down to 0.1 over the course of training.
epsilon = np.maximum(self.egreedy_min,
self.egreedy_max - (self.egreedy_max - self.egreedy_min) * (step / self.egreedy_step))
        if np.random.rand() < epsilon: # act randomly with probability epsilon
return np.random.randint(self._action_space_number)
else:
return np.argmax(Qvalue) # 1 - epsilon 확률로 랜덤하게 행동
# Reduce the DDQN's computation and speed up training
def _data_preprocessing(self, obs):
'''
From the DQN paper...
Working directly with raw Atari frames, which are 210 × 160 pixel images with a 128 color palette,
can be computationally demanding, so we apply a basic preprocessing step aimed at reducing the
input dimensionality. The raw frames are preprocessed by first converting their RGB representation
to gray-scale and down-sampling it to a 110×84 image. The final input representation is obtained by
cropping an 84 × 84 region of the image that roughly captures the playing area. The final cropping
stage is only required because we use the GPU implementation of 2D convolutions from [11], which
expects square inputs. For the experiments in this paper, the function φ from algorithm 1 applies this
preprocessing to the last 4 frames of a history and stacks them to produce the input to the Q-function.
'''
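# Sketch of the shapes the steps below produce (assuming the standard 210 x 160 x 3 Atari RGB frame):
# (210, 160, 3) --grayscale--> (210, 160) --cv2.resize(dsize=(84, 110))--> (110, 84) --crop rows 17:101--> (84, 84).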
# convert to an 84 x 84 grayscale image
obs = cv2.cvtColor(obs, cv2.COLOR_BGR2GRAY)
obs = cv2.resize(obs, dsize=(84, 110))
return obs[17:101, :].astype(np.uint8)
def _concatenated_state(self, state):
listed_state = [self._data_preprocessing(state)[:, :, np.newaxis] for _ in range(self.framesize)]
concatenated_state = np.concatenate(listed_state, axis=-1)
return concatenated_state
def _DQN(self, inputs, name):
# N X 84 x 84 x 4
initializer = tf.contrib.layers.variance_scaling_initializer()
with tf.variable_scope(name) as scope:
conv1 = tf.layers.conv2d(inputs=inputs, filters=32, kernel_size=(8, 8), strides=(4, 4), padding='valid',
activation=tf.nn.relu, use_bias=True,
kernel_initializer=initializer) # N X 20 X 20 X 32
conv2 = tf.layers.conv2d(inputs=conv1, filters=64, kernel_size=(4, 4), strides=(2, 2), padding='valid',
activation=tf.nn.relu, use_bias=True,
kernel_initializer=initializer) # N X 9 X 9 X 64
conv3 = tf.layers.conv2d(inputs=conv2, filters=64, kernel_size=(3, 3), strides=(1, 1), padding='valid',
activation=tf.nn.relu, use_bias=True,
kernel_initializer=initializer) # N X 7 X 7 X 64
hidden = tf.layers.dense(tf.reshape(conv3, shape=(-1, 7 * 7 * 64)), 512, activation=tf.nn.relu,
use_bias=True, kernel_initializer=initializer)
if self.dueling:
state_value, action_advantages = tf.split(hidden, 2, axis=-1)
value = tf.layers.dense(state_value, 1, activation=None, use_bias=True,
kernel_initializer=initializer) # N x 1
advantage = tf.layers.dense(action_advantages, self._action_space_number, activation=None, use_bias=True,
kernel_initializer=initializer) # # N x self._action_space_number
# < An alternative module replaces the max operator with an average >
output = value + tf.subtract(advantage, tf.reduce_mean(advantage, axis=-1, keepdims=True))
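# In other words, the line above computes the mean-subtracted dueling aggregation
# Q(s, a) = V(s) + (A(s, a) - mean_a' A(s, a')), as described in the Dueling DQN paper.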
else:
output = tf.layers.dense(hidden, self._action_space_number, activation=None, use_bias=True,
kernel_initializer=initializer)
# train_vars = tf.trainable_variables(scope = scope.name)
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name)
train_vars_dictionary = {var.name[len(scope.name):]: var for var in train_vars}
return output, train_vars_dictionary
def _build_graph(self):
self.Graph = tf.Graph()
with self.Graph.as_default():
# model input
self.state = tf.placeholder(tf.float32, shape=[None, None, None, self.framesize])
self.action = tf.placeholder(tf.int32, shape=None)
self.target = tf.placeholder(tf.float32, shape=None)
# tensorboard
self.rewards = tf.placeholder(tf.float32, shape=None)
self.Qvalues = tf.placeholder(tf.float32, shape=None)
self.gamelength = tf.placeholder(tf.int32, shape=None)
with tf.name_scope("online"):
self.online_Qvalue, online_var_dictionary = self._DQN(self.state, name="online")
with tf.name_scope("target"):
self.target_Qvalue, target_var_dictionary = self._DQN(self.state, name="target")
with tf.name_scope("copy"):
# copy the online network's weights to the target network
self.cpFromOnlinetoTarget = [target_var.assign(online_var_dictionary[var_name]) for var_name, target_var
in target_var_dictionary.items()]
with tf.name_scope("update_variable"):
trainable_var_list = tf.global_variables()
with tf.name_scope("saver"):
self.saver = tf.train.Saver(var_list=trainable_var_list, max_to_keep=5)
with tf.name_scope("Loss"):
Qvalue = tf.reduce_sum(
tf.multiply(self.online_Qvalue, tf.one_hot(self.action, self._action_space_number)),
axis=1,
keepdims=True)
error = tf.abs(self.target - Qvalue)
# when 0 < error < 1, apply tf.square(clipped_error)
# when error > 1, apply 2*error - 1 (linear), i.e. a Huber-style clipped loss
clipped_error = tf.clip_by_value(error, 0, 1)
linear_error = 2 * (error - clipped_error)
self.loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)
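# Worked example of this clipped loss on a single sample:
# error = 0.5 -> 0.5**2 = 0.25; error = 3.0 -> 1**2 + 2*(3 - 1) = 5, i.e. 2*error - 1 in the linear regime.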
with tf.name_scope("trainer"):
optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate, momentum=self.momentum,
epsilon=1e-2)
self.train_operation = optimizer.minimize(self.loss, var_list=trainable_var_list)
all_var_list = tf.global_variables()
with tf.name_scope("game_infomation"):
tf.summary.scalar("Loss", self.loss)
tf.summary.scalar("Reward", self.rewards)
tf.summary.scalar("Qvalue", self.Qvalues)
tf.summary.scalar("Game length", self.gamelength)
self.summary_operation = tf.summary.merge_all()
for operator in (self.state, self.online_Qvalue):
tf.add_to_collection("way", operator)
# write the graph structure to a meta file.
meta_save_file_path = os.path.join(self.model_name, 'Graph.meta')
self.saver.export_meta_graph(meta_save_file_path, collection_list=["way"])
if self.only_draw_graph:
print('<<< Saving only the Graph.meta file and exiting. >>>')
exit(0)
config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
# config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.1
self.sess = tf.Session(graph=self.Graph, config=config)
# initialize weights and copy online DQN -> target DQN, or restore from a checkpoint
print("<<< initializing!!! >>>")
self.sess.run(tf.variables_initializer(all_var_list))
self.sess.run(self.cpFromOnlinetoTarget)
ckpt = tf.train.get_checkpoint_state(self.model_name)
if (ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path)):
print("<<< all variable retored except for optimizer parameter >>>")
print("<<< Restore {} checkpoint!!! >>>".format(os.path.basename(ckpt.model_checkpoint_path)))
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
self.start = int(os.path.basename(ckpt.model_checkpoint_path).split("-")[-1])
self.start += 1
else:
self.start = 1
def _normalizaiton(self, value=None, factor=255.0):
return np.divide(value, factor)
@property
def train(self):
if os.path.exists("tensorboard/{}".format(self.model_name)):
shutil.rmtree("tensorboard/{}".format(self.model_name))
self.summary_writer = tf.summary.FileWriter(os.path.join("tensorboard", self.model_name), self.sess.graph)
update_counter = 0 # 학습 횟수 카운터!!!
gamelength = 0
totalgame = 0
totalQvalues = 0
totalrewards = 0
gamestate = True # used to trigger game resets and to track game-over status
# effectively, only about (self.training_step - self.rememorystackNum) steps are used for learning.
for step in tqdm(range(self.start, self.training_start_point + self.training_step + 1, 1)):
if (self.training_start_point + step) % self.display_step == 0 and self.display:
val_env = gym.make(self.game_name) # val
print("\n<<< Validation at {} step >>>".format(step))
valid_total_reward = 0
valid_sequence_state = self._concatenated_state(val_env.reset())
valid_gamestate = False
val_step = 0
while valid_gamestate != True:
val_step += 1
val_env.render()
time.sleep(1 / 30) # 30fps
online_Qvalue = self.sess.run(self.online_Qvalue,
feed_dict={self.state: self._normalizaiton([valid_sequence_state])})
valid_action = self._epsilon_greedy(online_Qvalue, step)
valid_next_state, valid_reward, valid_gamestate, _ = val_env.step(valid_action)
valid_sequence_state = np.concatenate((valid_sequence_state[:, :, 1:],
self._data_preprocessing(valid_next_state)[:, :,
np.newaxis]), axis=-1)
valid_total_reward += valid_reward
# only display the steps where a reward was received
if valid_reward != 0:
print("game step {} -> reward: {}".format(val_step, valid_reward))
print("total reward : {}\n".format(valid_total_reward))
val_env.close()
if gamestate:
totalQvalues = 0
gamelength = 0
totalrewards = 0
totalgame += 1
# concatenate the current consecutive observations -> 84 x 84 x self.frame_size
self.sequence_state = self._concatenated_state(self.env.reset())
# start the online DQN / why normalize? to keep the input on a consistent scale!!!
online_Qvalue = self.sess.run(self.online_Qvalue,
feed_dict={self.state: self._normalizaiton([self.sequence_state])})
if step < self.training_start_point + 1:
action = np.random.randint(self._action_space_number)
else:
action = self._epsilon_greedy(online_Qvalue, step - self.training_start_point)
next_state, reward, gamestate, _ = self.env.step(action)
# clip the reward to -1, 0, 1
reward = np.clip(reward, a_min=-1, a_max=1)
'''
Store the transition in replay memory.
Why "not gamestate"? -> when a game finishes, gamestate returns True (i.e. 1),
which means the game is over, so it must become 0 (not True = False = 0) so that the target_Qvalue can be zeroed out during training.
'''
# I consider this the most important part: the place where the flow of time is stored!!!
self.RM.remember(self.sequence_state, action, reward, self._data_preprocessing(next_state), not gamestate)
self.sequence_state = self.RM.next_sequence_state
# how long one game lasted (gamelength), and the average Q value over one game
totalrewards += reward
gamelength += 1
totalQvalues += (np.max(online_Qvalue) / gamelength)
if step < self.training_start_point or step % self.training_interval != 0:
continue
##################################### Training #########################################
'''
Double DQN explained:
Motivated by the observation that DQN often overestimates the Q values of candidate actions in each state!!!
If every action were always overestimated by the same amount this would not be a big problem, but apparently that is not the case.
So the DDQN paper proposes a simple trick:
when computing the target Q value during training, instead of taking the maximum over the Q values directly (i.e. no immediate max),
select the action with the largest Q value from the online network, and generate the target Q value for that action
from the target network!!!
Separating action selection from target Q value generation removes a large part of the overestimation,
and is said to make training faster and more stable.
'''
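# Sketch of the two targets computed in the branch below (both scaled by the "continues" flag so terminal transitions reduce to y = r):
# Double DQN: y = r + gamma * Q_target(s', argmax_a Q_online(s', a))
# Vanilla DQN: y = r + gamma * max_a Q_target(s', a)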
sampled_state, sampled_action, sampled_reward, sampled_next_state, continues = self._sample_memories
target_Qvalue = self.sess.run(self.target_Qvalue,
feed_dict={self.state: self._normalizaiton(sampled_next_state)})
# Double Q!!!
# get the action from the online network
if self.doubleQ:
online_Qvalue = self.sess.run(self.online_Qvalue,
feed_dict={self.state: self._normalizaiton(sampled_next_state)})
online_selection = np.argmax(online_Qvalue, axis=1)
# pick the target_Qvalue entries using the actions chosen by the online network
double_Qvalue = target_Qvalue[range(self.batch_size), online_selection]
target_Qvalue = sampled_reward + continues * self.discount_factor * double_Qvalue
else:
target_Qvalue = sampled_reward + continues * self.discount_factor * np.max(target_Qvalue, axis=1,
keepdims=True)
_, loss = self.sess.run([self.train_operation, self.loss],
feed_dict={self.state: self._normalizaiton(sampled_state),
self.action: sampled_action,
self.target: target_Qvalue})
update_counter += 1
# every self.copy_step updates, copy online DQN -> target DQN
if update_counter % self.copy_step == 0:
print("<<< 'online' copy to 'target' number '{}' >>>".format(update_counter // self.copy_step))
self.sess.run(self.cpFromOnlinetoTarget)
# every self.save_step updates, save tensorboard summaries and the weights:
if update_counter % self.save_step == 0:
# the training progress can be checked in Tensorboard
summary_str = self.sess.run(self.summary_operation,
feed_dict={self.state: self._normalizaiton(sampled_state),
self.action: sampled_action,
self.target: target_Qvalue, self.rewards: totalrewards,
self.gamelength: gamelength, self.Qvalues: totalQvalues})
self.summary_writer.add_summary(summary_str, global_step=update_counter)
if not os.path.exists(self.model_name):
os.makedirs(self.model_name)
# multiply update_counter by self.training_interval to keep it aligned with step (this affects self.start).
self.saver.save(self.sess, self.model_name + "/",
global_step=np.multiply(update_counter, self.training_interval),
write_meta_graph=False)
print("<<< 학습간 전체 게임 횟수 : {} >>>".format(totalgame))
# 닫기
self.sess.close()
self.env.close()
@property
def test(self):
tf.reset_default_graph()
meta_path = glob.glob(os.path.join(self.model_name, '*.meta'))
if len(meta_path) == 0:
print("<<< Graph가 존재 하지 않습니다. 그래프를 그려 주세요. - only_draw_graph = True >>>")
print("<<< 강제 종료 합니다. >>>")
exit(0)
else:
print("<<< Graph가 존재 합니다. >>>")
Graph = tf.Graph()
with Graph.as_default():
saver = tf.train.import_meta_graph(meta_path[0], clear_devices=True) # load the meta graph
if saver == None:
print("<<< The meta file cannot be read. >>>")
print("<<< Forcing exit. >>>")
exit(0)
state, online_Qvalue = tf.get_collection('way')
with tf.Session(graph=Graph) as sess:
ckpt = tf.train.get_checkpoint_state(self.model_name)
if ckpt == None:
print("<<< checkpoint file does not exist>>>")
print("<<< Exit the program >>>")
exit(0)
if (ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path)):
print("<<< all variable retored except for optimizer parameter >>>")
print("<<< Restore {} checkpoint!!! >>>".format(os.path.basename(ckpt.model_checkpoint_path)))
saver.restore(sess, ckpt.model_checkpoint_path)
total_reward = 0
frames = []
sequence_state = self._concatenated_state(self.env.reset())
gamestate = False
step = 0
while gamestate != True:
step += 1
self.env.render()
time.sleep(1 / 30) # 30fps
frame = self.env.render(mode="rgb_array")
frames.append(frame)
Qvalue = sess.run(online_Qvalue, feed_dict={state: self._normalizaiton([sequence_state])})
# epsilon_greedy should also be used in the test code.
'''
From Towards Data Science...
But even during testing, we may maintain ϵ to a small value like 0.05.
A deterministic policy may get stuck in a local optimum. - this is the main cause!!!
A non-deterministic policy allows us to break out for a chance to reach a better optimal.
'''
action = self._epsilon_greedy(Qvalue, self.egreedy_step)
next_state, reward, gamestate, _ = self.env.step(action)
total_reward += reward
# drop the oldest (left-most) frame
sequence_state = np.concatenate(
(sequence_state[:, :, 1:], self._data_preprocessing(next_state)[:, :, np.newaxis]),
axis=-1)
if reward != 0:
print("게임 step {} -> reward :{}".format(step, reward))
print("total reward : {}".format(total_reward))
self.env.close()
if self.SaveGameMovie:
# build the animation
fig = plt.figure(figsize=(2, 3))
patch = plt.imshow(frames[0]) # show the first scene
plt.axis("off") # remove the axes
ani = animation.FuncAnimation(fig,
func=lambda i, frames, patch: patch.set_data(frames[i]),
fargs=(frames, patch),
frames=len(frames),
repeat=True)
# Linux : sudo apt-get install ffmpeg or conda install -c conda-forge ffmpeg
# Windows : conda install ffmpeg
ani.save("{}.mp4".format(self.model_name), writer="ffmpeg", fps=30, dpi=200)
plt.show()
if __name__ == "__main__":
Atari = model(
# https://gym.openai.com/envs/#atari
# PongDeterministic-v4 or BreakoutDeterministic-v4
model_name="PongDeterministic-v4",
# model_name="BreakoutDeterministic-v4",
training_display=(True, 1000000),
training_step=50000000,
training_start_point=50000,
# this means training happens only once every 4 steps.
# -> i.e. play the game for 3 of every 4 steps and learn from those results on the 4th
training_interval=4,
rememorystackNum=500000,
save_step=10000, # save every save_step weight updates.
copy_step=10000, # copy online -> target every copy_step weight updates.
framesize=4, # number of stacked input frames
learning_rate=0.00025,
momentum=0.95,
egreedy_max=1,
egreedy_min=0.1,
egreedy_step=1000000,
discount_factor=0.99,
batch_size=32,
with_replacement=True, # True : sample with replacement, False : sample without replacement
only_draw_graph=False, # initialize the model and only draw the computation graph
SaveGameMovie=True,
doubleQ=True,
dueling=True)
Atari.train # run training
Atari.test # run testing
| [
"[email protected]"
] | |
d0772dd0edd20d0af5d33d835497aa7e243a2f9f | afc4333ad944301ad969de445a5a4e3b223bb456 | /greedy/greedy_03.py | c5f1ec91097b5ff762d82881078e50a0b2bf23bf | [] | no_license | ykiseong303/python_algorithm | 4cf2df8c0ff2223eab70d246c87466d1ebc26133 | e90268103983917835ba6dbcd14b4b515c3d0fae | refs/heads/main | 2023-07-18T10:41:40.180855 | 2021-08-27T14:33:14 | 2021-08-27T14:33:14 | 342,607,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | '''
Category : greedy algorithm
Problem : Make it 1 ('This is Coding Test' problem)
Date written : 2021.02.26
'''
# Given an integer N,
# repeatedly either divide by K or subtract 1,
# and print the minimum number of operations used to reach 1.
## Goal : turn N into 1 with the fewest possible operations
## Approach : at each step, perform the operation that makes the current value of N as small as possible
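# Illustrative run (not part of the original file): for the input "25 5" the loop below goes 25 -> 5 -> 1 via two divisions, so 2 is printed.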
N, K = map(int, input().split())
count = 0
while N>1 :
if not N%K :
N//=K
else :
N-=1
count += 1
print(count) | [
"[email protected]"
] | |
9d44c7fc30c7532a83cbfd55e8f20cb446146010 | 2f63688febd21dc3ae6b19abfa79ad313c820154 | /0343_Integer_Break/try_3.py | 528ae78d9d8cc6ac2b91f6f7d4fed33f238a4064 | [] | no_license | novayo/LeetCode | cadd03587ee4ed6e35f60294070165afc1539ac8 | 54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7 | refs/heads/master | 2023-08-14T00:35:15.528520 | 2023-07-30T05:56:05 | 2023-07-30T05:56:05 | 200,248,146 | 8 | 1 | null | 2022-11-19T04:37:54 | 2019-08-02T14:24:19 | Python | UTF-8 | Python | false | false | 412 | py | class Solution:
def integerBreak(self, n: int) -> int:
dp = [1] * (n+1)
for _n in range(2, n+1):
i, j = 1, _n-1
max_value = 0
while i <= j:
max_value = max(max_value, max(i, dp[i]) * max(j, dp[j]))
i, j = i+1, j-1
dp[_n] = max_value
return dp[-1] | [
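# Illustrative check of the recurrence above (not part of the original file): integerBreak(10) returns 36,
# since 10 = 3 + 3 + 4 and 3 * 3 * 4 = 36 is the largest product of parts that sum to 10.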
"[email protected]"
] | |
1558ce39aec8e8a80e658eadae3fa17706dbffbc | 4315cfa1bd13f08053d549c7c00287a76d656f9e | /src/abc159/B.py | 612261c7290dcb8c80f4dd9c313c478367e21286 | [] | no_license | ma96o/atcoder_archive | 1c47e2189b2d55e7f519349e02ceb24500e2164e | 8bc21d351535eda1f81a47442156f92f234cf6c3 | refs/heads/main | 2023-08-13T17:03:13.855154 | 2021-09-15T14:55:00 | 2021-09-15T14:55:00 | 350,179,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | s = input()
n = len(s)
hs = s[:(n-1)//2]
vs = s[((n+3)//2-1):]
if s == s[::-1] and hs == hs[::-1] and vs == vs[::-1]:
print("Yes")
exit()
print("No")
| [
"[email protected]"
] | |
325c7ead66c60a3d6d4100600e21b951274e002e | 2efee5574ff160c97a94e243c1c820814b008058 | /parse/atest.py | d1fb32e644208592b435d8764e45b986a204877e | [] | no_license | haobtc/blockinfo | 5c18430f3f5eaa7b979c119945b686e0e09d2e1c | 00a094d917224dbe6acb6565125badec14f2400f | refs/heads/master | 2020-03-29T07:01:35.936157 | 2014-05-14T03:55:27 | 2014-05-14T03:55:27 | 17,733,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | from bitcointools.deserialize import decode_script
script = 'j(n\xd6{q\xado\xbcp3\xa74\x06>I\x84\x8dv\x9b\x89m\x83h\xfe\x05\x9e;7\xf0q\n\x1a\x89\x00\x00\x00\x00\x01\x00\x00\x00'
script = 'j(\xd8`\x1d4\xd0\x07lLR/\xab\xe9\xa4>\x83\x87\xcb,\xaa\r\xec\xfcJ\xd0/\xc1\xda\x83o\xb8\xfe\xa4\x00\x00\x00\x00\x03\x00\x00\x00'
script = '$\x12\xae\xab\xaf\x92O[\xc9{$0\xe8/2\xe3\r\x1e&\xda\xccVw\x072Y\n[\xf4V\xbe\xb1f\x05\x00\x00\x00'
script = '$\xc6\xfe\xfa\x02\x8eY5\x10+\xb6\xc0\xf0}bG\xc2\x12\x8a\x19*L\x0eW\xd8\x94\x0e\xfa!e\xde>\xb4P\x00\x00\x00'
print len(script), decode_script(script)
| [
"[email protected]"
] | |
102137592b05149728da3095ba674e187218c5b7 | 7a9472018f8a5c06da7341746bdb54a91ec02df0 | /client/locustfile.py | 09f77e398421728f25ea0b2e23edca5f75a846d6 | [] | no_license | nmaekawa/websockets-tst | fb55fcca04d953b086ae8c36e4c2df5c55566c0e | 7278e134b1c60a87873a38f7bbff7f8e3b96fa1c | refs/heads/master | 2022-07-27T00:31:53.448846 | 2020-01-28T21:33:22 | 2020-01-28T21:33:22 | 221,528,746 | 0 | 0 | null | 2022-05-25T03:33:53 | 2019-11-13T18:47:05 | Python | UTF-8 | Python | false | false | 5,553 | py | #
# automated run: use --no-web and --csv
# $> locust -f examples/basic.py --csv=example --no-web -t10m -c 1 -r 1 --host <hxat url>
#
import os
import json
import logging
from random import randint
from subprocess import Popen
from subprocess import PIPE
from uuid import uuid4
from locust import between
from locust import HttpLocust
from locust import TaskSet
from locust import task
import locust.stats
# set custom interval for stats; default is 2s
locust.stats.CSV_STATS_INTERVAL_SEC = 5
TOKEN = ''
USER_ID = ''
USER_NAME = ''
CONTEXT_ID = ''
COLLECTION_ID = ''
TARGET_SOURCE_ID = ''
RESOURCE_LINK_ID = ''
UTM_SOURCE = ''
# this is particular to the target_source document
# and used to randomize the region being annotate
PTAG=2
target_doc = [0, 589, 313, 434, 593, 493]
def fetch_fortune():
process = Popen('fortune', shell=True, stdout=PIPE, stderr=None)
output, _ = process.communicate()
return output.decode('utf-8')
def fresh_wa():
sel_start = randint(0, target_doc[PTAG])
sel_end = randint(sel_start, target_doc[PTAG])
x = {
"@context": "http://catchpy.harvardx.harvard.edu.s3.amazonaws.com/jsonld/catch_context_jsonld.json",
"body": {
"type": "List",
"items": [{
"format": "text/html",
"language": "en",
"purpose": "commenting",
"type": "TextualBody",
"value": fetch_fortune()
}],
},
"creator": {
"id": "d99019cf42efda58f412e711d97beebe",
"name": "nmaekawa2017"
},
"id": "013ec74f-1234-5678-3c61-b5cf9d6f7484",
"permissions": {
"can_admin": [ USER_ID ],
"can_delete": [ USER_ID ],
"can_read": [],
"can_update": [ USER_ID ]
},
"platform": {
"collection_id": COLLECTION_ID,
"context_id": CONTEXT_ID,
"platform_name": "edX",
"target_source_id": TARGET_SOURCE_ID,
},
"schema_version": "1.1.0",
"target": {
"items": [{
"selector": {
"items": [
{ "endSelector": { "type": "XPathSelector", "value": "/div[1]/p[{}]".format(PTAG) },
"refinedBy": { "end": sel_end, "start": sel_start, "type": "TextPositionSelector" },
"startSelector": { "type": "XPathSelector", "value": "/div[1]/p[{}]".format(PTAG) },
"type": "RangeSelector" },
],
"type": "Choice"
},
"source": "http://sample.com/fake_content/preview", "type": "Text"
}],
"type": "List"
},
"type": "Annotation"
}
return x
class UserBehavior_CreateWebAnnotation(TaskSet):
#def on_start(self):
# self.catcha = fresh_wa()
@task(1)
def add_annotation(self):
catcha = fresh_wa()
# create annotation
anno_id = str(uuid4())
target_path = '/annotation_store/api/{}?resource_link_id={}&utm_source={}&version=catchpy'.format(
anno_id, RESOURCE_LINK_ID, UTM_SOURCE)
response = self.client.post(
target_path, json=catcha, catch_response=True,
headers={
'Content-Type': 'Application/json',
'x-annotator-auth-token': TOKEN,
'Referer': 'https://naomi.hxat.hxtech.org/lti_init/launch_lti/',
},
verify=False,
)
if response.content == '':
response.failure('no data')
else:
try:
a_id = response.json()['id']
except KeyError:
resp = response.json()
if 'payload' in resp:
response.failure(resp['payload'])
else:
response.failure('no id in response')
return
except json.decoder.JSONDecodeError as e:
response.failure(e)
return
else:
response.success()
@task(10)
def search(self):
target_path = '/annotation_store/api/?resource_link_id={}&utm_source={}&version=catchpy&limit=10&offset=0&media=text&source_id={}&context_id={}&collection_id={}'.format(
RESOURCE_LINK_ID, UTM_SOURCE,
TARGET_SOURCE_ID, CONTEXT_ID, COLLECTION_ID)
response = self.client.get(
target_path, catch_response=True,
headers={
'Content-Type': 'Application/json',
'x-annotator-auth-token': TOKEN,
'Referer': 'https://naomi.hxat.hxtech.org/lti_init/launch_lti/',
},
verify=False,
)
if response.content == '':
response.failure('no data')
else:
try:
rows = response.json()['rows']
except KeyError:
resp = response.json()
if 'payload' in resp:
response.failure(resp['payload'])
else:
response.failure('missing rows in search response')
return
except json.decoder.JSONDecodeError as e:
response.failure(e)
return
else:
response.success()
class WebsiteUser(HttpLocust):
task_set = UserBehavior_CreateWebAnnotation
wait_time = between(5, 20)
| [
"[email protected]"
] | |
a07aa91eec7899727971e8ef6382c7d6c75ff0dc | bf75d497793b9f5df14bacc368cb43a509615045 | /accounts/migrations/0001_initial.py | 0f45c720c0474885cb3946df0d1bf4ea76c3b478 | [] | no_license | haruyasu/django-allauth-base | 05202fff81f74e44ec8d41cafd141c66d97cc034 | b7f65e4844331e3341ec9f562b9f71b79f333941 | refs/heads/main | 2023-07-18T21:53:47.670149 | 2021-09-22T06:24:06 | 2021-09-22T06:24:06 | 409,087,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,397 | py | # Generated by Django 3.2.7 on 2021-09-22 05:24
import accounts.models
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=254, unique=True, verbose_name='メールアドレス')),
('name', models.CharField(max_length=30, verbose_name='氏名')),
('company', models.CharField(blank=True, max_length=30, null=True, verbose_name='会社名')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='入会日')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', accounts.models.UserManager()),
],
),
]
| [
"[email protected]"
] | |
69af161c933e2333c069fc3bf5828085f06d2373 | ceb3d82494813cd21e38231964e098bb3efe093b | /Feature/structure_tensor_eigenvalues.py | f72a99520193f77a04dcbe1808375927c8ee383b | [
"Apache-2.0"
] | permissive | Joevaen/Scikit-image_On_CT | 0c0a306a9ca18668bd9bb4105e577766b1d5578b | e3bf0eeadc50691041b4b7c44a19d07546a85001 | refs/heads/main | 2023-03-16T01:28:04.871513 | 2021-03-16T07:53:57 | 2021-03-16T07:53:57 | 344,071,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | # 计算结构张量的特征值。
from skimage.feature import structure_tensor
from skimage.feature import structure_tensor_eigenvalues
import numpy as np
square = np.zeros((5, 5))
square[2, 2] = 1
A_elems = structure_tensor(square, sigma=0.1, order='rc')
print(structure_tensor_eigenvalues(A_elems)[0])
| [
"[email protected]"
] | |
c1b3bd9f77d36739a30305d571e50d3ca6a74293 | ae46ca697eabe8ec5eea14aa0e218f6b9f7d0457 | /util/puzzle_runner.py | ab24a1ae85838269695b3f3f11c93e591fe295be | [] | no_license | MyreMylar/word_search | 334993d0bd4eafa8a641ba09abf82d4f4cbbed48 | 4d33ad230240cbf259b374725122786118cf5b2c | refs/heads/master | 2020-03-12T23:56:28.520090 | 2018-04-24T15:37:15 | 2018-04-24T15:37:15 | 130,876,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,075 | py | import hashlib
import pygame
from pygame.locals import *
class Puzzle:
def __init__(self, task, answer):
self.task = task
self.answer = answer
self.answer_hash = ""
self.correct = False
class RenderedPuzzle:
def __init__(self, task, task_size, answer, answer_size, result, result_size):
self.task = task
self.task_size = task_size
self.answer = answer
self.answer_size = answer_size
self.result = result
self.result_size = result_size
def encode_answer(answer):
encoded_answer = hashlib.sha1(answer.lower().encode()).hexdigest()
return encoded_answer
def run_puzzles(puzzle1, puzzle2, puzzle3, puzzle4, puzzle5, puzzle6):
pygame.init()
pygame.display.set_icon(pygame.image.load("util/puzzle_icon.png"))
pygame.display.set_caption('Word Search')
screen = pygame.display.set_mode((1000, 800))
background = pygame.Surface(screen.get_size())
background = background.convert(screen)
background.fill((30, 37, 41))
font = pygame.font.Font("util/FiraCode-Regular.ttf", 12)
font_bold = pygame.font.Font("util/FiraCode-Bold.ttf", 12)
allowed_keys = u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
final_answer_string = u''
running = True
text_color = pygame.Color(215, 218, 219, 255)
question_colour = pygame.Color(231, 132, 162, 255)
right_text_color = pygame.Color(60, 255, 255, 255)
wrong_text_color = pygame.Color(255, 200, 80, 255)
meta_text_color = pygame.Color(133, 152, 244, 255)
puzzle1.answer_hash = "24e7451df05ed5cd4cf1041be67c68f8d89d087a"
puzzle2.answer_hash = "063a6bf659ec1feb283f3b09d97c6814af62d134"
puzzle3.answer_hash = "48181acd22b3edaebc8a447868a7df7ce629920a"
puzzle4.answer_hash = "5737ef08a3ec16a337ac79a1d719fb91acba20a4"
puzzle5.answer_hash = "4c1f32a51dbf7d6943108c64980b6935762f87d2"
puzzle6.answer_hash = "56b80273da1d7c0ac32ce82840d543a9da755bfd"
puzzles = [puzzle1, puzzle2, puzzle3, puzzle4, puzzle5, puzzle6]
rendered_puzzles = []
puzzle_num = 1
for puzzle in puzzles:
task_text = font_bold.render("Puzzle " + str(puzzle_num) + ". " + puzzle.task, True, question_colour)
task_text_size = font.size("Puzzle " + str(puzzle_num) + ". " + puzzle.task)
answer_text = font.render("Your current answer is: " + puzzle.answer, True, text_color)
answer_text_size = font.size("Your current answer is: " + puzzle.answer)
answer_hash = hashlib.sha1(puzzle.answer.lower().encode()).hexdigest()
if answer_hash == puzzle.answer_hash:
result_text = font.render("This answer is correct!", True, right_text_color)
result_text_size = font.size("This answer is correct!")
result_text_size = [result_text_size[0], result_text_size[1] * 3]
puzzle.correct = True
else:
result_text = font.render("This answer is wrong.", True, wrong_text_color)
result_text_size = font.size("This answer is wrong.")
result_text_size = [result_text_size[0], result_text_size[1] * 3]
rendered_puzzles.append(RenderedPuzzle(task_text, task_text_size, answer_text,
answer_text_size, result_text, result_text_size))
puzzle_num += 1
if all(puzzle.correct for puzzle in puzzles):
all_correct = True
else:
all_correct = False
final_puzzle_text_1 = font_bold.render("CONGRATULATIONS! ALL NORMAL PUZZLES SOLVED. ", True, right_text_color)
final_puzzle_text_2 = font_bold.render("META PUZZLE UNLOCKED!", True, meta_text_color)
final_puzzle_text_3 = font.render("1. Enter the fourth letter of your first answer.", True, meta_text_color)
final_puzzle_text_4 = font.render("2. Enter the third letter of your fifth answer.", True, meta_text_color)
final_puzzle_text_5 = font.render("3. Enter the second letter of your third Answer.", True, meta_text_color)
final_puzzle_text_6 = font.render("4. Enter the second letter of your fourth answer.", True, meta_text_color)
final_puzzle_text_7 = font.render("5. Enter the second letter of your second answer.", True, meta_text_color)
final_puzzle_text_8 = font.render("6. Enter the eighth letter of your sixth answer.", True, meta_text_color)
final_puzzle_result_text = font_bold.render("CORRECT! FINAL META PUZZLE SOLVED!!! HOORAY!!!",
True, right_text_color)
while running:
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
running = False
elif event.key == K_BACKSPACE:
final_answer_string = final_answer_string[:-1]
elif all_correct:
if event.unicode in allowed_keys:
final_answer_string += event.unicode
if event.type == QUIT:
running = False
screen.blit(background, (0, 0))
y_height = 20
for puzzle in rendered_puzzles:
screen.blit(puzzle.task, puzzle.task.get_rect(x=20, y=y_height))
y_height += puzzle.task_size[1]
screen.blit(puzzle.answer, puzzle.answer.get_rect(x=20, y=y_height))
y_height += puzzle.answer_size[1]
screen.blit(puzzle.result, puzzle.result.get_rect(x=20, y=y_height))
y_height += puzzle.result_size[1]
if all_correct:
screen.blit(final_puzzle_text_1, (20, y_height))
x_adjust = final_puzzle_text_1.get_rect().width
screen.blit(final_puzzle_text_2, (20 + x_adjust, y_height))
y_height += final_puzzle_text_2.get_rect().height*2
screen.blit(final_puzzle_text_3, (20, y_height))
y_height += final_puzzle_text_3.get_rect().height
screen.blit(final_puzzle_text_4, (20, y_height))
y_height += final_puzzle_text_4.get_rect().height
screen.blit(final_puzzle_text_5, (20, y_height))
y_height += final_puzzle_text_5.get_rect().height
screen.blit(final_puzzle_text_6, (20, y_height))
y_height += final_puzzle_text_6.get_rect().height
screen.blit(final_puzzle_text_7, (20, y_height))
y_height += final_puzzle_text_7.get_rect().height
screen.blit(final_puzzle_text_8, (20, y_height))
y_height += final_puzzle_text_8.get_rect().height*2
answer_text = font.render("Answer: " + final_answer_string, True, text_color)
screen.blit(answer_text, answer_text.get_rect(x=20, y=y_height))
y_height += font.size("Answer: " + final_answer_string)[1]*2
final_answer = final_answer_string.lower()
if encode_answer(final_answer) == "59c826fc854197cbd4d1083bce8fc00d0761e8b3":
screen.blit(final_puzzle_result_text, (20, y_height))
pygame.display.flip() # flip all our drawn stuff onto the screen
pygame.quit()
| [
"[email protected]"
] | |
462d5c413f4d07555ec10a8367098bb854b7d802 | e33fa4ac60f3504deb885d1c5af5df4127b2cc81 | /src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_commands.py | 8962a2cbb656aee4f1a36ee25f34740bc68df4a0 | [
"MIT",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"BSD-3-Clause",
"MPL-2.0",
"LGPL-2.1-or-later",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | isabella232/azure-cli | d82f504480c9ff53f23cd383a4ec3fe67301132a | 417d2189453437a4b109b0536c1131f73d53312e | refs/heads/dev | 2023-03-07T14:11:11.093502 | 2020-12-04T08:33:12 | 2020-12-04T08:33:12 | 318,479,446 | 0 | 0 | MIT | 2021-02-23T20:57:28 | 2020-12-04T10:20:15 | null | UTF-8 | Python | false | false | 87,641 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# AZURE CLI VM TEST DEFINITIONS
import json
import os
import platform
import tempfile
import time
import unittest
import mock
import uuid
import six
from knack.util import CLIError
from azure_devtools.scenario_tests import AllowLargeResponse, record_only
from azure.cli.core.profiles import ResourceType
from azure.cli.testsdk import (
ScenarioTest, ResourceGroupPreparer, LiveScenarioTest, api_version_constraint,
StorageAccountPreparer)
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
TEST_SSH_KEY_PUB = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCbIg1guRHbI0lV11wWDt1r2cUdcNd27CJsg+SfgC7miZeubtwUhbsPdhMQsfDyhOWHq1+ZL0M+nJZV63d/1dhmhtgyOqejUwrPlzKhydsbrsdUor+JmNJDdW01v7BXHyuymT8G4s09jCasNOwiufbP/qp72ruu0bIA1nySsvlf9pCQAuFkAnVnf/rFhUlOkhtRpwcq8SUNY2zRHR/EKb/4NWY1JzR4sa3q2fWIJdrrX0DvLoa5g9bIEd4Df79ba7v+yiUBOS0zT2ll+z4g9izHK3EO5d8hL4jYxcjKs+wcslSYRWrascfscLgMlMGh0CdKeNTDjHpGPncaf3Z+FwwwjWeuiNBxv7bJo13/8B/098KlVDl4GZqsoBCEjPyJfV6hO0y/LkRGkk7oHWKgeWAfKtfLItRp00eZ4fcJNK9kCaSMmEugoZWcI7NGbZXzqFWqbpRI7NcDP9+WIQ+i9U5vqWsqd/zng4kbuAJ6UuKqIzB0upYrLShfQE3SAck8oaLhJqqq56VfDuASNpJKidV+zq27HfSBmbXnkR/5AK337dc3MXKJypoK/QPMLKUAP5XLPbs+NddJQV7EZXd29DLgp+fRIg3edpKdO7ZErWhv7d+3Kws+e1Y+ypmR2WIVSwVyBEUfgv2C8Ts9gnTF4pNcEY/S2aBicz5Ew2+jdyGNQQ== [email protected]\n"
def _write_config_file(user_name):
public_key = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8InHIPLAu6lMc0d+5voyXqigZfT5r6fAM1+FQAi+mkPDdk2hNq1BG0Bwfc88G'
'm7BImw8TS+x2bnZmhCbVnHd6BPCDY7a+cHCSqrQMW89Cv6Vl4ueGOeAWHpJTV9CTLVz4IY1x4HBdkLI2lKIHri9+z7NIdvFk7iOk'
'MVGyez5H1xDbF2szURxgc4I2/o5wycSwX+G8DrtsBvWLmFv9YAPx+VkEHQDjR0WWezOjuo1rDn6MQfiKfqAjPuInwNOg5AIxXAOR'
'esrin2PUlArNtdDH1zlvI4RZi36+tJO7mtm3dJiKs4Sj7G6b1CjIU6aaj27MmKy3arIFChYav9yYM3IT')
config = {
'username': user_name,
'ssh_key': public_key
}
_, config_file = tempfile.mkstemp()
with open(config_file, 'w') as outfile:
json.dump(config, outfile)
return config_file
# class VMImageListByAliasesScenarioTest(ScenarioTest):
# def test_vm_image_list_by_alias(self):
# result = self.cmd('vm image list --offer ubuntu').get_output_in_json()
# self.assertTrue(len(result) >= 1)
# self.assertEqual(result[0]['publisher'], 'Canonical')
# self.assertTrue(result[0]['sku'].endswith('LTS'))
# class VMUsageScenarioTest(ScenarioTest):
# def test_vm_usage(self):
# self.cmd('vm list-usage --location westus',
# checks=self.check('type(@)', 'array'))
# class VMImageListThruServiceScenarioTest(ScenarioTest):
# @AllowLargeResponse()
# def test_vm_images_list_thru_services(self):
# result = self.cmd('vm image list -l westus --publisher Canonical --offer UbuntuServer -o tsv --all').output
# assert result.index('16.04') >= 0
# result = self.cmd('vm image list -p Canonical -f UbuntuServer -o tsv --all').output
# assert result.index('16.04') >= 0
# class VMOpenPortTest(ScenarioTest):
# @ResourceGroupPreparer(name_prefix='cli_test_open_port')
# def test_vm_open_port(self, resource_group):
# self.kwargs.update({
# 'vm': 'vm1'
# })
# self.cmd('vm create -g {rg} -l westus -n {vm} --admin-username ubuntu --image Canonical:UbuntuServer:14.04.4-LTS:latest --admin-password @PasswordPassword1! --public-ip-address-allocation dynamic --authentication-type password')
# # min params - apply to existing NIC (updates existing NSG)
# self.kwargs['nsg_id'] = self.cmd('vm open-port -g {rg} -n {vm} --port "*" --priority 900').get_output_in_json()['id']
# self.kwargs['nsg'] = os.path.split(self.kwargs['nsg_id'])[1]
# self.cmd('network nsg show -g {rg} -n {nsg}',
# checks=self.check("length(securityRules[?name == 'open-port-all'])", 1))
# # apply to subnet (creates new NSG)
# self.kwargs['nsg'] = 'newNsg'
# self.cmd('vm open-port -g {rg} -n {vm} --apply-to-subnet --nsg-name {nsg} --port "*" --priority 900')
# self.cmd('network nsg show -g {rg} -n {nsg}',
# checks=self.check("length(securityRules[?name == 'open-port-all'])", 1))
# class VMShowListSizesListIPAddressesScenarioTest(ScenarioTest):
# @ResourceGroupPreparer(name_prefix='cli_test_vm_list_ip')
# def test_vm_show_list_sizes_list_ip_addresses(self, resource_group):
# self.kwargs.update({
# 'loc': 'centralus',
# 'vm': 'vm-with-public-ip',
# 'allocation': 'dynamic',
# 'zone': 2
# })
# # Expecting no results at the beginning
# self.cmd('vm list-ip-addresses --resource-group {rg}', checks=self.is_empty())
# self.cmd('vm create --resource-group {rg} --location {loc} -n {vm} --admin-username ubuntu --image Canonical:UbuntuServer:14.04.4-LTS:latest'
# ' --admin-password testPassword0 --public-ip-address-allocation {allocation} --authentication-type password --zone {zone}')
# result = self.cmd('vm show --resource-group {rg} --name {vm} -d', checks=[
# self.check('type(@)', 'object'),
# self.check('name', '{vm}'),
# self.check('location', '{loc}'),
# self.check('resourceGroup', '{rg}')
# ]).get_output_in_json()
# self.assertEqual(4, len(result['publicIps'].split('.')))
# result = self.cmd('vm list --resource-group {rg} -d', checks=[
# self.check('[0].name', '{vm}'),
# self.check('[0].location', '{loc}'),
# self.check('[0].resourceGroup', '{rg}'),
# self.check('[0].powerState', 'VM running')
# ]).get_output_in_json()
# self.assertEqual(4, len(result[0]['publicIps'].split('.')))
# self.cmd('vm list-vm-resize-options --resource-group {rg} --name {vm}',
# checks=self.check('type(@)', 'array'))
# # Expecting the one we just added
# self.kwargs['rg_caps'] = resource_group.upper() # test the command handles name with casing diff.
# self.cmd('vm list-ip-addresses --resource-group {rg_caps}', checks=[
# self.check('length(@)', 1),
# self.check('[0].virtualMachine.name', '{vm}'),
# self.check('[0].virtualMachine.resourceGroup', '{rg}'),
# self.check('length([0].virtualMachine.network.publicIpAddresses)', 1),
# self.check('[0].virtualMachine.network.publicIpAddresses[0].ipAllocationMethod', self.kwargs['allocation'].title()),
# self.check('type([0].virtualMachine.network.publicIpAddresses[0].ipAddress)', 'string'),
# self.check('[0].virtualMachine.network.publicIpAddresses[0].zone', '{zone}'),
# self.check('type([0].virtualMachine.network.publicIpAddresses[0].name)', 'string'),
# self.check('[0].virtualMachine.network.publicIpAddresses[0].resourceGroup', '{rg}')
# ])
class VMSizeListScenarioTest(ScenarioTest):
def test_vm_size_list(self):
self.cmd('vm list-sizes --location westus',
checks=self.check('type(@)', 'array'))
class VMImageListOffersScenarioTest(ScenarioTest):
def test_vm_image_list_offers(self):
self.kwargs.update({
'loc': 'westus',
'pub': 'Canonical'
})
result = self.cmd('vm image list-offers --location {loc} --publisher {pub}').get_output_in_json()
self.assertTrue(len(result) > 0)
self.assertFalse([i for i in result if i['location'].lower() != self.kwargs['loc']])
class VMImageListPublishersScenarioTest(ScenarioTest):
@AllowLargeResponse()
def test_vm_image_list_publishers(self):
self.kwargs.update({
'loc': 'westus'
})
self.cmd('vm image list-publishers --location {loc}', checks=[
self.check('type(@)', 'array'),
self.check("length([?location == '{loc}']) == length(@)", True),
])
class VMImageListSkusScenarioTest(ScenarioTest):
def test_vm_image_list_skus(self):
self.kwargs.update({
'loc': 'westus',
'pub': 'Canonical',
'offer': 'UbuntuServer'
})
result = self.cmd("vm image list-skus --location {loc} -p {pub} --offer {offer} --query \"length([].id.contains(@, '/Publishers/{pub}/ArtifactTypes/VMImage/Offers/{offer}/Skus/'))\"").get_output_in_json()
self.assertTrue(result > 0)
class VMImageShowScenarioTest(ScenarioTest):
def test_vm_image_show(self):
self.kwargs.update({
'loc': 'westus',
'pub': 'Canonical',
'offer': 'UbuntuServer',
'sku': '14.04.2-LTS',
'ver': '14.04.201503090'
})
self.cmd('vm image show --location {loc} --publisher {pub} --offer {offer} --sku {sku} --version {ver}', checks=[
self.check('type(@)', 'object'),
self.check('location', '{loc}'),
self.check('name', '{ver}'),
self.check("contains(id, '/Publishers/{pub}/ArtifactTypes/VMImage/Offers/{offer}/Skus/{sku}/Versions/{ver}')", True)
])
class VMGeneralizeScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_generalize_vm')
def test_vm_generalize(self, resource_group):
self.kwargs.update({
'vm': 'vm-generalize'
})
self.cmd('vm create -g {rg} -n {vm} --admin-username ubuntu --image UbuntuLTS --admin-password testPassword0 --authentication-type password --use-unmanaged-disk')
self.cmd('vm stop -g {rg} -n {vm}')
# Should be able to generalize the VM after it has been stopped
self.cmd('vm generalize -g {rg} -n {vm}', checks=self.is_empty())
vm = self.cmd('vm show -g {rg} -n {vm}').get_output_in_json()
self.cmd('vm capture -g {rg} -n {vm} --vhd-name-prefix vmtest')
# capture to a custom image
self.kwargs['image'] = 'myImage'
self.cmd('image create -g {rg} -n {image} --source {vm}', checks=[
self.check('name', '{image}'),
self.check('sourceVirtualMachine.id', vm['id']),
self.check('storageProfile.zoneResilient', None)
])
@ResourceGroupPreparer(name_prefix='cli_test_generalize_vm')
def test_vm_capture_zone_resilient_image(self, resource_group):
self.kwargs.update({
'loc': 'francecentral',
'vm': 'vm-generalize'
})
self.cmd('vm create -g {rg} --location {loc} -n {vm} --admin-username ubuntu --image centos --admin-password testPassword0 --authentication-type password')
self.cmd('vm deallocate -g {rg} -n {vm}')
# Should be able to generalize the VM after it has been stopped
self.cmd('vm generalize -g {rg} -n {vm}', checks=self.is_empty())
vm = self.cmd('vm show -g {rg} -n {vm}').get_output_in_json()
# capture to a custom image
self.kwargs['image'] = 'myImage2'
self.cmd('image create -g {rg} -n {image} --source {vm} --zone-resilient -l {loc}', checks=[
self.check('name', '{image}'),
self.check('sourceVirtualMachine.id', vm['id']),
self.check('storageProfile.zoneResilient', True)
])
class VMVMSSWindowsLicenseTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_windows_license_type')
def test_vm_vmss_windows_license_type(self, resource_group):
self.kwargs.update({
'vm': 'winvm',
'vmss': 'winvmss'
})
self.cmd('vm create -g {rg} -n {vm} --image Win2012R2Datacenter --admin-username clitest1234 --admin-password Test123456789# --license-type Windows_Server')
self.cmd('vm show -g {rg} -n {vm}', checks=[
self.check('licenseType', 'Windows_Server')
])
self.cmd('vm update -g {rg} -n {vm} --license-type None', checks=[
self.check('licenseType', 'None')
])
self.cmd('vmss create -g {rg} -n {vmss} --image Win2012R2Datacenter --admin-username clitest1234 --admin-password Test123456789# --license-type Windows_Server --instance-count 1')
self.cmd('vmss show -g {rg} -n {vmss}', checks=[
self.check('virtualMachineProfile.licenseType', 'Windows_Server')
])
self.cmd('vmss update -g {rg} -n {vmss} --license-type None', checks=[
self.check('virtualMachineProfile.licenseType', 'None')
])
class VMCustomImageTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vm_custom_image')
def test_vm_custom_image(self, resource_group):
self.kwargs.update({
'vm1': 'vm-unmanaged-disk',
'vm2': 'vm-managed-disk',
'newvm1': 'fromimage1',
'newvm2': 'fromimage2',
'image1': 'img-from-unmanaged',
'image2': 'img-from-managed',
})
self.cmd('vm create -g {rg} -n {vm1} --image ubuntults --use-unmanaged-disk --admin-username sdk-test-admin --admin-password testPassword0')
# deprovision the VM, but we have to do it async to avoid hanging the run-command itself
self.cmd('vm run-command invoke -g {rg} -n {vm1} --command-id RunShellScript --scripts "echo \'sudo waagent -deprovision+user --force\' | at -M now + 1 minutes"')
time.sleep(70)
self.cmd('vm deallocate -g {rg} -n {vm1}')
self.cmd('vm generalize -g {rg} -n {vm1}')
self.cmd('image create -g {rg} -n {image1} --source {vm1}')
self.cmd('vm create -g {rg} -n {vm2} --image ubuntults --storage-sku standard_lrs --data-disk-sizes-gb 1 1 --admin-username sdk-test-admin --admin-password testPassword0')
self.cmd('vm run-command invoke -g {rg} -n {vm2} --command-id RunShellScript --scripts "echo \'sudo waagent -deprovision+user --force\' | at -M now + 1 minutes"')
time.sleep(70)
self.cmd('vm deallocate -g {rg} -n {vm2}')
self.cmd('vm generalize -g {rg} -n {vm2}')
self.cmd('image create -g {rg} -n {image2} --source {vm2}')
self.cmd('vm create -g {rg} -n {newvm1} --image {image1} --admin-username sdk-test-admin --admin-password testPassword0 --authentication-type password')
self.cmd('vm show -g {rg} -n {newvm1}', checks=[
self.check('storageProfile.imageReference.resourceGroup', '{rg}'),
self.check('storageProfile.osDisk.createOption', 'FromImage')
])
self.cmd('vmss create -g {rg} -n vmss1 --image {image1} --admin-username sdk-test-admin --admin-password testPassword0 --authentication-type password', checks=[
self.check('vmss.virtualMachineProfile.storageProfile.imageReference.resourceGroup', '{rg}'),
self.check('vmss.virtualMachineProfile.storageProfile.osDisk.createOption', 'FromImage')
])
self.cmd('vm create -g {rg} -n {newvm2} --image {image2} --admin-username sdk-test-admin --admin-password testPassword0 --authentication-type password')
self.cmd('vm show -g {rg} -n {newvm2}', checks=[
self.check('storageProfile.imageReference.resourceGroup', '{rg}'),
self.check('storageProfile.osDisk.createOption', 'FromImage'),
self.check("length(storageProfile.dataDisks)", 2),
self.check("storageProfile.dataDisks[0].createOption", 'FromImage'),
self.check('storageProfile.dataDisks[0].managedDisk.storageAccountType', 'Standard_LRS')
])
self.cmd('vmss create -g {rg} -n vmss2 --image {image2} --admin-username sdk-test-admin --admin-password testPassword0 --authentication-type password', checks=[
self.check('vmss.virtualMachineProfile.storageProfile.imageReference.resourceGroup', '{rg}'),
self.check('vmss.virtualMachineProfile.storageProfile.osDisk.createOption', 'FromImage'),
self.check("length(vmss.virtualMachineProfile.storageProfile.dataDisks)", 2),
self.check("vmss.virtualMachineProfile.storageProfile.dataDisks[0].createOption", 'FromImage'),
self.check("vmss.virtualMachineProfile.storageProfile.dataDisks[0].managedDisk.storageAccountType", 'Standard_LRS'),
self.check("vmss.virtualMachineProfile.storageProfile.dataDisks[1].createOption", 'FromImage'),
self.check("vmss.virtualMachineProfile.storageProfile.dataDisks[1].managedDisk.storageAccountType", 'Standard_LRS')
])
@ResourceGroupPreparer(name_prefix='cli_test_vm_custom_image_conflict')
def test_vm_custom_image_name_conflict(self, resource_group):
self.kwargs.update({
'vm': 'test-vm',
'image1': 'img-from-vm',
'image2': 'img-from-vm-id',
'image3': 'img-from-disk-id',
})
self.cmd('vm create -g {rg} -n {vm} --image debian --use-unmanaged-disk --admin-username ubuntu --admin-password testPassword0 --authentication-type password')
vm1_info = self.cmd('vm show -g {rg} -n {vm}').get_output_in_json()
self.cmd('vm stop -g {rg} -n {vm}')
# set variables up to test against name conflict between disk and vm.
self.kwargs.update({
'os_disk_vhd_uri': vm1_info['storageProfile']['osDisk']['vhd']['uri'],
'vm_id': vm1_info['id'],
'os_disk': vm1_info['name']
})
# create disk with same name as vm
disk_info = self.cmd('disk create -g {rg} -n {os_disk} --source {os_disk_vhd_uri} --os-type linux').get_output_in_json()
self.kwargs.update({'os_disk_id': disk_info['id']})
# Deallocate and generalize vm. Do not need to deprovision vm as this test will not recreate a vm from the image.
self.cmd('vm deallocate -g {rg} -n {vm}')
self.cmd('vm generalize -g {rg} -n {vm}')
# Create image from vm
self.cmd('image create -g {rg} -n {image1} --source {vm}', checks=[
self.check("sourceVirtualMachine.id", '{vm_id}'),
self.check("storageProfile.osDisk.managedDisk", None)
])
# Create image from vm id
self.cmd('image create -g {rg} -n {image2} --source {vm_id}', checks=[
self.check("sourceVirtualMachine.id", '{vm_id}'),
self.check("storageProfile.osDisk.managedDisk", None)
])
# Create image from disk id
self.cmd('image create -g {rg} -n {image3} --source {os_disk_id} --os-type linux', checks=[
self.check("sourceVirtualMachine", None),
self.check("storageProfile.osDisk.managedDisk.id", '{os_disk_id}')
])
class VMImageWithPlanTest(ScenarioTest):
@ResourceGroupPreparer()
def test_vm_create_with_market_place_image(self, resource_group, resource_group_location):
# test 2 scenarios, 1. create vm from market place image, 2. create from a custom image captured from such vms
self.kwargs.update({
'location': resource_group_location,
'publisher': 'microsoft-ads',
'offer': 'linux-data-science-vm-ubuntu',
'sku': 'linuxdsvmubuntu',
'vm1': 'vm1',
'vm2': 'vm2',
'image': 'image1'
})
self.kwargs['urn'] = '{publisher}:{offer}:{sku}:latest'.format(**self.kwargs)
# extract out the plan info to be used when create the vm from the captured image
plan = self.cmd('vm image show --urn {urn}').get_output_in_json()['plan']
self.kwargs['plan_name'] = plan['name']
self.kwargs['plan_product'] = plan['product']
self.kwargs['plan_publisher'] = plan['publisher']
# let us accept the term
self.cmd('vm image accept-terms --urn {urn}', checks=self.check('accepted', True))
# create a vm and capture an image from it
self.cmd('vm create -g {rg} -n {vm1} --image {urn} --admin-username sdk-test-admin --admin-password testPassword0')
# deprovision the VM, but we have to do it async to avoid hanging the run-command itself
self.cmd('vm run-command invoke -g {rg} -n {vm1} --command-id RunShellScript --scripts "echo \'sudo waagent -deprovision+user --force\' | at -M now + 1 minutes"')
time.sleep(70)
self.cmd('vm deallocate -g {rg} -n {vm1}')
self.cmd('vm generalize -g {rg} -n {vm1}')
self.cmd('image create -g {rg} -n {image} --source {vm1}')
self.cmd('vm create -g {rg} -n {vm2} --image {image} --admin-username sdk-test-admin --admin-password testPassword0 --authentication-type password --plan-publisher {plan_publisher} --plan-name {plan_name} --plan-product {plan_product}')
self.cmd('vm show -g {rg} -n {vm2}', checks=self.check('provisioningState', 'Succeeded'))
class VMCreateFromUnmanagedDiskTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vm_from_unmanaged_disk')
def test_vm_create_from_unmanaged_disk(self, resource_group):
# create a vm with unmanaged os disk
self.kwargs.update({
'loc': 'westus',
'vm': 'vm1'
})
self.cmd('vm create -g {rg} -n {vm} --image debian --use-unmanaged-disk --admin-username ubuntu --admin-password testPassword0 --authentication-type password')
vm1_info = self.cmd('vm show -g {rg} -n {vm}', checks=[
self.check('name', '{vm}'),
self.check('licenseType', None)
]).get_output_in_json()
self.cmd('vm stop -g {rg} -n {vm}')
# import the unmanaged os disk into a specialized managed disk
self.kwargs.update({
'os_disk_vhd_uri': vm1_info['storageProfile']['osDisk']['vhd']['uri'],
'vm': 'vm2',
'os_disk': 'os1'
})
self.cmd('disk create -g {rg} -n {os_disk} --source {os_disk_vhd_uri} --os-type linux',
checks=[self.check('name', '{os_disk}'), self.check('osType', 'Linux')])
# create a vm by attaching to it
self.cmd('vm create -g {rg} -n {vm} --attach-os-disk {os_disk} --os-type linux',
checks=self.check('powerState', 'VM running'))
class VMCreateWithSpecializedUnmanagedDiskTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vm_with_specialized_unmanaged_disk')
def test_vm_create_with_specialized_unmanaged_disk(self, resource_group):
self.kwargs.update({
'loc': 'westus'
})
# create a vm with unmanaged os disk
self.cmd('vm create -g {rg} -n vm1 --image debian --use-unmanaged-disk --admin-username ubuntu --admin-password testPassword0 --authentication-type password')
vm1_info = self.cmd('vm show -g {rg} -n vm1').get_output_in_json()
self.kwargs['disk_uri'] = vm1_info['storageProfile']['osDisk']['vhd']['uri']
self.cmd('vm delete -g {rg} -n vm1 -y')
# create a vm by attaching the OS disk from the deleted VM
self.cmd('vm create -g {rg} -n vm2 --attach-os-disk {disk_uri} --os-type linux --use-unmanaged-disk',
checks=self.check('powerState', 'VM running'))
@ResourceGroupPreparer(name_prefix='cli_test_vm_with_specialized_unmanaged_disk')
def test_vm_create_with_unmanaged_data_disks(self, resource_group):
self.kwargs.update({
'vm': 'vm1',
'vm2': 'vm2'
})
# create a unmanaged bm with 2 unmanaged disks
vm_create_cmd = 'vm create -g {rg} -n vm1 --image debian --use-unmanaged-disk --admin-username ubuntu --admin-password testPassword0 --authentication-type password'
self.cmd(vm_create_cmd)
self.cmd('vm unmanaged-disk attach -g {rg} --vm-name {vm} --new --size-gb 1')
self.cmd('vm unmanaged-disk attach -g {rg} --vm-name {vm} --new --size-gb 2')
vm1_info = self.cmd('vm show -g {rg} -n {vm}').get_output_in_json()
self.kwargs['disk_uri'] = vm1_info['storageProfile']['osDisk']['vhd']['uri']
self.kwargs['data_disk'] = vm1_info['storageProfile']['dataDisks'][0]['vhd']['uri']
self.kwargs['data_disk2'] = vm1_info['storageProfile']['dataDisks'][1]['vhd']['uri']
self.cmd('vm delete -g {rg} -n vm1 -y')
# create a vm by attaching the OS disk from the deleted VM
vm_create_cmd = ('vm create -g {rg} -n {vm2} --attach-os-disk {disk_uri} --os-type linux --use-unmanaged-disk '
'--attach-data-disks {data_disk} {data_disk2} --data-disk-caching 0=ReadWrite 1=ReadOnly')
self.cmd(vm_create_cmd)
self.cmd('vm show -g {rg} -n {vm2} -d', checks=[
self.check('storageProfile.dataDisks[0].caching', 'ReadWrite'),
self.check('storageProfile.dataDisks[0].lun', 0),
self.check('storageProfile.dataDisks[1].caching', 'ReadOnly'),
self.check('storageProfile.dataDisks[1].lun', 1)
])
class VMAttachDisksOnCreate(ScenarioTest):
@ResourceGroupPreparer()
def test_vm_create_by_attach_os_and_data_disks(self, resource_group):
# the testing below follow a real custom's workflow requiring the support of attaching data disks on create
# creating a vm
self.cmd('vm create -g {rg} -n vm1 --image centos --admin-username centosadmin --admin-password testPassword0 --authentication-type password --data-disk-sizes-gb 2')
result = self.cmd('vm show -g {rg} -n vm1').get_output_in_json()
self.kwargs.update({
'origin_os_disk': result['storageProfile']['osDisk']['name'],
'origin_data_disk': result['storageProfile']['dataDisks'][0]['name'],
# snapshot the os & data disks
'os_snapshot': 'oSnapshot',
'os_disk': 'sDisk',
'data_snapshot': 'dSnapshot',
'data_disk': 'dDisk'
})
self.cmd('snapshot create -g {rg} -n {os_snapshot} --source {origin_os_disk}')
self.cmd('disk create -g {rg} -n {os_disk} --source {os_snapshot}')
self.cmd('snapshot create -g {rg} -n {data_snapshot} --source {origin_data_disk}')
self.cmd('disk create -g {rg} -n {data_disk} --source {data_snapshot}')
# rebuild a new vm
# (os disk can be resized)
self.cmd('vm create -g {rg} -n vm2 --attach-os-disk {os_disk} --attach-data-disks {data_disk} --data-disk-sizes-gb 3 --os-disk-size-gb 100 --os-type linux',
checks=self.check('powerState', 'VM running'))
self.cmd('vm show -g {rg} -n vm2', checks=[
self.check('length(storageProfile.dataDisks)', 2),
self.check('storageProfile.dataDisks[0].diskSizeGb', 3),
self.check('storageProfile.dataDisks[0].managedDisk.storageAccountType', 'Premium_LRS'),
self.check('storageProfile.osDisk.diskSizeGb', 100)
])
@ResourceGroupPreparer()
def test_vm_create_by_attach_unmanaged_os_and_data_disks(self, resource_group):
# creating a vm
self.cmd('vm create -g {rg} -n vm1 --use-unmanaged-disk --image centos --admin-username centosadmin --admin-password testPassword0 --authentication-type password')
self.cmd('vm unmanaged-disk attach -g {rg} --vm-name vm1 --new --size-gb 2')
result = self.cmd('vm show -g {rg} -n vm1').get_output_in_json()
self.kwargs['os_disk_vhd'] = result['storageProfile']['osDisk']['vhd']['uri']
self.kwargs['data_disk_vhd'] = result['storageProfile']['dataDisks'][0]['vhd']['uri']
        # delete the vm to release the VHDs' leases so they can be used to create a new vm by attaching
self.cmd('vm deallocate -g {rg} -n vm1')
self.cmd('vm delete -g {rg} -n vm1 -y')
# rebuild a new vm
self.cmd('vm create -g {rg} -n vm2 --attach-os-disk {os_disk_vhd} --attach-data-disks {data_disk_vhd} --os-type linux --use-unmanaged-disk',
checks=self.check('powerState', 'VM running'))
class VMManagedDiskScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_managed_disk')
def test_vm_managed_disk(self, resource_group):
self.kwargs.update({
'loc': 'westus',
'disk1': 'd1',
'disk2': 'd2',
'snapshot1': 's1',
'snapshot2': 's2',
'image': 'i1',
'image_2': 'i2',
'image_3': 'i3'
})
# create a disk and update
data_disk = self.cmd('disk create -g {rg} -n {disk1} --size-gb 1 --tags tag1=d1', checks=[
self.check('sku.name', 'Premium_LRS'),
self.check('diskSizeGb', 1),
self.check('tags.tag1', 'd1')
]).get_output_in_json()
self.cmd('disk update -g {rg} -n {disk1} --size-gb 10 --sku Standard_LRS', checks=[
self.check('sku.name', 'Standard_LRS'),
self.check('diskSizeGb', 10)
])
# get SAS token
result = self.cmd('disk grant-access -g {rg} -n {disk1} --duration-in-seconds 10').get_output_in_json()
self.assertTrue('sv=' in result['accessSas'])
        # create another disk by importing from disk1
self.kwargs['disk1_id'] = data_disk['id']
data_disk2 = self.cmd('disk create -g {rg} -n {disk2} --source {disk1_id}').get_output_in_json()
        # create a snapshot
os_snapshot = self.cmd('snapshot create -g {rg} -n {snapshot1} --size-gb 1 --sku Premium_LRS --tags tag1=s1', checks=[
self.check('sku.name', 'Premium_LRS'),
self.check('diskSizeGb', 1),
self.check('tags.tag1', 's1')
]).get_output_in_json()
# update the sku
self.cmd('snapshot update -g {rg} -n {snapshot1} --sku Standard_LRS', checks=[
self.check('sku.name', 'Standard_LRS'),
self.check('diskSizeGb', 1)
])
        # create another snapshot by importing from disk1
data_snapshot = self.cmd('snapshot create -g {rg} -n {snapshot2} --source {disk1} --sku Premium_LRS').get_output_in_json()
self.kwargs.update({
'snapshot1_id': os_snapshot['id'],
'snapshot2_id': data_snapshot['id'],
'disk2_id': data_disk2['id']
})
        # for now, image creation doesn't inspect the disk for an OS, so the command below should succeed even with a junk disk
self.cmd('image create -g {rg} -n {image} --source {snapshot1} --data-disk-sources {disk1} {snapshot2_id} {disk2_id} --os-type Linux --tags tag1=i1', checks=[
self.check('storageProfile.osDisk.osType', 'Linux'),
self.check('storageProfile.osDisk.snapshot.id', '{snapshot1_id}'),
self.check('length(storageProfile.dataDisks)', 3),
self.check('storageProfile.dataDisks[0].lun', 0),
self.check('storageProfile.dataDisks[1].lun', 1),
self.check('tags.tag1', 'i1')
])
# test that images can be created with different storage skus and os disk caching settings.
self.cmd('image create -g {rg} -n {image_2} --source {snapshot1} --data-disk-sources {disk1} {snapshot2_id} {disk2_id}'
' --os-type Linux --tags tag1=i1 --storage-sku Premium_LRS --os-disk-caching None',
checks=[
self.check('storageProfile.osDisk.storageAccountType', 'Premium_LRS'),
self.check('storageProfile.osDisk.osType', 'Linux'),
self.check('storageProfile.osDisk.snapshot.id', '{snapshot1_id}'),
self.check('length(storageProfile.dataDisks)', 3),
self.check('storageProfile.dataDisks[0].lun', 0),
self.check('storageProfile.dataDisks[1].lun', 1),
self.check('storageProfile.osDisk.caching', 'None'),
self.check('tags.tag1', 'i1')
])
self.cmd('image create -g {rg} -n {image_3} --source {snapshot1} --data-disk-sources {disk1} {snapshot2_id} {disk2_id}'
' --os-type Linux --tags tag1=i1 --storage-sku Standard_LRS --os-disk-caching ReadWrite',
checks=[
self.check('storageProfile.osDisk.storageAccountType', 'Standard_LRS'),
self.check('storageProfile.osDisk.caching', 'ReadWrite')
])
class ComputeListSkusScenarioTest(ScenarioTest):
@unittest.skip("Need to check this")
@AllowLargeResponse(size_kb=99999)
def test_list_compute_skus_table_output(self):
result = self.cmd('vm list-skus -l eastus2 -otable')
lines = result.output.split('\n')
# 1st line is header
self.assertEqual(lines[0].split(), ['ResourceType', 'Locations', 'Name', 'Zones', 'Restrictions'])
# spot check the first 4 entries
fd_found, ud_found, size_found, zone_found = False, False, False, False
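        # the loop below spot-checks that Classic/Aligned availability-set SKUs, a disk SKU, and at least one zoned size all appear in the table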
for line in lines[2:]:
parts = line.split()
if not fd_found and (parts[:4] == ['availabilitySets', 'eastus2', 'Classic', 'None']):
fd_found = True
elif not ud_found and (parts[:4] == ['availabilitySets', 'eastus2', 'Aligned', 'None']):
ud_found = True
elif not size_found and parts[:3] == ['disks', 'eastus2', 'Standard_LRS']:
size_found = True
elif not zone_found and parts[3] == '1,2,3':
zone_found = True
self.assertTrue(fd_found)
self.assertTrue(ud_found)
self.assertTrue(size_found)
self.assertTrue(zone_found)
@unittest.skip("Need to check this")
@AllowLargeResponse(size_kb=16144)
def test_list_compute_skus_filter(self):
result = self.cmd('vm list-skus -l eastus2 --size Standard_DS1_V2 --zone').get_output_in_json()
self.assertTrue(result and len(result) == len([x for x in result if x['name'] == 'Standard_DS1_v2' and x['locationInfo'][0]['zones']]))
result = self.cmd('vm list-skus -l westus --resource-type disks').get_output_in_json()
self.assertTrue(result and len(result) == len([x for x in result if x['resourceType'] == 'disks']))
class VMExtensionScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vm_extension')
def test_vm_extension(self, resource_group):
user_name = 'foouser1'
config_file = _write_config_file(user_name)
self.kwargs.update({
'vm': 'myvm',
'pub': 'Microsoft.OSTCExtensions',
'ext': 'VMAccessForLinux',
'config': config_file,
'user': user_name
})
self.cmd('vm create -n {vm} -g {rg} --image UbuntuLTS --authentication-type password --admin-username user11 --admin-password testPassword0')
self.cmd('vm extension list --vm-name {vm} --resource-group {rg}',
checks=self.check('length([])', 0))
self.cmd('vm extension set -n {ext} --publisher {pub} --version 1.2 --vm-name {vm} --resource-group {rg} --protected-settings "{config}" --force-update')
result = self.cmd('vm get-instance-view -n {vm} -g {rg}', checks=[
self.check('*.extensions[0].name', ['VMAccessForLinux']),
]).get_output_in_json()
        # ensure the minor version is greater than 2
minor_version = int(result['instanceView']['extensions'][0]['typeHandlerVersion'].split('.')[1])
self.assertGreater(minor_version, 2)
result = self.cmd('vm extension show --resource-group {rg} --vm-name {vm} --name {ext}', checks=[
self.check('type(@)', 'object'),
self.check('name', '{ext}'),
self.check('resourceGroup', '{rg}'),
]).get_output_in_json()
uuid.UUID(result['forceUpdateTag'])
self.cmd('vm extension delete --resource-group {rg} --vm-name {vm} --name {ext}')
@ResourceGroupPreparer(name_prefix='cli_test_vm_extension_2')
def test_vm_extension_instance_name(self, resource_group):
user_name = 'foouser1'
config_file = _write_config_file(user_name)
self.kwargs.update({
'vm': 'myvm',
'pub': 'Microsoft.OSTCExtensions',
'ext_type': 'VMAccessForLinux',
'config': config_file,
'user': user_name,
'ext_name': 'MyAccessExt'
})
self.cmd('vm create -n {vm} -g {rg} --image UbuntuLTS --authentication-type password --admin-username user11 --admin-password testPassword0')
self.cmd('vm extension set -n {ext_type} --publisher {pub} --version 1.2 --vm-name {vm} --resource-group {rg} '
'--protected-settings "{config}" --extension-instance-name {ext_name}')
self.cmd('vm extension show --resource-group {rg} --vm-name {vm} --name {ext_name}', checks=[
self.check('name', '{ext_name}'),
self.check('virtualMachineExtensionType', '{ext_type}')
])
self.cmd('vm extension delete --resource-group {rg} --vm-name {vm} --name {ext_name}')
class VMMachineExtensionImageScenarioTest(ScenarioTest):
def test_vm_machine_extension_image(self):
self.kwargs.update({
'loc': 'westus',
'pub': 'Microsoft.Azure.Diagnostics',
'ext': 'IaaSDiagnostics',
'ver': '1.6.4.0'
})
self.cmd('vm extension image list-names --location {loc} --publisher {pub}', checks=[
self.check('type(@)', 'array'),
self.check("length([?location == '{loc}']) == length(@)", True),
])
self.cmd('vm extension image list-versions --location {loc} -p {pub} --name {ext}', checks=[
self.check('type(@)', 'array'),
self.check("length([?location == '{loc}']) == length(@)", True),
])
self.cmd('vm extension image show --location {loc} -p {pub} --name {ext} --version {ver}', checks=[
self.check('type(@)', 'object'),
self.check('location', '{loc}'),
self.check("contains(id, '/Providers/Microsoft.Compute/Locations/{loc}/Publishers/{pub}/ArtifactTypes/VMExtension/Types/{ext}/Versions/{ver}')", True)
])
class VMExtensionImageSearchScenarioTest(LiveScenarioTest):
def test_vm_extension_image_search(self):
# pick this specific name, so the search will be under one publisher. This avoids
# the parallel searching behavior that causes incomplete VCR recordings.
self.kwargs.update({
'pub': 'Test.Microsoft.VisualStudio.Services',
'image': 'TeamServicesAgentLinux1'
})
self.cmd('vm extension image list -l westus --publisher {pub} --name {image}', checks=[
self.check('type(@)', 'array'),
self.check("length([?name == '{image}']) == length(@)", True)
])
class VMCreateUbuntuScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vm_create_ubuntu')
def test_vm_create_ubuntu(self, resource_group, resource_group_location):
self.kwargs.update({
'username': 'ubuntu',
'vm': 'cli-test-vm2',
'image': 'UbuntuLTS',
'auth': 'ssh',
'ssh_key': TEST_SSH_KEY_PUB,
'loc': resource_group_location
})
self.cmd('vm create --resource-group {rg} --admin-username {username} --name {vm} --authentication-type {auth} --image {image} --ssh-key-value \'{ssh_key}\' --location {loc} --data-disk-sizes-gb 1 --data-disk-caching ReadOnly')
self.cmd('vm show -g {rg} -n {vm}', checks=[
self.check('provisioningState', 'Succeeded'),
self.check('osProfile.adminUsername', '{username}'),
self.check('osProfile.computerName', '{vm}'),
self.check('osProfile.linuxConfiguration.disablePasswordAuthentication', True),
self.check('osProfile.linuxConfiguration.ssh.publicKeys[0].keyData', '{ssh_key}'),
self.check('storageProfile.dataDisks[0].managedDisk.storageAccountType', 'Premium_LRS'),
self.check('storageProfile.osDisk.managedDisk.storageAccountType', 'Premium_LRS'),
self.check('storageProfile.dataDisks[0].lun', 0),
self.check('storageProfile.dataDisks[0].caching', 'ReadOnly'),
])
# test for idempotency--no need to reverify, just ensure the command doesn't fail
self.cmd('vm create --resource-group {rg} --admin-username {username} --name {vm} --authentication-type {auth} --image {image} --ssh-key-value \'{ssh_key}\' --location {loc} --data-disk-sizes-gb 1 --data-disk-caching ReadOnly ')
class VMCreateExistingOptions(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vm_create_existing')
@StorageAccountPreparer()
def test_vm_create_existing_options(self, resource_group, storage_account):
self.kwargs.update({
'availset': 'vrfavailset',
'pubip': 'vrfpubip',
'vnet': 'vrfvnet',
'subnet': 'vrfsubnet',
'nsg': 'vrfnsg',
'vm': 'vrfvm',
'disk': 'vrfosdisk',
'container': 'vrfcontainer',
'ssh_key': TEST_SSH_KEY_PUB
})
self.cmd('vm availability-set create --name {availset} -g {rg} --unmanaged --platform-fault-domain-count 3 --platform-update-domain-count 3')
self.cmd('network public-ip create --name {pubip} -g {rg}')
self.cmd('network vnet create --name {vnet} -g {rg} --subnet-name {subnet}')
self.cmd('network nsg create --name {nsg} -g {rg}')
self.cmd('vm create --image UbuntuLTS --os-disk-name {disk} --vnet-name {vnet} --subnet {subnet} --availability-set {availset} --public-ip-address {pubip} -l "West US" --nsg {nsg} --use-unmanaged-disk --size Standard_DS2 --admin-username user11 --storage-account {sa} --storage-container-name {container} -g {rg} --name {vm} --ssh-key-value \'{ssh_key}\'')
self.cmd('vm availability-set show -n {availset} -g {rg}',
checks=self.check('virtualMachines[0].id.ends_with(@, \'{}\')'.format(self.kwargs['vm'].upper()), True))
self.cmd('network nsg show -n {nsg} -g {rg}',
checks=self.check('networkInterfaces[0].id.ends_with(@, \'{vm}VMNic\')', True))
self.cmd('network nic show -n {vm}VMNic -g {rg}',
checks=self.check('ipConfigurations[0].publicIpAddress.id.ends_with(@, \'{pubip}\')', True))
self.cmd('vm show -n {vm} -g {rg}',
checks=self.check('storageProfile.osDisk.vhd.uri', 'https://{sa}.blob.core.windows.net/{container}/{disk}.vhd'))
class VMCreateExistingIdsOptions(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vm_create_existing_ids')
@StorageAccountPreparer()
def test_vm_create_existing_ids_options(self, resource_group, storage_account):
from azure.cli.core.commands.client_factory import get_subscription_id
from msrestazure.tools import resource_id, is_valid_resource_id
subscription_id = self.get_subscription_id()
self.kwargs.update({
'availset': 'vrfavailset',
'pubip': 'vrfpubip',
'vnet': 'vrfvnet',
'subnet': 'vrfsubnet',
'nsg': 'vrfnsg',
'vm': 'vrfvm',
'disk': 'vrfosdisk',
'container': 'vrfcontainer',
'ssh_key': TEST_SSH_KEY_PUB
})
self.cmd('vm availability-set create --name {availset} -g {rg} --unmanaged --platform-fault-domain-count 3 --platform-update-domain-count 3')
self.cmd('network public-ip create --name {pubip} -g {rg}')
self.cmd('network vnet create --name {vnet} -g {rg} --subnet-name {subnet}')
self.cmd('network nsg create --name {nsg} -g {rg}')
rg = self.kwargs['rg']
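        # build full ARM resource IDs so the vm create command can consume the existing resources by ID rather than by name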
self.kwargs.update({
'availset_id': resource_id(subscription=subscription_id, resource_group=rg, namespace='Microsoft.Compute', type='availabilitySets', name=self.kwargs['availset']),
'pubip_id': resource_id(subscription=subscription_id, resource_group=rg, namespace='Microsoft.Network', type='publicIpAddresses', name=self.kwargs['pubip']),
'subnet_id': resource_id(subscription=subscription_id, resource_group=rg, namespace='Microsoft.Network', type='virtualNetworks', child_type_1='subnets', name=self.kwargs['vnet'], child_name_1=self.kwargs['subnet']),
'nsg_id': resource_id(subscription=subscription_id, resource_group=rg, namespace='Microsoft.Network', type='networkSecurityGroups', name=self.kwargs['nsg'])
})
assert is_valid_resource_id(self.kwargs['availset_id'])
assert is_valid_resource_id(self.kwargs['pubip_id'])
assert is_valid_resource_id(self.kwargs['subnet_id'])
assert is_valid_resource_id(self.kwargs['nsg_id'])
self.cmd('vm create --image UbuntuLTS --os-disk-name {disk} --subnet {subnet_id} --availability-set {availset_id} --public-ip-address {pubip_id} -l "West US" --nsg {nsg_id} --use-unmanaged-disk --size Standard_DS2 --admin-username user11 --storage-account {sa} --storage-container-name {container} -g {rg} --name {vm} --ssh-key-value \'{ssh_key}\'')
self.cmd('vm availability-set show -n {availset} -g {rg}',
checks=self.check('virtualMachines[0].id.ends_with(@, \'{}\')'.format(self.kwargs['vm'].upper()), True))
self.cmd('network nsg show -n {nsg} -g {rg}',
checks=self.check('networkInterfaces[0].id.ends_with(@, \'{vm}VMNic\')', True))
self.cmd('network nic show -n {vm}VMNic -g {rg}',
checks=self.check('ipConfigurations[0].publicIpAddress.id.ends_with(@, \'{pubip}\')', True))
self.cmd('vm show -n {vm} -g {rg}',
checks=self.check('storageProfile.osDisk.vhd.uri', 'https://{sa}.blob.core.windows.net/{container}/{disk}.vhd'))
# region VMSS Tests
class VMSSCreateAndModify(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vmss_create_and_modify')
def test_vmss_create_and_modify(self):
self.kwargs.update({
'vmss': 'vmss1',
'count': 5,
'new_count': 4
})
self.cmd('vmss create --admin-password testPassword0 --name {vmss} -g {rg} --admin-username myadmin --image Win2012R2Datacenter --instance-count {count}')
self.cmd('vmss show --name {vmss} -g {rg}', checks=[
self.check('virtualMachineProfile.priority', None),
self.check('sku.name', 'Standard_DS1_v2'),
])
self.cmd('vmss list',
checks=self.check('type(@)', 'array'))
self.cmd('vmss list --resource-group {rg}', checks=[
self.check('type(@)', 'array'),
self.check('length(@)', 1),
self.check('[0].name', '{vmss}'),
self.check('[0].resourceGroup', '{rg}')
])
self.cmd('vmss list-skus --resource-group {rg} --name {vmss}',
checks=self.check('type(@)', 'array'))
self.cmd('vmss show --resource-group {rg} --name {vmss}', checks=[
self.check('type(@)', 'object'),
self.check('name', '{vmss}'),
self.check('resourceGroup', '{rg}')
])
result = self.cmd('vmss list-instances --resource-group {rg} --name {vmss} --query "[].instanceId"').get_output_in_json()
self.kwargs['instance_ids'] = result[3] + ' ' + result[4]
self.cmd('vmss update-instances --resource-group {rg} --name {vmss} --instance-ids {instance_ids}')
self.cmd('vmss get-instance-view --resource-group {rg} --name {vmss}', checks=[
self.check('type(@)', 'object'),
self.check('type(virtualMachine)', 'object'),
self.check('type(statuses)', 'array')
])
self.cmd('vmss stop --resource-group {rg} --name {vmss}')
self.cmd('vmss start --resource-group {rg} --name {vmss}')
self.cmd('vmss restart --resource-group {rg} --name {vmss}')
self.cmd('vmss scale --resource-group {rg} --name {vmss} --new-capacity {new_count}')
self.cmd('vmss show --resource-group {rg} --name {vmss}', checks=[
self.check('sku.capacity', '{new_count}'),
self.check('virtualMachineProfile.osProfile.windowsConfiguration.enableAutomaticUpdates', True)
])
result = self.cmd('vmss list-instances --resource-group {rg} --name {vmss} --query "[].instanceId"').get_output_in_json()
self.kwargs['instance_ids'] = result[2] + ' ' + result[3]
self.cmd('vmss delete-instances --resource-group {rg} --name {vmss} --instance-ids {instance_ids}')
self.cmd('vmss get-instance-view --resource-group {rg} --name {vmss}', checks=[
self.check('type(@)', 'object'),
self.check('type(virtualMachine)', 'object'),
self.check('virtualMachine.statusesSummary[0].count', self.kwargs['new_count'] - 2)
])
self.cmd('vmss deallocate --resource-group {rg} --name {vmss}')
self.cmd('vmss delete --resource-group {rg} --name {vmss}')
self.cmd('vmss list --resource-group {rg}', checks=self.is_empty())
class VMSSCreateOptions(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vmss_create_options')
def test_vmss_create_options(self, resource_group):
self.kwargs.update({
'vmss': 'vrfvmss',
'count': 2,
'caching': 'ReadWrite',
'update': 'automatic',
'ip': 'vrfpubip'
})
self.cmd('network public-ip create --name {ip} -g {rg}')
self.cmd('vmss create --image Debian --admin-password testPassword0 -l westus -g {rg} -n {vmss} --disable-overprovision --instance-count {count} --os-disk-caching {caching} --upgrade-policy-mode {update} --authentication-type password --admin-username myadmin --public-ip-address {ip} --data-disk-sizes-gb 1 --vm-sku Standard_D2_v2')
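        # the load balancer auto-created for the scale set should front the supplied public ip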
self.cmd('network lb show -g {rg} -n {vmss}lb ',
checks=self.check('frontendIpConfigurations[0].publicIpAddress.id.ends_with(@, \'{ip}\')', True))
self.cmd('vmss show -g {rg} -n {vmss}', checks=[
self.check('sku.capacity', '{count}'),
self.check('virtualMachineProfile.storageProfile.osDisk.caching', '{caching}'),
self.check('upgradePolicy.mode', self.kwargs['update'].title()),
self.check('singlePlacementGroup', True),
])
self.kwargs['id'] = self.cmd('vmss list-instances -g {rg} -n {vmss} --query "[].instanceId"').get_output_in_json()[0]
self.cmd('vmss show -g {rg} -n {vmss} --instance-id {id}',
checks=self.check('instanceId', '{id}'))
self.cmd('vmss disk attach -g {rg} --vmss-name {vmss} --size-gb 3')
self.cmd('vmss show -g {rg} -n {vmss}', checks=[
self.check('length(virtualMachineProfile.storageProfile.dataDisks)', 2),
self.check('virtualMachineProfile.storageProfile.dataDisks[0].diskSizeGb', 1),
self.check('virtualMachineProfile.storageProfile.dataDisks[0].managedDisk.storageAccountType', 'Standard_LRS'),
self.check('virtualMachineProfile.storageProfile.dataDisks[1].diskSizeGb', 3),
self.check('virtualMachineProfile.storageProfile.dataDisks[1].managedDisk.storageAccountType', 'Standard_LRS'),
])
self.cmd('vmss disk detach -g {rg} --vmss-name {vmss} --lun 1')
self.cmd('vmss show -g {rg} -n {vmss}', checks=[
self.check('length(virtualMachineProfile.storageProfile.dataDisks)', 1),
self.check('virtualMachineProfile.storageProfile.dataDisks[0].lun', 0),
self.check('virtualMachineProfile.storageProfile.dataDisks[0].diskSizeGb', 1)
])
@ResourceGroupPreparer(name_prefix='cli_test_vmss_create_options')
def test_vmss_update_instance_disks(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1',
'caching': 'ReadWrite',
'update': 'automatic',
'ip': 'vrfpubip',
'disk': 'd1',
'instance_id': '1',
'sku': 'Standard_LRS'
})
self.cmd('vmss create --image Debian --admin-username clitest1 --admin-password testPassword0 -l westus -g {rg} -n {vmss} --storage-sku {sku}')
self.cmd('disk create -g {rg} -n {disk} --size-gb 1 --sku {sku}')
instances = self.cmd('vmss list-instances -g {rg} -n {vmss}').get_output_in_json()
self.kwargs['instance_id'] = instances[0]['instanceId']
self.cmd('vmss disk attach -g {rg} --vmss-name {vmss} --instance-id {instance_id} --disk {disk} --caching {caching}')
self.cmd("vmss list-instances -g {rg} -n {vmss} --query \"[?instanceId=='{instance_id}']\"", checks=[
self.check('length([0].storageProfile.dataDisks)', 1),
self.check('[0].storageProfile.dataDisks[0].caching', self.kwargs['caching'])
])
self.cmd('vmss disk detach -g {rg} --vmss-name {vmss} --instance-id {instance_id} --lun 0')
self.cmd("vmss list-instances -g {rg} -n {vmss} --query \"[?instanceId=='{instance_id}']\"", checks=[
self.check('length([0].storageProfile.dataDisks)', 0)
])
@ResourceGroupPreparer(name_prefix='cli_test_vmss_create_options')
def test_vmss_create_auth(self, resource_group):
self.kwargs.update({
'vmss_1': 'vmss1',
'vmss_2': 'vmss2',
'ssh_key': TEST_SSH_KEY_PUB,
})
self.cmd('vmss create --image Debian -l westus -g {rg} -n {vmss_1} --authentication-type all '
' --admin-username myadmin --admin-password testPassword0 --ssh-key-value \'{ssh_key}\'',
checks=[
self.check('vmss.virtualMachineProfile.osProfile.linuxConfiguration.disablePasswordAuthentication', False),
self.check('vmss.virtualMachineProfile.osProfile.linuxConfiguration.ssh.publicKeys[0].keyData', TEST_SSH_KEY_PUB)
])
self.cmd('vmss create --image Debian -l westus -g {rg} -n {vmss_2} --authentication-type ssh '
' --admin-username myadmin --ssh-key-value \'{ssh_key}\'',
checks=[
self.check('vmss.virtualMachineProfile.osProfile.linuxConfiguration.disablePasswordAuthentication', True)
])
class VMSSCreateBalancerOptionsTest(ScenarioTest): # pylint: disable=too-many-instance-attributes
@ResourceGroupPreparer(name_prefix='cli_test_vmss_create_none')
def test_vmss_create_none_options(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1',
'ssh_key': TEST_SSH_KEY_PUB,
'quotes': '""' if platform.system() == 'Windows' else "''"
})
self.cmd('vmss create -n {vmss} -g {rg} --image Debian --load-balancer {quotes} --admin-username ubuntu --ssh-key-value \'{ssh_key}\' --public-ip-address {quotes} --tags {quotes} --vm-sku Basic_A1')
self.cmd('vmss show -n {vmss} -g {rg}', checks=[
self.check('tags', {}),
self.check('virtualMachineProfile.networkProfile.networkInterfaceConfigurations.ipConfigurations.loadBalancerBackendAddressPools', None),
self.check('sku.name', 'Basic_A1'),
self.check('sku.tier', 'Basic')
])
self.cmd('vmss update -g {rg} -n {vmss} --set tags.test=success',
checks=self.check('tags.test', 'success'))
self.cmd('network public-ip show -n {vmss}PublicIP -g {rg}', expect_failure=True)
@ResourceGroupPreparer(name_prefix='cli_test_vmss_create_w_ag')
def test_vmss_create_with_app_gateway(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1',
'ssh_key': TEST_SSH_KEY_PUB
})
self.cmd("vmss create -n {vmss} -g {rg} --image Debian --admin-username clittester --ssh-key-value '{ssh_key}' --app-gateway apt1 --instance-count 5",
checks=self.check('vmss.provisioningState', 'Succeeded'))
        # spot check that it is using the application gateway
self.cmd('vmss show -n {vmss} -g {rg}', checks=[
self.check('sku.capacity', 5),
self.check('virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].ipConfigurations[0].applicationGatewayBackendAddressPools[0].resourceGroup', '{rg}')
])
@ResourceGroupPreparer(name_prefix='cli_test_vmss_create_existing_lb')
def test_vmss_existing_lb(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1',
'lb': 'lb1'
})
self.cmd('network lb create -g {rg} -n {lb} --backend-pool-name test')
self.cmd('vmss create -g {rg} -n {vmss} --load-balancer {lb} --image UbuntuLTS --admin-username clitester --admin-password TestTest12#$')
@ResourceGroupPreparer()
def test_vmss_single_placement_group_default_to_std_lb(self, resource_group):
self.kwargs.update({
'vmss': 'vmss123'
})
self.cmd('vmss create -g {rg} -n {vmss} --admin-username clitester --admin-password PasswordPassword1! --image debian --single-placement-group false')
self.cmd('vmss show -g {rg} -n {vmss}', checks=[
self.check('singlePlacementGroup', False)
])
self.cmd('network lb list -g {rg}', checks=[
self.check('[0].sku.name', 'Standard')
])
self.cmd('network public-ip list -g {rg}', checks=[
self.check('[0].sku.name', 'Standard')
])
class VMSSCreatePublicIpPerVm(ScenarioTest): # pylint: disable=too-many-instance-attributes
@ResourceGroupPreparer(name_prefix='cli_test_vmss_create_w_ips')
def test_vmss_public_ip_per_vm_custom_domain_name(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1',
'nsg': 'testnsg',
'ssh_key': TEST_SSH_KEY_PUB,
'dns_label': self.create_random_name('clivmss', 20)
})
nsg_result = self.cmd('network nsg create -g {rg} -n {nsg}').get_output_in_json()
self.cmd("vmss create -n {vmss} -g {rg} --image Debian --admin-username clittester --ssh-key-value '{ssh_key}' --vm-domain-name {dns_label} --public-ip-per-vm --dns-servers 10.0.0.6 10.0.0.5 --nsg {nsg}",
checks=self.check('vmss.provisioningState', 'Succeeded'))
result = self.cmd("vmss show -n {vmss} -g {rg}", checks=[
self.check('length(virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].dnsSettings.dnsServers)', 2),
self.check('virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].dnsSettings.dnsServers[0]', '10.0.0.6'),
self.check('virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].dnsSettings.dnsServers[1]', '10.0.0.5'),
self.check('virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].networkSecurityGroup.id', nsg_result['NewNSG']['id'])
])
        # spot check that we have the domain name and a public ip
result = self.cmd('vmss list-instance-public-ips -n {vmss} -g {rg}').get_output_in_json()
self.assertEqual(len(result[0]['ipAddress'].split('.')), 4)
self.assertTrue(result[0]['dnsSettings']['domainNameLabel'].endswith(self.kwargs['dns_label']))
# class SecretsScenarioTest(ScenarioTest): # pylint: disable=too-many-instance-attributes
# @ResourceGroupPreparer(name_prefix='cli_test_vm_secrets')
# def test_vm_create_linux_secrets(self, resource_group, resource_group_location):
# self.kwargs.update({
# 'admin': 'ubuntu',
# 'loc': 'westus',
# 'image': 'UbuntuLTS',
# 'auth': 'ssh',
# 'ssh_key': TEST_SSH_KEY_PUB,
# 'vm': 'vm-name',
# 'secrets': json.dumps([{'sourceVault': {'id': 'id'}, 'vaultCertificates': []}]),
# 'vault': self.create_random_name('vmlinuxkv', 20)
# })
# message = 'Secret is missing vaultCertificates array or it is empty at index 0'
# with self.assertRaisesRegexp(CLIError, message):
# self.cmd('vm create -g {rg} -n {vm} --admin-username {admin} --authentication-type {auth} --image {image} --ssh-key-value \'{ssh_key}\' -l {loc} --secrets \'{secrets}\' --nsg-rule NONE')
# vault_out = self.cmd('keyvault create -g {rg} -n {vault} -l {loc} --enabled-for-deployment true --enabled-for-template-deployment true').get_output_in_json()
# time.sleep(60)
# self.kwargs['policy_path'] = os.path.join(TEST_DIR, 'keyvault', 'policy.json')
# self.cmd('keyvault certificate create --vault-name {vault} -n cert1 -p @"{policy_path}"')
# self.kwargs['secret_out'] = self.cmd('keyvault secret list-versions --vault-name {vault} -n cert1 --query "[?attributes.enabled].id" -o tsv').output.strip()
# vm_format = self.cmd('vm secret format -s {secret_out}').get_output_in_json()
# self.kwargs['secrets'] = json.dumps(vm_format)
# self.cmd('vm create -g {rg} -n {vm} --admin-username {admin} --authentication-type {auth} --image {image} --ssh-key-value \'{ssh_key}\' -l {loc} --secrets \'{secrets}\' --nsg-rule NONE')
# self.cmd('vm show -g {rg} -n {vm}', checks=[
# self.check('provisioningState', 'Succeeded'),
# self.check('osProfile.secrets[0].sourceVault.id', vault_out['id']),
# self.check('osProfile.secrets[0].vaultCertificates[0].certificateUrl', '{secret_out}')
# ])
# @ResourceGroupPreparer()
# def test_vm_create_windows_secrets(self, resource_group, resource_group_location):
# self.kwargs.update({
# 'admin': 'windowsUser',
# 'loc': 'westus',
# 'image': 'Win2012R2Datacenter',
# 'vm': 'vm-name',
# 'secrets': json.dumps([{'sourceVault': {'id': 'id'}, 'vaultCertificates': [{'certificateUrl': 'certurl'}]}]),
# 'vault': self.create_random_name('vmkeyvault', 20)
# })
# message = 'Secret is missing certificateStore within vaultCertificates array at secret index 0 and ' \
# 'vaultCertificate index 0'
# with self.assertRaisesRegexp(CLIError, message):
# self.cmd('vm create -g {rg} -n {vm} --admin-username {admin} --admin-password VerySecret!12 --image {image} -l {loc} --secrets \'{secrets}\' --nsg-rule NONE')
# vault_out = self.cmd(
# 'keyvault create -g {rg} -n {vault} -l {loc} --enabled-for-deployment true --enabled-for-template-deployment true').get_output_in_json()
# time.sleep(60)
# self.kwargs['policy_path'] = os.path.join(TEST_DIR, 'keyvault', 'policy.json')
# self.cmd('keyvault certificate create --vault-name {vault} -n cert1 -p @"{policy_path}"')
# self.kwargs['secret_out'] = self.cmd('keyvault secret list-versions --vault-name {vault} -n cert1 --query "[?attributes.enabled].id" -o tsv').output.strip()
# self.kwargs['secrets'] = self.cmd('vm secret format -s {secret_out} --certificate-store "My"').get_output_in_json()
# self.cmd('vm create -g {rg} -n {vm} --admin-username {admin} --admin-password VerySecret!12 --image {image} -l {loc} --secrets "{secrets}" --nsg-rule NONE')
# self.cmd('vm show -g {rg} -n {vm}', checks=[
# self.check('provisioningState', 'Succeeded'),
# self.check('osProfile.secrets[0].sourceVault.id', vault_out['id']),
# self.check('osProfile.secrets[0].vaultCertificates[0].certificateUrl', self.kwargs['secret_out']),
# self.check('osProfile.secrets[0].vaultCertificates[0].certificateStore', 'My')
# ])
# class VMSSCreateLinuxSecretsScenarioTest(ScenarioTest):
# @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_linux_secrets')
# @AllowLargeResponse()
# def test_vmss_create_linux_secrets(self, resource_group):
# self.kwargs.update({
# 'loc': 'westus',
# 'vmss': 'vmss1-name',
# 'secrets': json.dumps([{'sourceVault': {'id': 'id'}, 'vaultCertificates': []}]),
# 'vault': self.create_random_name('vmsslinuxkv', 20),
# 'secret': 'mysecret',
# 'ssh_key': TEST_SSH_KEY_PUB
# })
# vault_out = self.cmd('keyvault create -g {rg} -n {vault} -l {loc} --enabled-for-deployment true --enabled-for-template-deployment true').get_output_in_json()
# time.sleep(60)
# self.kwargs['policy_path'] = os.path.join(TEST_DIR, 'keyvault', 'policy.json')
# self.cmd('keyvault certificate create --vault-name {vault} -n cert1 -p @"{policy_path}"')
# self.kwargs['secret_out'] = self.cmd('keyvault secret list-versions --vault-name {vault} -n cert1 --query "[?attributes.enabled].id" -o tsv').output.strip()
# vm_format = self.cmd('vm secret format -s {secret_out}').get_output_in_json()
# self.kwargs['secrets'] = json.dumps(vm_format)
# self.cmd('vmss create -n {vmss} -g {rg} --image Debian --admin-username deploy --ssh-key-value \'{ssh_key}\' --secrets \'{secrets}\'')
# self.cmd('vmss show -n {vmss} -g {rg}', checks=[
# self.check('provisioningState', 'Succeeded'),
# self.check('virtualMachineProfile.osProfile.secrets[0].sourceVault.id', vault_out['id']),
# self.check('virtualMachineProfile.osProfile.secrets[0].vaultCertificates[0].certificateUrl', '{secret_out}')
# ])
class VMSSCreateExistingOptions(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vmss_create_existing_options')
def test_vmss_create_existing_options(self):
self.kwargs.update({
'vmss': 'vrfvmss',
'os_disk': 'vrfosdisk',
'container': 'vrfcontainer',
'sku': 'Standard_A3',
'vnet': 'vrfvnet',
'subnet': 'vrfsubnet',
'lb': 'vrflb',
'bepool': 'mybepool',
'ssh_key': TEST_SSH_KEY_PUB
})
self.cmd('network vnet create -n {vnet} -g {rg} --subnet-name {subnet}')
self.cmd('network lb create --name {lb} -g {rg} --backend-pool-name {bepool}')
self.cmd('vmss create --image CentOS --os-disk-name {os_disk} --admin-username ubuntu --vnet-name {vnet} --subnet {subnet} -l "West US" --vm-sku {sku} --storage-container-name {container} -g {rg} --name {vmss} --load-balancer {lb} --ssh-key-value \'{ssh_key}\' --backend-pool-name {bepool} --use-unmanaged-disk')
self.cmd('vmss show --name {vmss} -g {rg}', checks=[
self.check('sku.name', '{sku}'),
self.check('virtualMachineProfile.storageProfile.osDisk.name', '{os_disk}'),
self.check('virtualMachineProfile.storageProfile.osDisk.vhdContainers[0].ends_with(@, \'{container}\')', True)
])
self.cmd('network lb show --name {lb} -g {rg}',
checks=self.check('backendAddressPools[0].backendIpConfigurations[0].id.contains(@, \'{vmss}\')', True))
self.cmd('network vnet show --name {vnet} -g {rg}',
checks=self.check('subnets[0].ipConfigurations[0].id.contains(@, \'{vmss}\')', True))
class VMSSCreateExistingIdsOptions(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vmss_create_existing_ids')
def test_vmss_create_existing_ids_options(self, resource_group):
from msrestazure.tools import resource_id, is_valid_resource_id
subscription_id = self.get_subscription_id()
self.kwargs.update({
'vmss': 'vrfvmss',
'os_disk': 'vrfosdisk',
'container': 'vrfcontainer',
'sku': 'Standard_A3',
'vnet': 'vrfvnet',
'subnet': 'vrfsubnet',
'lb': 'vrflb',
'bepool': 'mybepool',
'ssh_key': TEST_SSH_KEY_PUB
})
self.cmd('network vnet create -n {vnet} -g {rg} --subnet-name {subnet}')
self.cmd('network lb create --name {lb} -g {rg} --backend-pool-name {bepool}')
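        # construct full resource IDs so vmss create can reference the existing subnet and load balancer by ID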
self.kwargs.update({
'subnet_id': resource_id(subscription=subscription_id, resource_group=resource_group, namespace='Microsoft.Network', type='virtualNetworks', child_type_1='subnets', name=self.kwargs['vnet'], child_name_1=self.kwargs['subnet']),
'lb_id': resource_id(subscription=subscription_id, resource_group=resource_group, namespace='Microsoft.Network', type='loadBalancers', name=self.kwargs['lb'])
})
assert is_valid_resource_id(self.kwargs['subnet_id'])
assert is_valid_resource_id(self.kwargs['lb_id'])
self.cmd('vmss create --image CentOS --os-disk-name {os_disk} --admin-username ubuntu --subnet {subnet_id} -l "West US" --vm-sku {sku} --storage-container-name {container} -g {rg} --name {vmss} --load-balancer {lb_id} --ssh-key-value \'{ssh_key}\' --backend-pool-name {bepool} --use-unmanaged-disk')
self.cmd('vmss show --name {vmss} -g {rg}', checks=[
self.check('sku.name', '{sku}'),
self.check('virtualMachineProfile.storageProfile.osDisk.name', '{os_disk}'),
self.check('virtualMachineProfile.storageProfile.osDisk.vhdContainers[0].ends_with(@, \'{container}\')', True)
])
self.cmd('network lb show --name {lb} -g {rg}',
checks=self.check('backendAddressPools[0].backendIpConfigurations[0].id.contains(@, \'{vmss}\')', True))
self.cmd('network vnet show --name {vnet} -g {rg}',
checks=self.check('subnets[0].ipConfigurations[0].id.contains(@, \'{vmss}\')', True))
class VMSSVMsScenarioTest(ScenarioTest):
def _check_vms_power_state(self, *args):
for iid in self.kwargs['instance_ids']:
result = self.cmd('vmss get-instance-view --resource-group {{rg}} --name {{vmss}} --instance-id {}'.format(iid)).get_output_in_json()
self.assertTrue(result['statuses'][1]['code'] in args)
@ResourceGroupPreparer(name_prefix='cli_test_vmss_vms')
def test_vmss_vms(self, resource_group):
self.kwargs.update({
'vmss': 'vmss1',
'count': 2,
'instance_ids': []
})
self.cmd('vmss create -g {rg} -n {vmss} --image UbuntuLTS --authentication-type password --admin-username admin123 --admin-password TestTest12#$ --instance-count {count}')
instance_list = self.cmd('vmss list-instances --resource-group {rg} --name {vmss}', checks=[
self.check('type(@)', 'array'),
self.check('length(@)', '{count}'),
self.check("length([].name.starts_with(@, '{vmss}'))", self.kwargs['count'])
]).get_output_in_json()
self.kwargs['instance_ids'] = [x['instanceId'] for x in instance_list]
self.kwargs['id'] = self.kwargs['instance_ids'][0]
self.cmd('vmss show --resource-group {rg} --name {vmss} --instance-id {id}', checks=[
self.check('type(@)', 'object'),
self.check('instanceId', '{id}')
])
result = self.cmd('vmss list-instance-connection-info --resource-group {rg} --name {vmss}').get_output_in_json()
self.assertTrue(result['instance 0'].split('.')[1], '5000')
self.cmd('vmss restart --resource-group {rg} --name {vmss} --instance-ids *')
self._check_vms_power_state('PowerState/running', 'PowerState/starting')
self.cmd('vmss stop --resource-group {rg} --name {vmss} --instance-ids *')
self._check_vms_power_state('PowerState/stopped')
self.cmd('vmss start --resource-group {rg} --name {vmss} --instance-ids *')
self._check_vms_power_state('PowerState/running', 'PowerState/starting')
self.cmd('vmss deallocate --resource-group {rg} --name {vmss} --instance-ids *')
self._check_vms_power_state('PowerState/deallocated')
self.cmd('vmss delete-instances --resource-group {rg} --name {vmss} --instance-ids *')
self.cmd('vmss list-instances --resource-group {rg} --name {vmss}')
class VMSSCustomDataScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vmss_create_custom_data')
def test_vmss_create_custom_data(self):
self.kwargs.update({
'vmss': 'vmss-custom-data',
'ssh_key': TEST_SSH_KEY_PUB
})
self.cmd('vmss create -n {vmss} -g {rg} --image Debian --admin-username deploy --ssh-key-value "{ssh_key}" --custom-data "#cloud-config\nhostname: myVMhostname"')
        # custom data is write-only, so there is no automatic way to cross-check it; here we just verify the VM was provisioned
self.cmd('vmss show -n {vmss} -g {rg}',
checks=self.check('provisioningState', 'Succeeded'))
class VMSSNicScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vmss_nics')
def test_vmss_nics(self):
self.kwargs.update({
'vmss': 'vmss1',
})
self.cmd('vmss create -g {rg} -n {vmss} --authentication-type password --admin-username admin123 --admin-password PasswordPassword1! --image Win2012R2Datacenter')
self.cmd('vmss nic list -g {rg} --vmss-name {vmss}', checks=[
self.check('type(@)', 'array'),
self.check("length([?resourceGroup == '{rg}']) == length(@)", True)
])
result = self.cmd('vmss list-instances -g {rg} -n {vmss}').get_output_in_json()
self.kwargs['iid'] = result[0]['instanceId']
nic_list = self.cmd('vmss nic list-vm-nics -g {rg} --vmss-name {vmss} --instance-id {iid}', checks=[
self.check('type(@)', 'array'),
self.check("length([?resourceGroup == '{rg}']) == length(@)", True)
]).get_output_in_json()
self.kwargs['nic'] = nic_list[0].get('name')
self.cmd('vmss nic show --resource-group {rg} --vmss-name {vmss} --instance-id {iid} -n {nic}', checks=[
self.check('type(@)', 'object'),
self.check('name', '{nic}'),
self.check('resourceGroup', '{rg}')
])
class VMSSCreateIdempotentTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vmss_create_idempotent')
def test_vmss_create_idempotent(self, resource_group):
self.kwargs.update({'vmss': 'vmss1'})
# run the command twice with the same parameters and verify it does not fail
self.cmd('vmss create -g {rg} -n {vmss} --authentication-type password --admin-username admin123 --admin-password PasswordPassword1! --image UbuntuLTS --use-unmanaged-disk')
self.cmd('vmss create -g {rg} -n {vmss} --authentication-type password --admin-username admin123 --admin-password PasswordPassword1! --image UbuntuLTS --use-unmanaged-disk')
# still 1 vnet and 1 subnet inside
self.cmd('network vnet list -g {rg}', checks=[
self.check('length([])', 1),
self.check('[0].name', self.kwargs['vmss'] + 'VNET'),
self.check('[0].subnets[0].addressPrefix', '10.0.0.0/24'),
self.check('length([0].subnets)', 1),
])
class VMSSILBTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vmss_ilb')
def test_vmss_with_ilb(self, resource_group):
self.kwargs.update({'vmss': 'vmss1'})
self.cmd('vmss create -g {rg} -n {vmss} --admin-username admin123 --admin-password PasswordPassword1! --image centos --instance-count 1 --public-ip-address ""')
# TODO: restore error validation when #5155 is addressed
# with self.assertRaises(AssertionError) as err:
self.cmd('vmss list-instance-connection-info -g {rg} -n {vmss}', expect_failure=True)
# self.assertTrue('internal load balancer' in str(err.exception))
@api_version_constraint(ResourceType.MGMT_NETWORK, min_api='2017-08-01')
class VMSSLoadBalancerWithSku(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vmss_lb_sku')
def test_vmss_lb_sku(self, resource_group):
self.kwargs.update({
'vmss0': 'vmss0',
'vmss': 'vmss1',
'lb': 'lb1',
'ip': 'pubip1',
'sku': 'standard',
'loc': 'eastus2'
})
# default to Basic
self.cmd('vmss create -g {rg} -l {loc} -n {vmss0} --image UbuntuLTS --admin-username admin123 --admin-password PasswordPassword1!')
self.cmd('network lb list -g {rg}', checks=self.check('[0].sku.name', 'Basic'))
self.cmd('network public-ip list -g {rg}', checks=[
self.check('[0].sku.name', 'Basic'),
self.check('[0].publicIpAllocationMethod', 'Dynamic')
])
        # but you can override the defaults
self.cmd('vmss create -g {rg} -l {loc} -n {vmss} --lb {lb} --lb-sku {sku} --public-ip-address {ip} --image UbuntuLTS --admin-username admin123 --admin-password PasswordPassword1!')
self.cmd('network lb show -g {rg} -n {lb}',
checks=self.check('sku.name', 'Standard'))
self.cmd('network public-ip show -g {rg} -n {ip}', checks=[
self.check('sku.name', 'Standard'),
self.check('publicIpAllocationMethod', 'Static')
])
class VMLiveScenarioTest(LiveScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vm_create_progress')
def test_vm_create_progress(self, resource_group):
from azure.cli.testsdk.utilities import force_progress_logging
self.kwargs.update({'vm': 'vm123'})
with force_progress_logging() as test_io:
self.cmd('vm create -g {rg} -n {vm} --admin-username {vm} --admin-password PasswordPassword1! --image debian')
content = test_io.getvalue()
# check log has okay format
lines = content.splitlines()
for line in lines:
self.assertTrue(line.split(':')[0] in ['Accepted', 'Succeeded'])
        # spot check that we do get some relevant progress messages
        # (Note: the CLI's progress controller does a routine "sleep" before sampling the LRO response.
        # As a consequence, it can't promise that each resource's result will be displayed)
self.assertTrue(any(line.startswith('Succeeded:') or line.startswith('Accepted:') for line in lines))
@api_version_constraint(ResourceType.MGMT_COMPUTE, min_api='2017-03-30')
class VMZoneScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_disk_zones', location='eastus2')
def test_vm_disk_create_zones(self, resource_group, resource_group_location):
self.kwargs.update({
'zones': '2',
'disk': 'disk123',
'size': 1
})
self.cmd('disk create -g {rg} -n {disk} --size-gb {size} --zone {zones}', checks=[
self.check('zones[0]', '{zones}')
])
self.cmd('disk show -g {rg} -n {disk}',
checks=self.check('zones[0]', '{zones}'))
result = self.cmd('disk show -g {rg} -n {disk} -otable')
table_output = set(result.output.splitlines()[2].split())
self.assertTrue(set([resource_group, resource_group_location, self.kwargs['disk'], self.kwargs['zones'], str(self.kwargs['size']), 'Premium_LRS']).issubset(table_output))
result = self.cmd('disk list -g {rg} -otable')
table_output = set(result.output.splitlines()[2].split())
self.assertTrue(set([resource_group, resource_group_location, self.kwargs['disk'], self.kwargs['zones']]).issubset(table_output))
class VMRunCommandScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vm_run_command')
def test_vm_run_command_e2e(self, resource_group, resource_group_location):
self.kwargs.update({
'vm': 'test-run-command-vm',
'loc': resource_group_location
})
self.cmd('vm run-command list -l {loc}')
self.cmd('vm run-command show --command-id RunShellScript -l {loc}')
public_ip = self.cmd('vm create -g {rg} -n {vm} --image ubuntults --admin-username clitest1 --admin-password Test12345678!!').get_output_in_json()['publicIpAddress']
self.cmd('vm open-port -g {rg} -n {vm} --port 80')
self.cmd('vm run-command invoke -g {rg} -n{vm} --command-id RunShellScript --script "sudo apt-get update && sudo apt-get install -y nginx"')
        time.sleep(15)  # 15 seconds should be enough for nginx to start (skipped under playback mode)
import requests
r = requests.get('http://' + public_ip)
self.assertTrue('Welcome to nginx' in str(r.content))
@ResourceGroupPreparer(name_prefix='cli_test_vm_run_command_w_params')
def test_vm_run_command_with_parameters(self, resource_group):
self.kwargs.update({'vm': 'test-run-command-vm2'})
self.cmd('vm create -g {rg} -n {vm} --image debian --admin-username clitest1 --admin-password Test12345678!!')
self.cmd('vm run-command invoke -g {rg} -n{vm} --command-id RunShellScript --scripts "echo $0 $1" --parameters hello world')
# @ResourceGroupPreparer(name_prefix='cli_test_vm_encryption', location='westus')
# def test_vm_disk_encryption_e2e(self, resource_group, resource_group_location):
# self.kwargs.update({
# 'vault': self.create_random_name('vault', 10),
# 'vm': 'vm1'
# })
# self.cmd('keyvault create -g {rg} -n {vault} --enabled-for-disk-encryption "true"')
# time.sleep(60) # to avoid 504(too many requests) on a newly created vault
# self.cmd('vm create -g {rg} -n {vm} --image win2012datacenter --admin-username clitester1 --admin-password Test123456789!')
# self.cmd('vm encryption enable -g {rg} -n {vm} --disk-encryption-keyvault {vault}')
# self.cmd('vm encryption show -g {rg} -n {vm}', checks=[self.check('disks[0].statuses[0].code', 'EncryptionState/encrypted')])
# self.cmd('vm encryption disable -g {rg} -n {vm}')
@api_version_constraint(ResourceType.MGMT_COMPUTE, min_api='2017-03-30')
class VMSSRollingUpgrade(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_vmss_rolling_update')
def test_vmss_rolling_upgrade(self, resource_group):
self.kwargs.update({
'lb': 'lb1',
'probe': 'probe1',
'vmss': 'vmss1'
})
# set up a LB with the probe for rolling upgrade
self.cmd('network lb create -g {rg} -n {lb}')
self.cmd('network lb probe create -g {rg} --lb-name {lb} -n {probe} --protocol http --port 80 --path /')
self.cmd('network lb rule create -g {rg} --lb-name {lb} -n rule1 --protocol tcp --frontend-port 80 --backend-port 80 --probe-name {probe}')
self.cmd('network lb inbound-nat-pool create -g {rg} --lb-name {lb} -n nat-pool1 --backend-port 22 --frontend-port-range-start 50000 --frontend-port-range-end 50119 --protocol Tcp --frontend-ip-name LoadBalancerFrontEnd')
        # create a scale set that uses the LB; note we start in manual upgrade mode as the setup is not finished yet
self.cmd('vmss create -g {rg} -n {vmss} --image ubuntults --admin-username clitester1 --admin-password Testqwer1234! --lb {lb} --health-probe {probe}')
# install the web server
_, settings_file = tempfile.mkstemp()
with open(settings_file, 'w') as outfile:
json.dump({
"commandToExecute": "sudo apt-get update && sudo apt-get install -y nginx",
}, outfile)
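        # escape backslashes so a Windows temp path survives CLI argument parsing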
settings_file = settings_file.replace('\\', '\\\\')
self.kwargs['settings'] = settings_file
self.cmd('vmss extension set -g {rg} --vmss-name {vmss} -n customScript --publisher Microsoft.Azure.Extensions --settings {settings} --version 2.0')
self.cmd('vmss update-instances -g {rg} -n {vmss} --instance-ids "*"')
# now we are ready for the rolling upgrade mode
self.cmd('vmss update -g {rg} -n {vmss} --set upgradePolicy.mode=rolling')
# make sure the web server works
result = self.cmd('vmss list-instance-connection-info -g {rg} -n {vmss} -o tsv')
        time.sleep(15)  # 15 seconds should be enough for nginx to start (skipped under playback mode)
import requests
r = requests.get('http://' + result.output.split(':')[0])
self.assertTrue('Welcome to nginx' in str(r.content))
        # run a rolling upgrade; it may be a no-op, but we need to exercise the command anyway
self.cmd('vmss rolling-upgrade start -g {rg} -n {vmss}')
result = self.cmd('vmss rolling-upgrade get-latest -g {rg} -n {vmss}').get_output_in_json()
self.assertTrue(('policy' in result) and ('progress' in result)) # spot check that it is about rolling upgrade
# 'cancel' should fail as we have no active upgrade to cancel
self.cmd('vmss rolling-upgrade cancel -g {rg} -n {vmss}', expect_failure=True)
class VMCreateWithExistingNic(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_create_vm_existing_nic')
def test_vm_create_existing_nic(self, resource_group):
import re
self.cmd('network public-ip create -g {rg} -n my-pip')
self.cmd('network vnet create -g {rg} -n my-vnet --subnet-name my-subnet1')
self.cmd('network nic create -g {rg} -n my-nic --subnet my-subnet1 --vnet-name my-vnet --public-ip-address my-pip')
self.cmd('network nic ip-config create -n my-ipconfig2 -g {rg} --nic-name my-nic --private-ip-address-version IPv6')
self.cmd('vm create -g {rg} -n vm1 --image ubuntults --nics my-nic --generate-ssh-keys --admin-username ubuntuadmin')
result = self.cmd('vm show -g {rg} -n vm1 -d').get_output_in_json()
self.assertTrue(re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+', result['publicIps']))
self.assertTrue(re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+', result['privateIps']))
class VMOsDiskSwap(ScenarioTest):
@ResourceGroupPreparer()
def test_vm_os_disk_swap(self, resource_group):
self.kwargs.update({
'vm': 'vm1',
'backupDisk': 'disk1',
})
self.cmd('vm create -g {rg} -n {vm} --image centos --admin-username clitest123 --generate-ssh-keys')
res = self.cmd('vm show -g {rg} -n {vm}').get_output_in_json()
original_disk_id = res['storageProfile']['osDisk']['managedDisk']['id']
backup_disk_id = self.cmd('disk create -g {{rg}} -n {{backupDisk}} --source {}'.format(original_disk_id)).get_output_in_json()['id']
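        # the VM must be stopped (or deallocated) before its OS disk can be swapped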
self.cmd('vm stop -g {rg} -n {vm}')
self.cmd('vm update -g {{rg}} -n {{vm}} --os-disk {}'.format(backup_disk_id))
self.cmd('vm show -g {rg} -n {vm}', checks=[
self.check('storageProfile.osDisk.managedDisk.id', backup_disk_id),
self.check('storageProfile.osDisk.name', self.kwargs['backupDisk'])
])
class VMGenericUpdate(ScenarioTest):
@ResourceGroupPreparer()
def test_vm_generic_update(self, resource_group):
self.kwargs.update({
'vm': 'vm1',
})
self.cmd('vm create -g {rg} -n {vm} --image debian --data-disk-sizes-gb 1 2 --admin-username cligenerics --generate-ssh-keys')
        # we will try all the kinds of generic updates we can
self.cmd('vm update -g {rg} -n {vm} --set identity.type="SystemAssigned"', checks=[
self.check('identity.type', 'SystemAssigned')
])
self.cmd('vm update -g {rg} -n {vm} --set storageProfile.dataDisks[0].caching="ReadWrite"', checks=[
self.check('storageProfile.dataDisks[0].caching', 'ReadWrite')
])
self.cmd('vm update -g {rg} -n {vm} --remove storageProfile.dataDisks', checks=[
self.check('storageProfile.dataDisks', [])
])
# endregion
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
031216c936d39754abde2a11c2e9bc24bcd35fdf | 9560e118fafa944de93c5f6aec92a41a53825068 | /webaskb_run.py | 07997f0184ac34ea0c8d6c1d7cf93247b9085020 | [] | no_license | cimeister/WebAsKB | 6bedd567646495f3af8daf939cbf4ff9d674ee6e | b8f4488ce9a226bca0f0cff278cc84fd7d2d1f6c | refs/heads/master | 2022-01-06T05:49:44.402051 | 2019-05-24T13:58:55 | 2019-05-24T13:58:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | from config import *
from SplitQA import SplitQA
from noisy_supervision import NoisySupervision
from webaskb_ptrnet import WebAsKB_PtrNet
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("operation", help='available operations: "gen_noisy_sup","run_ptrnet" ,"train_ptrnet", "splitqa"')
parser.add_argument("--eval_set", help='available eval sets: "dev","test"')
args = parser.parse_args()
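# dispatch on the requested operation; --eval_set (dev/test) overrides the configured evaluation split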
if args.eval_set is not None:
config.EVALUATION_SET = args.eval_set
if args.operation == 'gen_noisy_sup':
noisy_sup = NoisySupervision()
noisy_sup.gen_noisy_supervision()
elif args.operation == 'run_ptrnet':
ptrnet = WebAsKB_PtrNet()
ptrnet.load_data()
ptrnet.init()
ptrnet.eval()
elif args.operation == 'train_ptrnet':
config.PERFORM_TRAINING = True
config.LOAD_SAVED_MODEL = False
config.max_evalset_size = 2000
ptrnet = WebAsKB_PtrNet()
ptrnet.load_data()
ptrnet.init()
ptrnet.train()
elif args.operation == 'splitqa':
config.PERFORM_TRAINING = False
splitqa = SplitQA()
splitqa.run_executors()
splitqa.gen_predictions_file()
splitqa.compute_final_results()
else:
print('option not found, available operations: "gen_noisy_sup","run_ptrnet" ,"train_ptrnet", "splitqa"') | [
"[email protected]"
] | |
2e295fa94dd0fee0546ab2840203eef3f5a2ae4e | 14453c13d552165cabe72a310f44f7c58eaacad0 | /tests/main/dsl/test_skip_passes.py | 7cb4376f18205ffa80bb6f7e6da33bcb8a3eb791 | [
"Apache-2.0"
] | permissive | ai2cm/pace | 76a98ffae3baa92bd3b2ddc422b50dfa50255642 | c543e8ec478d46d88b48cdd3beaaa1717a95b935 | refs/heads/main | 2023-07-06T07:18:11.558315 | 2022-12-22T21:45:34 | 2022-12-22T21:45:34 | 392,106,887 | 27 | 13 | Apache-2.0 | 2023-07-03T13:47:46 | 2021-08-02T22:05:11 | Python | UTF-8 | Python | false | false | 2,193 | py | import unittest.mock
# will need to update this import when gt4py is updated
from gt4py.cartesian.gtc.passes.oir_optimizations.horizontal_execution_merging import (
HorizontalExecutionMerging,
)
from gt4py.cartesian.gtc.passes.oir_pipeline import DefaultPipeline
from gt4py.cartesian.gtscript import PARALLEL, computation, interval
from pace.dsl.dace.dace_config import DaceConfig
from pace.dsl.stencil import (
CompilationConfig,
GridIndexing,
StencilConfig,
StencilFactory,
)
from pace.dsl.typing import FloatField
from pace.util import X_DIM, Y_DIM, Z_DIM
def stencil_definition(a: FloatField):
with computation(PARALLEL), interval(...):
a = 0.0
def test_skip_passes_becomes_oir_pipeline():
backend = "numpy"
dace_config = DaceConfig(None, backend)
config = StencilConfig(
compilation_config=CompilationConfig(backend=backend), dace_config=dace_config
)
grid_indexing = GridIndexing(
domain=(4, 4, 7),
n_halo=3,
south_edge=False,
north_edge=False,
west_edge=False,
east_edge=False,
)
factory = StencilFactory(config=config, grid_indexing=grid_indexing)
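    # without skip_passes, HorizontalExecutionMerging should be part of the default OIR pipeline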
with unittest.mock.patch(
"gt4py.cartesian.gtscript.stencil"
) as mock_stencil_builder:
factory.from_dims_halo(
stencil_definition,
compute_dims=[X_DIM, Y_DIM, Z_DIM],
)
pipeline: DefaultPipeline = mock_stencil_builder.call_args.kwargs.get(
"oir_pipeline", DefaultPipeline()
)
assert HorizontalExecutionMerging not in pipeline.skip
assert HorizontalExecutionMerging in pipeline.steps
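    # naming the pass in skip_passes should move it from the pipeline's steps into its skip list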
with unittest.mock.patch(
"gt4py.cartesian.gtscript.stencil"
) as mock_stencil_builder:
factory.from_dims_halo(
stencil_definition,
compute_dims=[X_DIM, Y_DIM, Z_DIM],
skip_passes=("HorizontalExecutionMerging",),
)
assert "oir_pipeline" in mock_stencil_builder.call_args.kwargs
pipeline: DefaultPipeline = mock_stencil_builder.call_args.kwargs["oir_pipeline"]
assert HorizontalExecutionMerging in pipeline.skip
assert HorizontalExecutionMerging not in pipeline.steps
| [
"[email protected]"
] | |
99bbd9d46836511c97535c041251b08e19961ac9 | a2062cce9fbc6a5392e188ffc1babd26f05e7814 | /numpytutorial/basic/tutorial/multiplication.py | 8cebe2369875b13916937e2b339ab2c1d3d6a809 | [] | no_license | rohitaswchoudhary/spy_tutorial | 65fa62b25891a076245cf6a1093ba8ccb9932d3c | 0afdb9492f34a59a15d0531de5ca64d8ef8422af | refs/heads/main | 2023-06-02T14:52:16.752137 | 2021-06-17T06:54:59 | 2021-06-17T06:54:59 | 328,871,875 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | import numpy as np
x = np.array([[1,3,0], [-1,2,1], [0, 0, 2]])
y = np.matrix([[2, 3, 4], [1,2,3], [-1,1,2]])
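# np.dot performs matrix multiplication, so the order of the operands matters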
print(np.dot(x, y))
print(np.dot(y, x))
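# == compares element-wise and returns a boolean array rather than a single truth value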
print(x==y) | [
"[email protected]"
] | |
2e9d806506c0eb1a705e0a484e74b7e78d806147 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/kepler_bd/sdB_kepler_bd_coadd.py | 848fd3775b6bb4cab1b38014fff779fb2a41155a | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 421 | py | from gPhoton.gMap import gMap
def main():
    gMap(band="NUV", skypos=[+42.3250,286.91875], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_kepler_bd/sdB_kepler_bd_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_kepler_bd/sdB_kepler_bd_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
f41ecf32c0cbade99fe1da798f042464bfb10985 | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/pytest/fib7/test_fibonacci.py | 412bfcf25a359af1ef1610c04a755329dca30286 | [] | no_license | szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 | Python | UTF-8 | Python | false | false | 540 | py | import pytest
from fibonacci import fib
def test_fib():
assert fib(10) == 55
def test_fib_negative():
with pytest.raises(Exception) as err:
fib(-1)
assert err.type == ValueError
assert str(err.value) == 'Invalid parameter -1'
def test_fib_negative_again():
with pytest.raises(ValueError) as err:
fib(-1)
assert str(err.value) == 'Invalid parameter -1'
def test_fib_non_integer():
    with pytest.raises(ValueError) as err:
        fib(3.5)
    assert str(err.value) == 'Invalid parameter 3.5'
| [
"[email protected]"
] | |
7415b1fe6c3fef75bcb7ef26e5dc8d6ae6afa1de | 7c384a56e1bcd66ad4ee4b9dd413c49ff9f71bf1 | /mario.py | cd382e705f6e9b24ee0eb3b92eaf1623ec3f2094 | [] | no_license | Rouen007/mario | d9405c016ac172d5b38c26fa239a27be51662448 | da5a678feefdcf3e8b220c1c6e8fd0ef67f285b7 | refs/heads/master | 2020-09-27T07:08:55.213068 | 2017-10-07T22:38:20 | 2017-10-07T22:38:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,822 | py | #!/usr/bin/env python
"""Mario Gym Adventure!
python mario.py ppaquette/meta-SuperMarioBros-v0 \
-a DQFDAgent -c mario_agent.json \
-ld ./demos/ -s ./agents/ -m ./monitors/ -mv 1000 -D
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import demo
from model import mario_net
from ppaquette_gym_super_mario import wrappers
from tensorforce import Configuration, TensorForceError
from tensorforce.agents import agents
from tensorforce.contrib.openai_gym import OpenAIGym
from tensorforce.execution import Runner
def main():
parser = argparse.ArgumentParser()
parser.add_argument('gym_id', help="ID of the gym environment, i.e. ppaquette/SuperMarioBros-1-1-v0")
parser.add_argument('-a', '--agent', help='Agent')
parser.add_argument('-c', '--agent-config', help="Agent configuration file")
parser.add_argument('-e', '--episodes', type=int, default=50000, help="Number of episodes")
parser.add_argument('-t', '--max-timesteps', type=int, default=100000, help="Maximum number of timesteps per episode")
parser.add_argument('-m', '--monitor', help="Save results to this directory")
parser.add_argument('-ms', '--monitor-safe', action='store_true', default=False, help="Do not overwrite previous results")
parser.add_argument('-mv', '--monitor-video', type=int, default=0, help="Save video every x steps (0 = disabled)")
parser.add_argument('-s', '--save', help="Save agent to this dir")
parser.add_argument('-se', '--save-episodes', type=int, default=100, help="Save agent every x episodes")
parser.add_argument('-l', '--load', help="Load agent from this dir")
parser.add_argument('-D', '--debug', action='store_true', default=False, help="Show debug outputs")
parser.add_argument('-ld', '--load-demo', required=True, help="Load demos from this dir")
parser.add_argument('-pt', '--pretrain', action='store_true', default=False, help="Pretrain agent on demos")
parser.add_argument('-ul', '--use_lstm', action='store_true', default=False, help="Use LSTM model")
parser.add_argument('-ls', '--lstm_size', type=int, default=256, help="LSTM size")
args = parser.parse_args()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
environment = OpenAIGym(args.gym_id,
monitor=args.monitor,
monitor_safe=args.monitor_safe,
monitor_video=args.monitor_video)
mode_wrapper = wrappers.SetPlayingMode('algo')
ac_wrapper = wrappers.ToDiscrete()
environment.gym = mode_wrapper(ac_wrapper(environment.gym))
if args.agent_config:
agent_config = Configuration.from_json(args.agent_config)
else:
agent_config = Configuration()
logger.info("No agent configuration provided.")
agent_config.default(dict(states=environment.states,
actions=environment.actions,
network=mario_net(name='mario',
lstm_size=args.lstm_size,
actions=environment.actions['num_actions'],
use_lstm=args.use_lstm)))
agent = agents[args.agent](config=agent_config)
if args.load:
load_dir = os.path.dirname(args.load)
if not os.path.isdir(load_dir):
raise OSError("Could not load agent from {}: No such directory.".format(load_dir))
logger.info("-" * 16)
agent.load_model(args.load)
logger.info("Loaded {}".format(agent))
if args.debug:
logger.info("-" * 16)
logger.info("Configuration:")
logger.info(agent_config)
if args.save:
save_dir = os.path.dirname(args.save)
if not os.path.isdir(save_dir):
try:
os.mkdir(save_dir, 0o755)
except OSError:
raise OSError("Cannot save agent to dir {} ()".format(save_dir))
try:
if args.load_demo:
logger.info("-" * 16)
logger.info("Loading demos")
demos = demo.load(args.load_demo)
logger.info("Importing demos")
agent.import_demonstrations(demos)
if args.pretrain:
logger.info("-" * 16)
logger.info("Pretraining {} steps".format(len(demos)))
agent.pretrain(steps=len(demos))
runner = Runner(
agent=agent,
environment=environment,
repeat_actions=1,
save_path=args.save,
save_episodes=args.save_episodes
)
        report_episodes = max(1, args.episodes // 1000)  # guard against modulo-by-zero when episodes < 1000
if args.debug:
report_episodes = 1
def episode_finished(r):
if r.episode % report_episodes == 0:
logger.info("Finished episode {ep} after {ts} timesteps".format(ep=r.episode, ts=r.timestep))
logger.info("Episode reward: {}".format(r.episode_rewards[-1]))
logger.info("Average of last 500 rewards: {}".format(sum(r.episode_rewards[-500:]) / 500))
logger.info("Average of last 100 rewards: {}".format(sum(r.episode_rewards[-100:]) / 100))
return True
logger.info("Starting {agent} for Environment '{env}'".format(agent=agent, env=environment))
runner.run(args.episodes, args.max_timesteps, episode_finished=episode_finished)
logger.info("Learning finished. Total episodes: {ep}".format(ep=runner.episode))
except (KeyboardInterrupt):
agent.save_model(args.save)
pass
if args.monitor:
environment.gym.monitor.close()
environment.close()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
9970485c230cc96aa9656113f34c690f37407afc | c2ff2ee2b0c84e047a80cfdf0b0d0b122fc9db79 | /VL-BERT/cls/function/train.py | edfa314cfd96d29555f476a8c80cc34b2e73dc0a | [
"MIT"
] | permissive | obarnard99/vilio | 275dcb62cdb8b2d8c55ab1e73f3a796bd2073a5b | 77aac226c3a0910410f11a5999f8908181f57ccd | refs/heads/master | 2023-06-29T17:02:02.282457 | 2021-06-22T09:50:11 | 2021-06-22T09:50:11 | 337,738,373 | 0 | 0 | MIT | 2021-06-22T09:50:12 | 2021-02-10T13:50:49 | Python | UTF-8 | Python | false | false | 20,986 | py | import os
import pprint
import shutil
import inspect
import random
from tensorboardX import SummaryWriter
import imgaug
import numpy as np
import torch
import torch.nn
import torch.optim as optim
import torch.distributed as distributed
from torch.nn.parallel import DistributedDataParallel as DDP
from loguru import logger
from torch.optim.swa_utils import SWALR
from common.utils.create_logger import create_logger
from common.utils.misc import summary_parameters, bn_fp16_half_eval
from common.utils.load import smart_resume, smart_partial_load_model_state_dict
from common.trainer import train
from common.metrics.composite_eval_metric import CompositeEvalMetric
from common.metrics import cls_metrics
from common.callbacks.batch_end_callbacks.speedometer import Speedometer
from common.callbacks.epoch_end_callbacks.validation_monitor import ValidationMonitor
from common.callbacks.epoch_end_callbacks.checkpoint import Checkpoint
from common.lr_scheduler import WarmupMultiStepLR
from common.nlp.bert.optimization import AdamW, WarmupLinearSchedule
from common.losses import RocStarLoss
from cls.data.build import make_dataloader, build_dataset, build_transforms
from cls.modules import *
from cls.function.val import do_validation
try:
from apex import amp
from apex.parallel import DistributedDataParallel as Apex_DDP
except ImportError:
pass
#raise ImportError("Please install apex from https://www.github.com/nvidia/apex if you want to use fp16.")
def train_net(args, config):
# setup logger
logger, final_output_path = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET,
split='train')
model_prefix = os.path.join(final_output_path, config.MODEL_PREFIX)
if args.log_dir is None:
args.log_dir = os.path.join(final_output_path, 'tensorboard_logs')
# pprint.pprint(args)
# logger.info('training args:{}\n'.format(args))
# pprint.pprint(config)
# logger.info('training config:{}\n'.format(pprint.pformat(config)))
# manually set random seed
if config.RNG_SEED > -1:
random.seed(a=config.RNG_SEED)
np.random.seed(config.RNG_SEED)
torch.random.manual_seed(config.RNG_SEED)
torch.cuda.manual_seed_all(config.RNG_SEED)
torch.backends.cudnn.deterministic = True
imgaug.random.seed(config.RNG_SEED)
# cudnn
torch.backends.cudnn.benchmark = False
if args.cudnn_off:
torch.backends.cudnn.enabled = False
if args.dist:
model = eval(config.MODULE)(config)
local_rank = int(os.environ.get('LOCAL_RANK') or 0)
config.GPUS = str(local_rank)
torch.cuda.set_device(local_rank)
master_address = os.environ['MASTER_ADDR']
master_port = int(os.environ['MASTER_PORT'] or 23456)
world_size = int(os.environ['WORLD_SIZE'] or 1)
rank = int(os.environ['RANK'] or 0)
if rank == 0:
pprint.pprint(args)
logger.info('training args:{}\n'.format(args))
pprint.pprint(config)
logger.info('training config:{}\n'.format(pprint.pformat(config)))
if args.slurm:
distributed.init_process_group(backend='nccl')
else:
try:
distributed.init_process_group(
backend='nccl',
init_method='tcp://{}:{}'.format(master_address, master_port),
world_size=world_size,
rank=rank,
group_name='mtorch')
except RuntimeError:
pass
print(f'native distributed, size: {world_size}, rank: {rank}, local rank: {local_rank}')
torch.cuda.set_device(local_rank)
config.GPUS = str(local_rank)
model = model.cuda()
if not config.TRAIN.FP16:
model = DDP(model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
if rank == 0:
summary_parameters(model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model,
logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
writer = None
if args.log_dir is not None:
tb_log_dir = os.path.join(args.log_dir, 'rank{}'.format(rank))
if not os.path.exists(tb_log_dir):
os.makedirs(tb_log_dir)
writer = SummaryWriter(log_dir=tb_log_dir)
batch_size = world_size * (sum(config.TRAIN.BATCH_IMAGES)
if isinstance(config.TRAIN.BATCH_IMAGES, list)
else config.TRAIN.BATCH_IMAGES)
if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
base_lr = config.TRAIN.LR * batch_size
optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
'lr': base_lr * _lr_mult}
for _k, _lr_mult in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
if config.TRAIN.OPTIMIZER == 'SGD':
optimizer = optim.SGD(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'Adam':
optimizer = optim.Adam(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'AdamW':
optimizer = AdamW(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=config.TRAIN.WD,
correct_bias=True)
else:
raise ValueError('Not support optimizer {}!'.format(config.TRAIN.OPTIMIZER))
total_gpus = world_size
train_loader, train_sampler = make_dataloader(config,
mode='train',
distributed=True,
num_replicas=world_size,
rank=rank,
expose_sampler=True)
val_loader = make_dataloader(config,
mode='val',
distributed=True,
num_replicas=world_size,
rank=rank)
else:
pprint.pprint(args)
logger.info('training args:{}\n'.format(args))
pprint.pprint(config)
logger.info('training config:{}\n'.format(pprint.pformat(config)))
#os.environ['CUDA_VISIBLE_DEVICES'] = config.GPUS
model = eval(config.MODULE)(config)
summary_parameters(model, logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
num_gpus = len(config.GPUS.split(','))
# assert num_gpus <= 1 or (not config.TRAIN.FP16), "Not support fp16 with torch.nn.DataParallel. " \
# "Please use amp.parallel.DistributedDataParallel instead."
if num_gpus > 1 and config.TRAIN.FP16:
logger.warning("Not support fp16 with torch.nn.DataParallel.")
config.TRAIN.FP16 = False
total_gpus = num_gpus
rank = None
writer = SummaryWriter(log_dir=args.log_dir) if args.log_dir is not None else None
if hasattr(model, 'setup_adapter'):
logger.info('Setting up adapter modules!')
model.setup_adapter()
# model
if num_gpus > 1:
model = torch.nn.DataParallel(model, device_ids=[int(d) for d in config.GPUS.split(',')]).cuda()
else:
torch.cuda.set_device(int(config.GPUS))
model.cuda()
# loader
# train_set = 'train+val' if config.DATASET.TRAIN_WITH_VAL else 'train'
train_loader = make_dataloader(config, mode='train', distributed=False)
val_loader = make_dataloader(config, mode='val', distributed=False)
train_sampler = None
batch_size = num_gpus * (sum(config.TRAIN.BATCH_IMAGES) if isinstance(config.TRAIN.BATCH_IMAGES, list)
else config.TRAIN.BATCH_IMAGES)
if config.TRAIN.GRAD_ACCUMULATE_STEPS > 1:
batch_size = batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS
base_lr = config.TRAIN.LR * batch_size
optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if _k in n],
'lr': base_lr * _lr_mult}
for _k, _lr_mult in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for n, p in model.named_parameters()
if all([_k not in n for _k, _ in config.TRAIN.LR_MULT])]})
if config.TRAIN.OPTIMIZER == 'SGD':
optimizer = optim.SGD(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
momentum=config.TRAIN.MOMENTUM,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'Adam':
optimizer = optim.Adam(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
weight_decay=config.TRAIN.WD)
elif config.TRAIN.OPTIMIZER == 'AdamW':
optimizer = AdamW(optimizer_grouped_parameters,
lr=config.TRAIN.LR * batch_size,
betas=(0.9, 0.999),
eps=1e-6,
weight_decay=config.TRAIN.WD,
correct_bias=True)
else:
raise ValueError('Not support optimizer {}!'.format(config.TRAIN.OPTIMIZER))
# partial load pretrain state dict
if config.NETWORK.PARTIAL_PRETRAIN != "":
pretrain_state_dict = torch.load(config.NETWORK.PARTIAL_PRETRAIN, map_location=lambda storage, loc: storage)['state_dict']
prefix_change = [prefix_change.split('->') for prefix_change in config.NETWORK.PARTIAL_PRETRAIN_PREFIX_CHANGES]
if len(prefix_change) > 0:
pretrain_state_dict_parsed = {}
for k, v in pretrain_state_dict.items():
no_match = True
for pretrain_prefix, new_prefix in prefix_change:
if k.startswith(pretrain_prefix):
k = new_prefix + k[len(pretrain_prefix):]
pretrain_state_dict_parsed[k] = v
no_match = False
break
if no_match:
pretrain_state_dict_parsed[k] = v
pretrain_state_dict = pretrain_state_dict_parsed
smart_partial_load_model_state_dict(model, pretrain_state_dict)
# pretrained classifier
# if config.NETWORK.CLASSIFIER_PRETRAINED:
# print('Initializing classifier weight from pretrained word embeddings...')
# answers_word_embed = []
# for k, v in model.state_dict().items():
# if 'word_embeddings.weight' in k:
# word_embeddings = v.detach().clone()
# break
# for answer in train_loader.dataset.answer_vocab:
# a_tokens = train_loader.dataset.tokenizer.tokenize(answer)
# a_ids = train_loader.dataset.tokenizer.convert_tokens_to_ids(a_tokens)
# a_word_embed = (torch.stack([word_embeddings[a_id] for a_id in a_ids], dim=0)).mean(dim=0)
# answers_word_embed.append(a_word_embed)
# answers_word_embed_tensor = torch.stack(answers_word_embed, dim=0)
# for name, module in model.named_modules():
# if name.endswith('final_mlp'):
# module[-1].weight.data = answers_word_embed_tensor.to(device=module[-1].weight.data.device)
# metrics
train_metrics_list = [
cls_metrics.Accuracy(allreduce=args.dist, num_replicas=world_size if args.dist else 1)
]
val_metrics_list = [
cls_metrics.Accuracy(allreduce=args.dist, num_replicas=world_size if args.dist else 1),
cls_metrics.RocAUC(allreduce=args.dist, num_replicas=world_size if args.dist else 1)
]
for output_name, display_name in config.TRAIN.LOSS_LOGGERS:
train_metrics_list.append(
cls_metrics.LossLogger(output_name, display_name=display_name, allreduce=args.dist,
num_replicas=world_size if args.dist else 1))
train_metrics = CompositeEvalMetric()
val_metrics = CompositeEvalMetric()
for child_metric in train_metrics_list:
train_metrics.add(child_metric)
for child_metric in val_metrics_list:
val_metrics.add(child_metric)
# epoch end callbacks
epoch_end_callbacks = []
if (rank is None) or (rank == 0):
epoch_end_callbacks = [Checkpoint(model_prefix, config.CHECKPOINT_FREQUENT)]
validation_monitor = ValidationMonitor(do_validation, val_loader, val_metrics,
host_metric_name='RocAUC',
label_index_in_batch=config.DATASET.LABEL_INDEX_IN_BATCH,
model_dir=os.path.dirname(model_prefix))
# optimizer initial lr before
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
# resume/auto-resume
if rank is None or rank == 0:
smart_resume(model, optimizer, validation_monitor, config, model_prefix, logger)
if args.dist:
begin_epoch = torch.tensor(config.TRAIN.BEGIN_EPOCH).cuda()
distributed.broadcast(begin_epoch, src=0)
config.TRAIN.BEGIN_EPOCH = begin_epoch.item()
# batch end callbacks
batch_size = len(config.GPUS.split(',')) * config.TRAIN.BATCH_IMAGES
batch_end_callbacks = [Speedometer(batch_size, config.LOG_FREQUENT,
batches_per_epoch=len(train_loader),
epochs=config.TRAIN.END_EPOCH - config.TRAIN.BEGIN_EPOCH)]
# setup lr step and lr scheduler
if config.TRAIN.LR_SCHEDULE == 'plateau':
print("Warning: not support resuming on plateau lr schedule!")
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='max',
factor=config.TRAIN.LR_FACTOR,
patience=1,
verbose=True,
threshold=1e-4,
threshold_mode='rel',
cooldown=2,
min_lr=0,
eps=1e-8)
elif config.TRAIN.LR_SCHEDULE == 'triangle':
lr_scheduler = WarmupLinearSchedule(optimizer,
config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
t_total=int(config.TRAIN.END_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS),
last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
elif config.TRAIN.LR_SCHEDULE == 'step':
lr_iters = [
int(epoch * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS)
for epoch in config.TRAIN.LR_STEP]
lr_scheduler = WarmupMultiStepLR(optimizer, milestones=lr_iters,
gamma=config.TRAIN.LR_FACTOR,
warmup_factor=config.TRAIN.WARMUP_FACTOR,
warmup_iters=config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0,
warmup_method=config.TRAIN.WARMUP_METHOD,
last_epoch=int(config.TRAIN.BEGIN_EPOCH * len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS) - 1)
else:
raise ValueError("Not support lr schedule: {}.".format(config.TRAIN.LR_SCHEDULE))
if config.TRAIN.SWA:
assert config.TRAIN.SWA_START_EPOCH < config.TRAIN.END_EPOCH
if not config.TRAIN.DEBUG:
true_epoch_step = len(train_loader) / config.TRAIN.GRAD_ACCUMULATE_STEPS
else:
true_epoch_step = 50
step_per_cycle = config.TRAIN.SWA_EPOCH_PER_CYCLE * true_epoch_step
# swa_scheduler = torch.optim.lr_scheduler.CyclicLR(
# optimizer,
# base_lr=config.TRAIN.SWA_MIN_LR * batch_size,
# max_lr=config.TRAIN.SWA_MAX_LR * batch_size,
# cycle_momentum=False,
# step_size_up=10,
# step_size_down=step_per_cycle - 10)
anneal_steps = max(1, (config.TRAIN.END_EPOCH - config.TRAIN.SWA_START_EPOCH) // 4) * step_per_cycle
anneal_steps = int(anneal_steps)
swa_scheduler = SWALR(
optimizer,
anneal_epochs=anneal_steps,
anneal_strategy='linear',
swa_lr=config.TRAIN.SWA_MAX_LR * batch_size
)
else:
swa_scheduler = None
if config.TRAIN.ROC_STAR:
assert config.TRAIN.ROC_START_EPOCH < config.TRAIN.END_EPOCH
roc_star = RocStarLoss(
delta=2.0,
sample_size=config.TRAIN.ROC_SAMPLE_SIZE,
sample_size_gamma=config.TRAIN.ROC_SAMPLE_SIZE * 2,
update_gamma_each=config.TRAIN.ROC_SAMPLE_SIZE,
)
else:
roc_star = None
# broadcast parameter and optimizer state from rank 0 before training start
if args.dist:
for v in model.state_dict().values():
distributed.broadcast(v, src=0)
# for v in optimizer.state_dict().values():
# distributed.broadcast(v, src=0)
best_epoch = torch.tensor(validation_monitor.best_epoch).cuda()
best_val = torch.tensor(validation_monitor.best_val).cuda()
distributed.broadcast(best_epoch, src=0)
distributed.broadcast(best_val, src=0)
validation_monitor.best_epoch = best_epoch.item()
validation_monitor.best_val = best_val.item()
# apex: amp fp16 mixed-precision training
if config.TRAIN.FP16:
# model.apply(bn_fp16_half_eval)
model, optimizer = amp.initialize(model, optimizer,
opt_level='O2',
keep_batchnorm_fp32=False,
loss_scale=config.TRAIN.FP16_LOSS_SCALE,
min_loss_scale=32.0)
if args.dist:
model = Apex_DDP(model, delay_allreduce=True)
# NOTE: final_model == model if not using SWA, else final_model == AveragedModel(model)
final_model = train(
model, optimizer, lr_scheduler,
train_loader, train_sampler, train_metrics,
config.TRAIN.BEGIN_EPOCH,
config.TRAIN.END_EPOCH,
logger,
fp16=config.TRAIN.FP16,
rank=rank,
writer=writer,
batch_end_callbacks=batch_end_callbacks,
epoch_end_callbacks=epoch_end_callbacks,
validation_monitor=validation_monitor,
clip_grad_norm=config.TRAIN.CLIP_GRAD_NORM,
gradient_accumulate_steps=config.TRAIN.GRAD_ACCUMULATE_STEPS,
ckpt_path=config.TRAIN.CKPT_PATH,
swa_scheduler=swa_scheduler,
swa_start_epoch=config.TRAIN.SWA_START_EPOCH,
swa_cycle_epoch=config.TRAIN.SWA_EPOCH_PER_CYCLE,
swa_use_scheduler=config.TRAIN.SWA_SCHEDULE,
roc_star=roc_star,
roc_star_start_epoch=config.TRAIN.ROC_START_EPOCH,
roc_interleave=config.TRAIN.ROC_INTERLEAVE,
debug=config.TRAIN.DEBUG,
)
return rank, final_model
| [
"[email protected]"
] | |
adc963ff1204c00fb87b90795664b46370b2735d | a1e10efa6a131e305351909a437bfa5d083d4513 | /summary_product_report/reports/product_sales_qty_report/product_sales_qty_report.py | 399f5bde169bc9bdd57785835dabc268a4757791 | [] | no_license | h3llopy/glodok_extra_addons_od12 | 5089412b36b0dafdb17235a627c8e33ed2acbb1f | 5c493962b93254fb2ca8cd674c4fe153ac86d680 | refs/heads/master | 2022-12-05T06:22:08.182302 | 2020-08-29T14:32:30 | 2020-08-29T14:32:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,188 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _,tools
from odoo.exceptions import UserError,ValidationError
import logging
_logger = logging.getLogger(__name__)
class ProductSalesQtyReport(models.Model):
_name = 'product.sales.qty.report'
_description = 'Product Sales Qty Report'
_auto = False
product_id = fields.Many2one('product.product', string="Product", readonly=True)
product_tmpl_id = fields.Many2one('product.template', string="Product Templat", readonly=True)
product_uom_qty = fields.Float('Qty', readonly=True, group_operator='sum')
date_series = fields.Date(string="Date Series", readonly=True)
uom_id = fields.Many2one('product.uom', string="UOM", related="product_tmpl_id.uom_id", readonly=True)
def _select(self):
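        # Reading of the SQL below: a generated one-row-per-day date series is cross-joined with
        # every product, so days with no matching sale order lines in state 'done' still appear with qty 0.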
query = """
WITH series AS (
SELECT date_trunc('day', dd)::date as ds
FROM generate_series(
(
SELECT NOW()-(CONCAT(icp.value,' day')::interval)
FROM ir_config_parameter icp
-- WHERE key='auth_signup.template_user_id'
WHERE key='interval.product_sales_qty_report'
)
, now()::timestamp
, '1 day'::interval
) dd
)
SELECT
CONCAT(to_char(series.ds, 'YYYYMMDD'), LPAD(prod.product_tmpl_id::text,4,'0'))::bigint AS id
,prod.id as product_id
,prod.product_tmpl_id
,SUM(CASE WHEN ps.product_id is NOT NULL THEN ps.product_uom_qty ELSE 0 END) AS product_uom_qty
,series.ds as date_series
FROM series
CROSS JOIN product_product prod
LEFT JOIN (
SELECT
pp.id as product_id
,pp.product_tmpl_id
,(CASE WHEN sol.id IS NOT NULL THEN sol.product_uom_qty ELSE 0 END) AS product_uom_qty
,so.date_order
,so.id as so_id
FROM product_product AS pp
LEFT JOIN sale_order_line AS sol ON sol.product_id = pp.id AND sol.state='done'
LEFT JOIN sale_order AS so ON so.id = sol.order_id
) ps ON prod.id=ps.product_id and series.ds::date = ps.date_order::date
GROUP BY prod.id,series.ds
"""
return query
@api.model_cr
def init(self):
tools.drop_view_if_exists(self.env.cr, self._table)
self.env.cr.execute("""CREATE or REPLACE VIEW %s as (
%s
)""" % (self._table, self._select())) | [
"[email protected]"
] | |
7ea725be597104cfa2dac6fd1b1f81b9726da36d | 13f4a06cd439f579e34bf38406a9d5647fe7a0f3 | /script/try_python/try_Django/mzitu_com/mzitu_com_project/mzitu_com_project/home_page_transform.py | 595accd90ae239ec0258a9fae2673b5f268d2f68 | [] | no_license | edt-yxz-zzd/python3_src | 43d6c2a8ef2a618f750b59e207a2806132076526 | 41f3a506feffb5f33d4559e5b69717d9bb6303c9 | refs/heads/master | 2023-05-12T01:46:28.198286 | 2023-05-01T13:46:32 | 2023-05-01T13:46:32 | 143,530,977 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,505 | py |
__all__ = '''
home_page_transform__url
home_page_transform__page
extract_mzitu_com__the_all_or_old_page
'''.split()
#NotFoundError
#find
from nn_ns.internet.webpage.fetch_webpage import fetch_webpage
from seed.helper.repr_input import repr_helper
from bs4 import BeautifulSoup
import re
from urllib.parse import urlparse, urlunparse
from pathlib import PurePosixPath as Path
from .DATA import project_per_page_url_route
r'''
def make_project_per_page_url_base(new_url):
parse_result = urlparse(new_url)
parts = (parse_result.scheme, parse_result.netloc, project_per_page_url_route, '', '', '')
project_per_page_url_base = urlunparse(parts)
return project_per_page_url_base
'''
r'''
def home_page_transform__url(old_url, new_project_website):
# .DATA.website_all_old/website_all_new
# e.g. home_page_transform__url(.DATA.website_all_old, 'http://127.0.0.1:8000/')
# e.g. home_page_transform__url('https://www.mzitu.com/old/', 'http://127.0.0.1:8000/')
# e.g. home_page_transform__url('https://www.mzitu.com/all/', 'http://127.0.0.1:8000/')
#
assert new_project_website[-1:] == '/'
'''
project_per_page_url_relative_base = Path(project_per_page_url_route)
def home_page_transform__url(old_url):
# .DATA.website_all_old/website_all_new
# e.g. home_page_transform__url(.DATA.website_all_old)
# e.g. home_page_transform__url('https://www.mzitu.com/old/')
# e.g. home_page_transform__url('https://www.mzitu.com/all/')
#
html_page = fetch_webpage(old_url)
#project_per_page_url_base = make_project_per_page_url_base(new_url)
assert project_per_page_url_route[:1] == '/'
#project_per_page_url_base = new_project_website + project_per_page_url_route[1:]
#project_per_page_url_base = Path(new_project_website, project_per_page_url_route)
project_per_page_url_base = project_per_page_url_relative_base
return home_page_transform__page(html_page, project_per_page_url_base)
class NotFoundError(Exception):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __repr__(self):
return repr_helper(self, *self.args, **self.kwargs)
def find(soup_find, *args, **kwargs):
may_r = soup_find(*args, **kwargs)
if may_r is None: raise NotFoundError(*args, **kwargs)
r = may_r
return r
def extract_mzitu_com__the_all_or_old_page(html_page):
# html_page of .../all/ or .../old/
#
# html_page -> (html_title, [((year::str, month::str), [(old_url::str, title::str)])])
#
soup = BeautifulSoup(html_page, 'lxml')
[html_title] = soup.head.find_all('title')
html_title = html_title.get_text()
[class_main] = soup.find_all('div', {'class': 'main'})
[class_all] = class_main.find_all('div', {'class': 'all'})
class_years = class_all.find_all('div', {'class': 'year'})
#what's the name of tail-string?#?No such name in bs4?
#print(dir(class_main))
#import sys; sys.exit()
year_month__url_title_pairs__pairs = []
for class_year in class_years:
year = class_year.get_text()
class_archives = find(class_year.find_next_sibling, 'ul', {'class':'archives'})
class_monthes = class_archives.find_all('p', {'class': 'month'})
for class_month in class_monthes:
month = class_month.get_text()
class_url = find(class_month.find_next_sibling, 'p', {'class':'url'})
href_children = class_url.find_all('a')
year_month = year, month
url_title_pairs = []
for href_child in href_children:
url = href_child['href']
title = href_child.get_text()
url_title_pairs.append((url, title))
year_month__url_title_pairs__pairs.append((year_month, url_title_pairs))
return html_title, year_month__url_title_pairs__pairs
new_html_begin = r'''
<!DOCTYPE html>
<html>
<head> <title></title> </head>
<body>
<ul>
</ul>
</body>
</html>
'''
body_tpl = r'''
{year} {month} {new_url} {title}
'''
def old_url2new_url(old_url, project_per_page_url_base:Path):
project_per_page_url = project_per_page_url_base / Path(old_url).name
new_url = project_per_page_url
return new_url
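# e.g. (hypothetical values) old_url 'https://www.mzitu.com/123456' with base Path('/per_page')
# yields Path('/per_page/123456') - only the last path component of the old url is kept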
def home_page_transform__page(html_page, project_per_page_url_base:Path):
(html_title, year_month__url_title_pairs__pairs
) = extract_mzitu_com__the_all_or_old_page(html_page)
new_soup = BeautifulSoup(new_html_begin, 'lxml')
[new_title_tag] = new_soup.find_all('title')
new_title_tag.append(html_title)
[new_ul_tag] = new_soup.find_all('ul')
for (year, month), url_title_pairs in year_month__url_title_pairs__pairs:
new_li_tag = new_soup.new_tag('li')
new_ul_tag.append(new_li_tag)
fst_new_p_tag = new_soup.new_tag('p')
new_li_tag.append(fst_new_p_tag)
fst_new_p_tag.string = f'{year} {month}'
snd_new_p_tag = new_soup.new_tag('p')
new_li_tag.append(snd_new_p_tag)
for old_url, title in url_title_pairs:
new_url = old_url2new_url(old_url, project_per_page_url_base)
new_href_tag = new_soup.new_tag('a', href=new_url, target="_blank")
new_href_tag.string = title
new_br_tag = new_soup.new_tag('br')
snd_new_p_tag.append(new_href_tag)
snd_new_p_tag.append(new_br_tag)
#new_html_page = new_soup.encode('gb18030')
new_html_page = str(new_soup)
return new_html_page
| [
"[email protected]"
] | |
eb7284de08d0db2639a08a3dc6348c377f0be6e4 | b53e3d57d31a47a98d87141e44a5f8940ee15bca | /src/utils/socket_client/socket_client.py | d64c219641216ff48c67160faa9ad2d93b41bdeb | [
"MIT"
] | permissive | Chrissimple/program-y | 52177fcc17e75fb97ab3993a4652bcbe7906bd58 | 80d80f0783120c2341e6fc57e7716bbbf28a8b3f | refs/heads/master | 2020-03-29T13:20:08.162177 | 2018-09-26T19:09:20 | 2018-09-26T19:09:20 | 149,952,995 | 1 | 0 | null | 2018-09-23T06:11:04 | 2018-09-23T06:11:04 | null | UTF-8 | Python | false | false | 651 | py | # client.py
import socket
import sys
import json
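# example invocation (host, port and client id below are hypothetical):
#   python socket_client.py 127.0.0.1 9999 "Hello there" client-01 1024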
host = sys.argv[1]
port = int(sys.argv[2])
question = sys.argv[3]
clientid = sys.argv[4]
max_size = 1024
if len(sys.argv) == 6:
    max_size = int(sys.argv[5])
payload = {"question": question, "clientid": clientid}
json_data = json.dumps(payload)
# create a socket object
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# connection to hostname on the port.
s.connect((host, port))
s.send(json_data.encode('utf-8'))
# Receive no more than 1024 bytes
received = s.recv(max_size)
s.close()
json_data = json.loads(received.decode('utf-8'))
print("Answer: %s" % json_data['answer'])
| [
"[email protected]"
] | |
c5a78fbd2e9b6d4765751767fa9482d51b71cf16 | 7986ec6498e3f93967fa9bfe2b6a9d4056138293 | /Protheus_WebApp/Modules/SIGAFIN/FINA171TESTCASE.py | d514f8aee6362dfcd0583815fa42b1334b983c59 | [
"MIT"
] | permissive | HelenaAdrignoli/tir-script-samples | 7d08973e30385551ef13df15e4410ac484554303 | bb4f4ab3a49f723216c93f66a4395e5aa328b846 | refs/heads/master | 2023-02-21T11:26:28.247316 | 2020-04-28T16:37:26 | 2020-04-28T16:37:26 | 257,304,757 | 0 | 0 | MIT | 2020-04-20T14:22:21 | 2020-04-20T14:22:20 | null | UTF-8 | Python | false | false | 1,786 | py | from tir import Webapp
import unittest
class FINA171(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup('SIGAFIN','12/09/2019','T1','D MG 01 ','06')
inst.oHelper.Program("FINA171")
def test_FINA171_CT006(self):
self.oHelper.SetButton("Incluir")
self.oHelper.SetBranch('D MG 01')
self.oHelper.SetValue('Numero','FITIR1')
self.oHelper.SetValue('Modelo',"A")
self.oHelper.SetValue('Operacao',"CDB")
self.oHelper.SetValue('Banco',"033")
self.oHelper.SetValue('Agencia', '00001')
self.oHelper.SetValue('Conta Banco', '0000000004')
self.oHelper.SetValue('Vlr.Operacao', '1000,00')
self.oHelper.SetValue('Moeda', '1')
self.oHelper.SetButton("Salvar")
self.oHelper.SetButton("Cancelar")
self.oHelper.SetButton("Visualizar")
self.oHelper.CheckResult('EH_NUMERO','FITIR1')
self.oHelper.CheckResult('EH_APLEMP','APL')
self.oHelper.CheckResult('EH_TIPO','CDB')
self.oHelper.CheckResult('EH_BANCO','033')
self.oHelper.CheckResult('EH_AGENCIA','00001')
self.oHelper.CheckResult('EH_CONTA','0000000004')
self.oHelper.CheckResult('EH_VALOR','1000,00')
self.oHelper.SetButton("Cancelar")
self.oHelper.SearchBrowse(f"D MG 01 FITIR101")
self.oHelper.SetButton("Outras Ações", "Excluir")
self.oHelper.CheckResult('EH_NUMERO','FITIR1')
self.oHelper.CheckResult('EH_APLEMP','APL')
self.oHelper.CheckResult('EH_TIPO','CDB')
self.oHelper.CheckResult('EH_BANCO','033')
self.oHelper.CheckResult('EH_AGENCIA','00001')
self.oHelper.CheckResult('EH_CONTA','0000000004')
self.oHelper.CheckResult('EH_VALOR','1000,00')
self.oHelper.SetButton("Confirmar")
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
d391896c768c79d6b6c61cc58b27a4c668f5ec58 | bf0d7c8d987d5fda14208eb9ce70e31c83c25c25 | /dl-stl/nn_util.py | d8f272e50ceddd2ac441aa7907285d1d7fa4afd1 | [] | no_license | SummerBigData/SamRepo | 7876e9393c7175e300e175a60c17633c3b23a1bb | fd84ad654370faa48c084349952c2921fde4032d | refs/heads/master | 2020-03-18T05:09:08.787956 | 2018-06-18T17:11:49 | 2018-06-18T17:11:49 | 134,327,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,666 | py | from scipy.io import loadmat
from scipy.special import expit
from sklearn.preprocessing import OneHotEncoder
import scipy.optimize as opt
import numpy as np
import sae_util
from read_mnist import *
#from prepro import crop
import os
from functools import reduce
""" Global variables """ # I know they're gross
Js, accs = [], [] # lists for tracking the cost and accuracy over time
prog_name = 'nn' # the name of the program for avoiding collisions in log files
last_tflat = np.zeros((1,1)) # the last theta vector if we need to save it early
global_step = 0 # tracks how many times the back_propagation algorithm is called
""" Data loading """
def shuffle(X, y):
X = np.hstack((X, y.reshape((len(y), 1))))
np.random.shuffle(X)
return X[:,:-1], X[:,-1]
def onehot(y):
onehot_encoder = OneHotEncoder(sparse=False)
return onehot_encoder.fit_transform(y.reshape((len(y), 1))) # onehot encode y
def shuffle_oh(X, y):
X, y = shuffle(X, y)
return X, onehot(y)
# load the data from the .mat file and do some preprocessing
def load_data(all_data=False, train=True, split=False, num_sample=500):
# if we are using the full dataset, we can load it from the -ubyte.gz files
# if we are using the toy dataset, we can load it from the .mat files
# using the full dataset, we have a train and a test set, to the train argument tells us
# which dataset to read
# using the toy dataset, there is no test set, so split should be set to True so that
# we can say that the last 10th of the train set is actually the test set
# if you are using the full dataset, don't set split to True, that would just be pointless
if all_data:
filenames = os.listdir('./data')
filenames = [f for f in filenames if f.endswith('.gz')]
filenames = [os.path.join('./data', f) for f in filenames]
if train:
filenames = [f for f in filenames if 'train' in f]
else:
filenames = [f for f in filenames if not 'train' in f]
if not train:
num_sample = None
if 'images' in filenames[0]:
#X, y = read_images(filenames[0], n=num_sample), read_labels(filenames[1], n=num_sample)
X, y = read_idx(filenames[0], n=num_sample), read_idx(filenames[1], n=num_sample)
else:
#X, y = read_images(filenames[1], n=num_sample), read_labels(filenames[0], n=num_sample)
X, y = read_idx(filenames[1], n=num_sample), read_idx(filenames[0], n=num_sample)
#X = crop(X)
X = X / 255.0
X = X.reshape((X.shape[0], X.shape[1]*X.shape[2]))
else:
mat = loadmat('data/data.mat')
X, y = mat['X'], mat['y']
prepro = lambda i: 0 if i == 10 else i
v_prepro = np.vectorize(prepro)
y = v_prepro(y.flatten())
if not all_data:
if split:
test_len = 500//10
idxs = reduce(
lambda x, y: x+y,
[[500*i-j for j in reversed(range(1,test_len+1))] for i in range(1,11)])
X_test, y_test = X[idxs], y[idxs]
idxs = [i for i in range(5000) if not i in idxs]
train, test = shuffle_oh(X[idxs], y[idxs]), shuffle_oh(X_test, y_test)
return train[0], train[1], test[0], test[1]
return shuffle_oh(X, y)
return X, onehot(y)
# generate an array of random matrices according to the given sizes
def random_thetas(sizes):
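    # note: eps = sqrt(6 / (fan_in + fan_out + 1)) is a Glorot/Xavier-style bound, so each
    # layer's starting weights are drawn from a small range matched to its shape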
def eps(size):
return np.sqrt(6.0/(size[0]+size[1]+1.0))
return [np.random.uniform(-eps(s), eps(s), s) for s in sizes]
# calculate the cost of the model
def cost(h, y, m, k, lmbda=0, thetas=[]):
cost = -np.mean(np.sum(y*np.log(h), axis=1), axis=0)
if lmbda > 0:
weight_sum = sum(list(map(
lambda t: np.sum(t[:,1:].flatten()**2),
thetas)))
cost += weight_sum * lmbda / (2.0*m)
return cost
""" FORWARD PROP """
# propagate the dataset forward through the network given a set of parameters
def forward_prop(X, thetas):
a = X
a_arr = []
for i, theta in enumerate(thetas):
a = np.hstack((np.ones((len(a), 1)), a))
a_arr.append(a)
z = a.dot(theta.T)
if i == len(thetas)-1:
a = np.exp(z)
denom = np.sum(a, axis=1)
a = a / denom.reshape(len(a), 1)
else:
a = expit(z)
a_arr.append(a)
return a_arr
""" BACKPROP """
# using backpropagation, calculate the gradient with respect to all weights of the model
def back_prop(a_arr, y, m, L, thetas, lmbda=0):
global global_step
global_step += 1
if global_step % 20 == 0:
print 'Global Step: %d' % (global_step)
big_del, lil_del = [], []
k = len(y[0])
for a in a_arr:
lil_del.append(np.zeros((a[0].shape[0], 1)))
for t in thetas:
big_del.append(np.zeros(t.shape))
for i in range(m):
lil_del[-1] = -(y[i] - a_arr[-1][i])
lil_del[-1] = lil_del[-1].reshape((k, 1))
for l in range(2, L+1):
last_delta = lil_del[-l+1]
if l > 2: # if it is a hidden layer then ignore the bias error
last_delta = last_delta[1:,:]
curr_delta = thetas[-l+1].T.dot(last_delta)
a = a_arr[-l][i] # get this layer's activation
s = len(a) # size of the current layer
a = a.reshape((s, 1))
big_del[-l+1] += last_delta.dot(a.T) # increment big delta for the layer accordingly
gprime = np.multiply(a, 1-a)
gprime = gprime.reshape((s, 1))
curr_delta = np.multiply(curr_delta, gprime) # multiply the error by it
lil_del[-l][:] = curr_delta[:]
for i, bd in enumerate(big_del):
big_del[i] = bd/float(m)
if lmbda > 0:
reg_mat = thetas[i][:]
reg_mat[:,0] = 0
big_del[i] += lmbda*reg_mat/float(m)
return big_del
def predict(X, thetas, Ws, bs, return_h=False):
X = sae_util.forward_prop(X, Ws, bs)[0][1]
h = forward_prop(X, thetas)[-1]
pred = np.argmax(h, axis=1)
if return_h:
return (h, pred)
return pred
def thetas_from_flat(theta_flat, sizes):
thetas = []
idx = 0
for size in sizes:
n = size[0]*size[1]
thetas.append(
np.array(theta_flat[idx:idx+n]).reshape(size))
idx += n
return thetas
def check_grad(X, y, Ws, bs, m, n, k, L, sizes, lmbda=0):
print 'Calculating numerical gradient'
thetas = random_thetas(sizes)
theta_flat = np.array([], dtype=float)
for theta in thetas:
theta_flat = np.append(theta_flat, theta.flatten())
X = sae_util.forward_prop(X, Ws, bs)[0][1]
print X.shape
def f(thetas, *args):
thetas = thetas_from_flat(thetas, sizes)
h = forward_prop(X, thetas)[-1]
return cost(h, y, m, k, lmbda=lmbda, thetas=thetas)
def fgrad(thetas, *args):
thetas = thetas_from_flat(thetas, sizes)
a_arr = forward_prop(X, thetas)
dels = back_prop(a_arr, y, m, L, thetas, lmbda=lmbda)
g = np.array([], dtype=float)
for d in dels:
g = np.append(g, d.flatten())
return g
return opt.check_grad(f, fgrad, theta_flat)
def load_test(all_data=False):
return load_data(all_data=all_data, train=False)
def calc_acc(X_test, y_test, thetas):
h = forward_prop(X_test, thetas)[-1]
pred = np.argmax(h, axis=1)
actual = np.argmax(y_test, axis=1)
return np.sum(pred == actual) / float(len(y_test))
def write_cost_and_acc():
global Js, accs
with open('logs/'+prog_name+'/cost_and_acc.csv', 'w') as f:
f.write('Cost, Accuracy\n')
for j, acc in zip(Js, accs):
f.write('%f, %f\n' % (j, acc))
def write_last_theta():
global last_tflat, prog_name
np.savetxt('logs/'+prog_name+'/weights_nn.txt', last_tflat)
# train a network using convergent gradient minimization
def train(X, y, Ws, bs, m, n, k, L, sizes, lmbda=0, max_iter=400, test_set=None, name='nn'):
global global_step, Js, accs, prog_name, last_tflat
prog_name = name
    X_test, y_test = test_set if test_set is not None else (None, None)
if X_test is None or y_test is None:
X_test, y_test = load_test(all_data=True)
X = sae_util.forward_prop(X, Ws, bs)[0][1]
X_test = sae_util.forward_prop(X_test, Ws, bs)[0][1]
thetas = random_thetas(sizes)
theta_flat = np.array([], dtype=float)
for theta in thetas:
theta_flat = np.append(theta_flat, theta.flatten())
def f(thetas, *args):
global last_tflat, Js, accs, global_step
last_tflat = np.copy(thetas) # store the last theta for if we get booted
thetas = thetas_from_flat(thetas, sizes)
h = forward_prop(X, thetas)[-1]
J = cost(h, y, m, k, lmbda=lmbda, thetas=thetas)
if global_step % 20 == 0:
Js.append(J)
accs.append(calc_acc(X_test, y_test, thetas))
print Js[-1], accs[-1]
write_cost_and_acc()
write_last_theta()
return J
def fgrad(thetas, *args):
global last_tflat, Js, accs
last_tflat = np.copy(thetas) # store the last theta vector in case we have to exit early
thetas = thetas_from_flat(thetas, sizes)
a_arr = forward_prop(X, thetas)
dels = back_prop(a_arr, y, m, L, thetas, lmbda=lmbda)
g = np.array([], dtype=float)
for d in dels:
g = np.append(g, d.flatten())
return g
return opt.minimize(
f, theta_flat, jac=fgrad,
method='CG', tol=1e-5,
options={'disp': True, 'maxiter': max_iter}).x
| [
"[email protected]"
] | |
78bf6e82317ffebd4169455cfddeb3ae20aa7715 | 8b0fdc693d2551b38d9d99fadc848b333add69a5 | /Sesion-04/Ejemplo-03/Bedutravels/Bedutravels/urls.py | a1d9b62d633ad4f342931ebac32211dee0e2cf66 | [] | no_license | mayela/Backend-con-Python-Expert | cb2e7f924f37e6c321ed75fc74aad3cf557ecb66 | 12adb17c0227a48e2252e1c830f0706e9dc36682 | refs/heads/master | 2022-11-12T19:14:22.365643 | 2020-07-04T03:18:29 | 2020-07-04T03:18:29 | 256,653,359 | 0 | 3 | null | 2020-04-18T02:27:47 | 2020-04-18T02:27:46 | null | UTF-8 | Python | false | false | 848 | py | """Bedutravels URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('tours.urls')),
path('perfiles/', include('perfiles.urls')),
]
| [
"[email protected]"
] | |
cd1d00359df9774b800509d1661d19dd46beb71a | 61bccf4117ba351365a2526603024a0e99e2559f | /pseudo/api_translators/python_api_handlers.py | 3c5ed44386fdc67cbeaac8e5745a3471fdeff16d | [
"MIT"
] | permissive | gitter-badger/pseudo | 781363ee0c116ee0d7dfce968b007708a33c8bc4 | 070a2649d47170927184ad19af8c32c7be4f7612 | refs/heads/master | 2021-01-22T15:43:29.890465 | 2016-03-07T03:42:19 | 2016-03-07T03:42:19 | 53,293,990 | 0 | 0 | null | 2016-03-07T03:48:09 | 2016-03-07T03:48:08 | null | UTF-8 | Python | false | false | 2,630 | py | from pseudo.pseudo_tree import Node, call, method_call, local, assignment, to_node
from pseudo.api_handlers import BizarreLeakingNode, NormalLeakingNode
def expand_map(receiver, func):
if func.type == 'lambda':
return Node(
'_py_list_comp',
sequence=receiver)
else:
return call('map', [func, receiver])
def expand_filter(receiver, func):
if func.type == 'lambda':
return Node(
'_py_list_comp')
else:
return call('filter', [func, receiver])
def expand_set_slice(receiver, from_=None, to=None, value=None, pseudo_type=None):
s = expand_slice(receiver, from_, to, pseudo_type)
return assignment(s, value)
def expand_slice(receiver, from_=None, to=None, pseudo_type=None):
if from_:
if to:
if from_.type == 'int' and from_.value == 0:
return Node('_py_slice_to', sequence=receiver, to=to, pseudo_type=pseudo_type)
else:
return Node('_py_slice', sequence=receiver, from_=from_, to=to, pseudo_type=pseudo_type)
else:
return Node('_py_slice_from', sequence=receiver, from_=from_, pseudo_type=pseudo_type)
elif to:
return Node('_py_slice_to', sequence=receiver, to=to, pseudo_type=pseudo_type)
else:
return None
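# rough example (hypothetical node values): expand_slice(seq, from_=to_node(2)) returns a
# '_py_slice_from' node, which the python code generator is expected to render as seq[2:]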
class ReadFile(BizarreLeakingNode):
'''
transforms `io:read_file`
`io:read_file(name)`
to
`with open(name, 'r') as _f:
<target>/_file_contents = f.read()`
'''
def temp_name(self, target):
return '_file_contents'
# assign : as_assignment
# block-level: as_expression
# inside: as_assignment with temp_name as target
def as_expression(self):
return [Node(
'_py_with',
call=call('open', [self.args[0], to_node('r')], 'File'),
context='_f',
block=[method_call(local('_f', 'File'), 'read', [], 'String')],
pseudo_type='Void')], None
def as_assignment(self, target):
expression = self.as_expression()[0][0]
expression.block[0] = assignment(target, expression.block[0])
return [expression]
class WriteFile(NormalLeakingNode):
'''
transforms `io:write_file`
`io:write_file(name, stuff)`
`with open(name, 'w') as _f:
_f.write(stuff)`
'''
def as_expression(self):
return [], Node(
'_py_with',
call=call('open', [self.args[0], to_node('w')], 'File'),
context='_f',
block=[method_call(local('_f', 'File'), 'write', [self.args[1]], 'Void')],
pseudo_type='Void')
| [
"[email protected]"
] | |
2a208e38f3ee3b3d5ad2f906c8d4f09584af8142 | 3d7383bd777c9c49525ac7a0565b21ddea22f480 | /draw/migrations/0003_canvas_in_use.py | 285857b18f75cec4f092b92ad0eb9c1de1bb493c | [] | no_license | eranfuchs1/draw-factory | 34b785f97960adc7f4ddf105c355f83d0c83f7d7 | 064f481a5f42a72d6ca2945b145f688ca819ac39 | refs/heads/main | 2023-08-17T21:24:40.737542 | 2021-10-10T13:53:32 | 2021-10-10T13:53:32 | 411,957,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # Generated by Django 3.2.7 on 2021-09-16 13:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('draw', '0002_auto_20210910_0639'),
]
operations = [
migrations.AddField(
model_name='canvas',
name='in_use',
field=models.BooleanField(null=True),
),
]
| [
"xer@xer-labtop"
] | xer@xer-labtop |
9040a4631960319ec6148982d433cd13b8960be8 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/4975214/snippet.py | 89b2ebfbf651f8646080afc7aa78e741eb01c122 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 1,947 | py | #!/usr/bin/env python3
# Joonas Kuorilehto 2013
# This script is Public Domain.
import csv
import subprocess
import pipes
from datetime import datetime
import smtplib
from email.mime.text import MIMEText
EMAIL = **************
ADB_BINARY = "adt-bundle-linux-x86_64/sdk/platform-tools/adb"
SMS_DB = "/data/data/com.android.providers.telephony/databases/mmssms.db"
def android_sql(sql_query):
cmd = 'sqlite3 -csv -header %s %s' % (SMS_DB, pipes.quote(sql_query))
shell_cmd = 'su -c {}'.format(pipes.quote(cmd))
p = subprocess.Popen([ADB_BINARY, 'shell', shell_cmd],
stdout=subprocess.PIPE, universal_newlines=True)
sqlite_out, sqlite_stderr = p.communicate()
reader = csv.DictReader(sqlite_out.split("\n"))
return reader
def get_unread_messages():
# Format message; get unread SMS messages from Android
result = android_sql("SELECT _id, address, date, body FROM sms WHERE read=0;")
message_ids = []
email_message = ""
for msg in result:
message_ids.append(msg['_id'])
date = datetime.fromtimestamp(int(int(msg['date'])/1000))
m = "{} [{}]\n {}\n\n".format(date, msg['address'], msg['body'])
email_message += m
return (message_ids, email_message)
def send_email(message_content, sender=EMAIL, to=EMAIL,
subject="Received SMS messages", charset="UTF-8"):
# Create a text/plain message
msg = MIMEText(message_content.encode(charset), _charset=charset)
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = to
# Send the message via our own SMTP server.
s = smtplib.SMTP('localhost')
s.send_message(msg)
s.quit()
def main():
message_ids, email_message = get_unread_messages()
if len(message_ids) > 0:
send_email(email_message)
read_ids = ",".join(message_ids)
android_sql("UPDATE sms SET read=1 WHERE _id in ({});".format(read_ids))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
2c9a8ded7947bcb9ce4a3f05b9e33107d4f561f4 | 297497957c531d81ba286bc91253fbbb78b4d8be | /testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome.py | c04e715a20e5eaa0635c562676c17e8f89f45cf7 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | marco-c/gecko-dev-comments-removed | 7a9dd34045b07e6b22f0c636c0a836b9e639f9d3 | 61942784fb157763e65608e5a29b3729b0aa66fa | refs/heads/master | 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 | NOASSERTION | 2019-09-29T01:27:49 | 2019-09-27T10:44:24 | C++ | UTF-8 | Python | false | false | 5,676 | py |
from . import chrome_spki_certs
from .base import WebDriverBrowser, require_arg
from .base import NullBrowser
from .base import get_timeout_multiplier
from .base import cmd_arg
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorwebdriver import WebDriverCrashtestExecutor
from ..executors.base import WdspecExecutor
from ..executors.executorchrome import (
ChromeDriverPrintRefTestExecutor,
ChromeDriverRefTestExecutor,
ChromeDriverTestharnessExecutor,
)
__wptrunner__ = {"product": "chrome",
"check_args": "check_args",
"browser": "ChromeBrowser",
"executor": {"testharness": "ChromeDriverTestharnessExecutor",
"reftest": "ChromeDriverRefTestExecutor",
"print-reftest": "ChromeDriverPrintRefTestExecutor",
"wdspec": "WdspecExecutor",
"crashtest": "WebDriverCrashtestExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"update_properties": "update_properties",
"timeout_multiplier": "get_timeout_multiplier",}
def check_args(**kwargs):
require_arg(kwargs, "webdriver_binary")
def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
return {"binary": kwargs["binary"],
"webdriver_binary": kwargs["webdriver_binary"],
"webdriver_args": kwargs.get("webdriver_args")}
def executor_kwargs(logger, test_type, test_environment, run_info_data,
**kwargs):
sanitizer_enabled = kwargs.get("sanitizer_enabled")
if sanitizer_enabled:
test_type = "crashtest"
executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data,
**kwargs)
executor_kwargs["close_after_done"] = True
executor_kwargs["sanitizer_enabled"] = sanitizer_enabled
executor_kwargs["reuse_window"] = kwargs.get("reuse_window", False)
capabilities = {
"goog:chromeOptions": {
"prefs": {
"profile": {
"default_content_setting_values": {
"popups": 1
}
}
},
"excludeSwitches": ["enable-automation"],
"w3c": True
}
}
chrome_options = capabilities["goog:chromeOptions"]
if kwargs["binary"] is not None:
chrome_options["binary"] = kwargs["binary"]
chrome_options["args"] = []
chrome_options["args"].append("--ignore-certificate-errors-spki-list=%s" %
','.join(chrome_spki_certs.IGNORE_CERTIFICATE_ERRORS_SPKI_LIST))
chrome_options["args"].append("--autoplay-policy=no-user-gesture-required")
chrome_options["args"].append("--use-fake-device-for-media-stream")
chrome_options["args"].append("--use-fake-ui-for-media-stream")
chrome_options["args"].append("--use-fake-ui-for-fedcm")
chrome_options["args"].append("--short-reporting-delay")
chrome_options["args"].append("--host-resolver-rules=MAP nonexistent.*.test ~NOTFOUND, MAP *.test 127.0.0.1")
chrome_options["args"].append("--enable-features=SecurePaymentConfirmationBrowser")
address_space_overrides_ports = [
("http-private", "private"),
("http-public", "public"),
("https-private", "private"),
("https-public", "public"),
]
address_space_overrides_arg = ",".join(
f"127.0.0.1:{port_number}={address_space}"
for port_name, address_space in address_space_overrides_ports
for port_number in test_environment.config.ports.get(port_name, [])
)
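    # e.g. (hypothetical ports) "127.0.0.1:8001=private,127.0.0.1:8444=public", which tells Chrome to
    # treat those local wpt server ports as belonging to the given IP address spaces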
if address_space_overrides_arg:
chrome_options["args"].append(
"--ip-address-space-overrides=" + address_space_overrides_arg)
if kwargs["enable_mojojs"]:
chrome_options["args"].append("--enable-blink-features=MojoJS,MojoJSTest")
if kwargs["enable_swiftshader"]:
chrome_options["args"].extend(["--use-gl=angle", "--use-angle=swiftshader"])
if kwargs["enable_experimental"]:
chrome_options["args"].extend(["--enable-experimental-web-platform-features"])
if kwargs["binary_args"] is not None:
chrome_options["args"].extend(kwargs["binary_args"])
if ((kwargs["headless"] or test_type == "print-reftest") and
"--headless" not in chrome_options["args"]):
chrome_options["args"].append("--headless")
webtranport_h3_port = test_environment.config.ports.get('webtransport-h3')
if webtranport_h3_port is not None:
chrome_options["args"].append(
f"--origin-to-force-quic-on=web-platform.test:{webtranport_h3_port[0]}")
if test_type == "wdspec":
executor_kwargs["binary_args"] = chrome_options["args"]
executor_kwargs["capabilities"] = capabilities
return executor_kwargs
def env_extras(**kwargs):
return []
def env_options():
return {"server_host": "127.0.0.1"}
def update_properties():
return (["debug", "os", "processor"], {"os": ["version"], "processor": ["bits"]})
class ChromeBrowser(WebDriverBrowser):
def make_command(self):
return [self.webdriver_binary,
cmd_arg("port", str(self.port)),
cmd_arg("url-base", self.base_path),
cmd_arg("enable-chrome-logs")] + self.webdriver_args
| [
"[email protected]"
] | |
449a53f9deeb2701e3efcf585c996e3261bf2469 | 47243c719bc929eef1475f0f70752667b9455675 | /bungeni.buildout/branches/bungeni.buildout-refactor-2010-06-02/src/bungeni.main/bungeni/plonepas/tests/test_groupmanager.py | e3b5b1c457f18909d4cec8c2e9e29f42507e0cc5 | [] | no_license | malangalanga/bungeni-portal | bbf72ce6d69415b11287a8796b81d4eb6520f03a | 5cf0ba31dfbff8d2c1b4aa8ab6f69c7a0ae9870d | refs/heads/master | 2021-01-19T15:31:42.943315 | 2014-11-18T09:03:00 | 2014-11-18T09:03:00 | 32,453,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,943 | py |
# tests from sqlpasplugin ( GPL v2 )
from bungeni.plonepas.tests import basetestcase
from Products.PlonePAS.plugins.group import PloneGroup
class TestGroupManager( basetestcase.BaseTestCase ):
def afterSetUp( self ):
self.group_name = "ExampleReporters"
self.groupname = self.group_name
self.username = 'joe'
self.password = 'passw0rd'
self.source_groups = self.getPAS().bungeni_groups
self.source_users = self.getPAS().bungeni_users
def testAddGroup(self):
self.source_groups.addGroup( self.group_name )
ret = self.source_groups.enumerateGroups( id=self.group_name, exact_match=True )
self.assertEqual( len(ret), 1 )
self.source_groups.removeGroup( self.group_name )
def testRemoveGroup( self ):
# Is it empty?
ret = self.source_groups.enumerateGroups(id=self.group_name, exact_match=True)
self.assertEqual(len(ret), 0)
# allowRemove
ret = self.source_groups.allowDeletePrincipal(self.group_name)
self.assertEqual(ret, False)
# Add one
self.source_groups.addGroup(self.group_name)
ret = self.source_groups.enumerateGroups(id=self.group_name, exact_match=True)
self.assertEqual(len(ret), 1)
# allowRemove
ret = self.source_groups.allowDeletePrincipal(self.group_name)
self.assertEqual(ret, True)
# Delete one
self.source_groups.removeGroup(self.group_name)
ret = self.source_groups.enumerateGroups(id=self.group_name, exact_match=True)
self.assertEqual(len(ret), 0)
def testMembershipLifecycle(self):
# Create Group
self.source_groups.addGroup(self.groupname)
ret = self.source_groups.enumerateGroups(id=self.groupname, exact_match=True)
self.assertEqual(len(ret), 1)
# Create User
self.source_users.doAddUser(self.username, self.password)
ret = self.source_users.enumerateUsers(id=self.username, exact_match=True)
self.assertEqual(len(ret), 1)
# User should have no memberships
ret = self.source_groups.getGroupsForPrincipal(self.username)
self.assertEqual(len(ret), 0, "Database seems unclean")
# Add the user to the group
self.source_groups.addPrincipalToGroup(self.username, self.groupname)
ret = self.source_groups.getGroupsForPrincipal(self.username)
self.assertEqual(len(ret), 1, "Failed to add user to group")
self.assertEqual(ret[0], self.groupname)
# Remove the user from the group
self.source_groups.removePrincipalFromGroup(self.username,
self.groupname)
ret = self.source_groups.getGroupsForPrincipal(self.username)
self.assertEqual(len(ret), 0, "Failed to remove user from group")
# Cleanup
self.source_users.removeUser(self.username)
self.source_groups.removeGroup(self.groupname)
def testEnumerateGroups(self):
"groupmanager.enumerateGroups()"
ret = self.source_groups.enumerateGroups()
self.assertEqual(len(ret), 0)
count = 10
for x in range(count):
groupname = 'group_%i' % x
self.source_groups.addGroup(groupname)
ret = self.source_groups.enumerateGroups()
self.assertEqual(len(ret), count,
"Number added didn't equal the number in the db.")
ret = self.source_groups.enumerateGroups(id='group_1', exact_match=True)
self.assertEqual(len(ret), 1)
ret = self.source_groups.enumerateGroups(max_results=5)
self.assertEqual(len(ret), 5)
ret = self.source_groups.enumerateGroups(max_results=20)
self.assertEqual(len(ret), count)
for x in range(count):
groupname = 'group_%i' % x
self.source_groups.removeGroup(groupname)
def testEnumerateGroupSearching(self):
ret = self.source_groups.enumerateGroups()
self.assertEqual(len(ret), 0)
count = 10
for x in range(count):
groupname = 'group_%i' % x
self.source_groups.addGroup(groupname)
ret = self.source_groups.enumerateGroups()
self.assertEqual(len(ret), count,
"Number added didn't equal the number in the db.")
# Exact match Multiple Group Test
ret = self.source_groups.enumerateGroups(
id=['group_2','group_3'], exact_match=True)
self.assertEqual(len(ret), 2,
"Failed multi-fetch")
# Fuzzy Match Test
ret = self.source_groups.enumerateGroups(
id=['group_%'])
self.assertEqual(len(ret), 10,
"Failed the fuzzy match on 'id' test")
# Exact Match test
ret = self.source_groups.enumerateGroups(
id=['group_1','group_1'], exact_match=1)
self.assertEqual(len(ret), 1)
ret = self.source_groups.enumerateGroups(max_results=5)
self.assertEqual(len(ret), 5)
ret = self.source_groups.enumerateGroups(max_results=20)
self.assertEqual(len(ret), count)
for x in range(count):
groupname = 'group_%i' % x
self.source_groups.removeGroup(groupname)
def testIGroupIntrospection_getGroupById_getGroups(self):
group = self.source_groups.getGroupById(self.groupname)
self.failUnless(group is None)
# add group
self.source_groups.addGroup(self.groupname)
group = self.source_groups.getGroupById(self.groupname)
self.failIf(group is None)
self.failUnless(isinstance(group, PloneGroup))
# add another group
self.source_groups.addGroup(self.groupname+'1')
groups = self.source_groups.getGroups()
self.assertEqual(len(groups),2)
self.failUnless(isinstance(groups[0], PloneGroup))
self.failUnless(isinstance(groups[1], PloneGroup))
        # Cleanup
        self.source_groups.removeGroup(self.groupname)
        self.source_groups.removeGroup(self.groupname+'1')
def testIGroupIntrospection_getGroupIds(self):
ret = self.source_groups.getGroupIds()
self.assertEqual(len(ret), 0)
# add one group
self.source_groups.addGroup(self.groupname)
ret = self.source_groups.getGroupIds()
self.assertEqual(len(ret), 1)
# add another group
self.source_groups.addGroup(self.groupname+'1')
ret = self.source_groups.getGroupIds()
self.assertEqual(len(ret), 2)
# add one user
self.source_users.doAddUser(self.username, self.password)
self.source_groups.addPrincipalToGroup(self.username, self.groupname)
ret = self.source_groups.getGroupIds()
self.assertEqual(len(ret), 2)
# Cleanup
self.source_users.removeUser(self.username)
self.source_groups.removeGroup(self.groupname)
self.source_groups.removeGroup(self.groupname+'1')
def testIGroupIntrospection_getGroupMembers(self):
# add group
self.source_groups.addGroup(self.groupname)
ret = self.source_groups.getGroupMembers(self.groupname)
self.assertEqual(len(ret), 0)
# add users
users_number = 3
for i in range(users_number):
username = '%s_%s'%(self.username,i)
self.source_users.doAddUser(username, self.password)
self.source_groups.addPrincipalToGroup(username, self.groupname)
ret = self.source_groups.getGroupMembers(self.groupname)
self.assertEqual(len(ret), 3)
# Cleanup
for i in range(users_number):
username = '%s_%s'%(self.username,i)
self.source_users.removeUser(username)
self.source_groups.removeGroup(self.groupname)
def testIGroupCapability(self):
ret = self.source_groups.allowGroupAdd(self.username, self.groupname)
self.assertEqual(ret, False)
ret = self.source_groups.allowGroupRemove(self.username, self.groupname)
self.assertEqual(ret, False)
# add group
self.source_groups.addGroup(self.groupname)
ret = self.source_groups.allowGroupAdd(self.username, self.groupname)
self.assertEqual(ret, True)
ret = self.source_groups.allowGroupRemove(self.username, self.groupname)
self.assertEqual(ret, False)
# add user
self.source_users.doAddUser(self.username, self.password)
self.source_groups.addPrincipalToGroup(self.username, self.groupname)
ret = self.source_groups.allowGroupRemove(self.username, self.groupname)
self.assertEqual(ret, True)
# Cleanup
self.source_users.removeUser(self.username)
self.source_groups.removeGroup(self.groupname)
def test_suite( ):
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestGroupManager))
return suite
| [
"ashok.hariharan@fc5d704a-7d24-0410-8c4a-57ddeba10ffc"
] | ashok.hariharan@fc5d704a-7d24-0410-8c4a-57ddeba10ffc |
b4b41d922e148e8957787f9ba6595f80af7ff5cd | f83ef53177180ebfeb5a3e230aa29794f52ce1fc | /opencv/opencv-3.4.2/modules/python/test/test_algorithm_rw.py | c925a99e7b378e4a2de119a09516b8791426e911 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | msrLi/portingSources | fe7528b3fd08eed4a1b41383c88ee5c09c2294ef | 57d561730ab27804a3172b33807f2bffbc9e52ae | refs/heads/master | 2021-07-08T01:22:29.604203 | 2019-07-10T13:07:06 | 2019-07-10T13:07:06 | 196,183,165 | 2 | 1 | Apache-2.0 | 2020-10-13T14:30:53 | 2019-07-10T10:16:46 | null | UTF-8 | Python | false | false | 945 | py | #!/usr/bin/env python
"""Algorithm serializaion test."""
import tempfile
import os
import cv2 as cv
from tests_common import NewOpenCVTests
class algorithm_rw_test(NewOpenCVTests):
def test_algorithm_rw(self):
fd, fname = tempfile.mkstemp(prefix="opencv_python_algorithm_", suffix=".yml")
os.close(fd)
# some arbitrary non-default parameters
gold = cv.AKAZE_create(descriptor_size=1, descriptor_channels=2, nOctaves=3, threshold=4.0)
gold.write(cv.FileStorage(fname, cv.FILE_STORAGE_WRITE), "AKAZE")
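        # read the parameters back into a fresh detector to verify the YAML round trip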
fs = cv.FileStorage(fname, cv.FILE_STORAGE_READ)
algorithm = cv.AKAZE_create()
algorithm.read(fs.getNode("AKAZE"))
self.assertEqual(algorithm.getDescriptorSize(), 1)
self.assertEqual(algorithm.getDescriptorChannels(), 2)
self.assertEqual(algorithm.getNOctaves(), 3)
self.assertEqual(algorithm.getThreshold(), 4.0)
os.remove(fname)
| [
"[email protected]"
] | |
0372fae3ea1b5eb2527a825a9ca4ed488c4356f6 | dcce56815dca2b18039e392053376636505ce672 | /dumpscripts/urllib_parse_parse_qs.py | ac161429c77b20833f8c030beec791aacc03a8ed | [] | no_license | robertopauletto/PyMOTW-it_3.0 | 28ff05d8aeccd61ade7d4107a971d9d2576fb579 | c725df4a2aa2e799a969e90c64898f08b7eaad7d | refs/heads/master | 2021-01-20T18:51:30.512327 | 2020-01-09T19:30:14 | 2020-01-09T19:30:14 | 63,536,756 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | # urllib_parse_parse_qs.py
from urllib.parse import parse_qs, parse_qsl
encoded = 'foo=foo1&foo=foo2'
print('parse_qs :', parse_qs(encoded))
print('parse_qsl:', parse_qsl(encoded))
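# Expected output: parse_qs groups the repeated key -> {'foo': ['foo1', 'foo2']}
#                  parse_qsl keeps ordered pairs    -> [('foo', 'foo1'), ('foo', 'foo2')]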
| [
"[email protected]"
] | |
51240397ecfcb413842739d74156a0b5203fe35a | 0e478f3d8b6c323c093455428c9094c45de13bac | /src/OTLMOW/PostenMapping/Model/Post050403123.py | d2a98778acbe6ecbe359b1c4d49180ede0abb679 | [
"MIT"
] | permissive | davidvlaminck/OTLMOW | c6eae90b2cab8a741271002cde454427ca8b75ba | 48f8c357c475da1d2a1bc7820556843d4b37838d | refs/heads/main | 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 | MIT | 2022-06-20T20:36:00 | 2021-11-28T10:28:24 | Python | UTF-8 | Python | false | false | 3,021 | py | # coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post050403123(StandaardPost):
def __init__(self):
super().__init__(
nummer='0504.03123',
beschrijving='Steenslagfundering met continue korrelverdeling zonder toevoegsels, type I volgens 5-4.3, dikte 23 cm',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw.type',
dotnotation='type',
defaultWaarde='steenslag-met-continue-korrelverdeling-zonder-toevoegsel---type-I',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.03123')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
dotnotation='laagRol',
defaultWaarde='fundering',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.03123')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
dotnotation='dikte',
defaultWaarde='23',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.03123')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Onderbouw',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
dotnotation='oppervlakte',
defaultWaarde='',
range='',
usagenote='m2^^cdt:ucumunit',
isMeetstaatAttr=1,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0504.03123')])
| [
"[email protected]"
] | |
b65a91e413453aed3092f09662080605f87d4241 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_170/ch56_2020_04_13_18_33_51_184386.py | 97a6e222fa37eaba3134408fe7f6157aca76afa4 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | import math
def calcula_norma(vetor):
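    # Euclidean (L2) norm of a 2-D vector; equivalent to math.hypot(vetor[0], vetor[1]).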
m = math.sqrt((vetor[0]**2)+(vetor[1]**2))
return m | [
"[email protected]"
] | |
eb8d813a61a828c9efb6c8ec52af1b6ef4a5c794 | 91824d746654fe12881b4fc3b55c553aae0d22ac | /py/redundant-connection.py | ab5fad839fdb7996e843187b4f2991bc9032bb7b | [
"Apache-2.0"
] | permissive | ckclark/leetcode | a1a173c67a36a3256b198f853fcd3d15aa5abbb7 | 844c6f18d06dcb397db76436e5f4b8ddcb1beddc | refs/heads/master | 2021-01-15T08:14:43.368516 | 2020-02-14T07:25:05 | 2020-02-14T07:30:10 | 42,386,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | from collections import defaultdict
class Solution(object):
def findRedundantConnection(self, edges):
"""
:type edges: List[List[int]]
:rtype: List[int]
"""
visited = set()
parent = dict()
neighbors = defaultdict(list)
edge_idx = {tuple(sorted(e)): i for i, e in enumerate(edges)}
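        # Approach: build an undirected adjacency list, DFS until a visited neighbour
        # reveals a back edge (a cycle), then report the edge on that cycle that
        # appears last in the input list.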
def cycle_edge(v1, v2):
e = tuple(sorted([v1, v2]))
max_idx, ans = edge_idx[e], e
v = v1
while v != v2:
e = tuple(sorted([v, parent[v]]))
if edge_idx[e] > max_idx:
max_idx = edge_idx[e]
ans = e
v = parent[v]
return list(ans)
def dfs(cur):
visited.add(cur)
for neighbor in neighbors[cur]:
if neighbor != parent[cur]:
if neighbor in visited:
yield cycle_edge(cur, neighbor)
else:
parent[neighbor] = cur
for x in dfs(neighbor):
yield x
for v1, v2 in edges:
neighbors[v1].append(v2)
neighbors[v2].append(v1)
parent[v1] = -1
return next(dfs(v1))
| [
"[email protected]"
] | |
43c894cedddac6fcbb06773d61b19cb93d4bf03c | 2a8c18a9fd129337c043fd2363b48450f0c3185f | /test/gst-msdk/transcode/vc1.py | 7e4155599e586463311c20b1dd91b128b51208b1 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | intel/vaapi-fits | 9641af9d684a7643c18c7f95e9411b807b2a3ae1 | a60be1833b408fee665d0f717a10804ac2fb8ed4 | refs/heads/master | 2023-09-03T01:15:21.981271 | 2023-08-25T18:33:26 | 2023-09-01T14:05:15 | 164,735,279 | 26 | 34 | BSD-3-Clause | 2023-09-14T11:10:49 | 2019-01-08T21:24:18 | Python | UTF-8 | Python | false | false | 515 | py | ##
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ....lib.gstreamer.msdk.util import *
from ....lib.gstreamer.msdk.transcoder import TranscoderTest
spec = load_test_spec("vc1", "transcode")
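# Each entry in the loaded spec becomes one transcode test case below, ordered by resolution.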
class default(TranscoderTest):
@slash.parametrize(("case"), sorted_by_resolution(spec))
def test(self, case):
vars(self).update(spec[case].copy())
vars(self).update(
case = case,
codec = "vc1",
)
self.transcode()
| [
"[email protected]"
] | |
7c0227292f7dfe64bf7c5b85a67c03683090503b | b15d2787a1eeb56dfa700480364337216d2b1eb9 | /samples/cli/accelbyte_py_sdk_cli/platform/_create_user_payment_order.py | c36e358769fc38c7aedebc890ffc637ee047d686 | [
"MIT"
] | permissive | AccelByte/accelbyte-python-sdk | dedf3b8a592beef5fcf86b4245678ee3277f953d | 539c617c7e6938892fa49f95585b2a45c97a59e0 | refs/heads/main | 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 | MIT | 2022-08-02T03:54:11 | 2021-09-27T04:00:10 | Python | UTF-8 | Python | false | false | 2,810 | py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Platform Service (4.32.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.platform import (
create_user_payment_order as create_user_payment_order_internal,
)
from accelbyte_py_sdk.api.platform.models import ErrorEntity
from accelbyte_py_sdk.api.platform.models import PaymentOrderCreate
from accelbyte_py_sdk.api.platform.models import PaymentOrderInfo
from accelbyte_py_sdk.api.platform.models import ValidationErrorEntity
@click.command()
@click.argument("user_id", type=str)
@click.option("--body", "body", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def create_user_payment_order(
user_id: str,
body: Optional[str] = None,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(create_user_payment_order_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {"Authorization": login_with_auth}
else:
login_as_internal(login_as)
if body is not None:
try:
body_json = json.loads(body)
body = PaymentOrderCreate.create_from_dict(body_json)
except ValueError as e:
raise Exception(f"Invalid JSON for 'body'. {str(e)}") from e
result, error = create_user_payment_order_internal(
user_id=user_id,
body=body,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"createUserPaymentOrder failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
create_user_payment_order.operation_id = "createUserPaymentOrder"
create_user_payment_order.is_deprecated = False
| [
"[email protected]"
] | |
b08feec8aa0ebf1c8e0ffc71a57504ca08bfc238 | b550eda62179ffd8e49a59df7f8a30163140204f | /backend/openshift/services/data/data/dependencies/xml_templates.py | c3e9ee5b62ab44ae3a4c242f93e504762286d3bc | [
"Apache-2.0"
] | permissive | bgoesswe/openeo-repeatability | 6222fb235b70fda9da998b63fec92c0e5ac07169 | 087b9965e710d16cd6f29cb25e2cb94e443c2b30 | refs/heads/master | 2022-12-11T03:43:35.365574 | 2018-08-07T20:02:02 | 2018-08-07T20:02:02 | 139,158,921 | 0 | 1 | null | 2022-12-08T02:15:15 | 2018-06-29T14:27:34 | Python | UTF-8 | Python | false | false | 2,291 | py | xml_base = (
"<?xml version='1.0' encoding='ISO-8859-1' standalone='no'?>"
"<csw:GetRecords "
"xmlns:csw='http://www.opengis.net/cat/csw/2.0.2' "
"xmlns:ogc='http://www.opengis.net/ogc' "
"service='CSW' "
"version='2.0.2' "
"resultType='results' "
"startPosition='{start_position}' "
"maxRecords='1000' "
"outputFormat='application/json' "
"outputSchema='{output_schema}' "
"xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' "
"xsi:schemaLocation='http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd' "
"xmlns:gml='http://www.opengis.net/gml' "
"xmlns:gmd='http://www.isotc211.org/2005/gmd' "
"xmlns:apiso='http://www.opengis.net/cat/csw/apiso/1.0'>"
"<csw:Query typeNames='csw:Record'>"
"<csw:ElementSetName>full</csw:ElementSetName>"
"<csw:Constraint version='1.1.0'>"
"<ogc:Filter>"
"{children}"
"</ogc:Filter>"
"</csw:Constraint>"
"<ogc:SortBy>"
"<ogc:SortProperty>"
"<ogc:PropertyName>dc:date</ogc:PropertyName>"
"<ogc:SortOrder>ASC</ogc:SortOrder>"
"</ogc:SortProperty>"
"</ogc:SortBy>"
"</csw:Query>"
"</csw:GetRecords>")
xml_and = (
"<ogc:And>"
"{children}"
"</ogc:And>")
xml_series = (
"<ogc:PropertyIsEqualTo>"
"<ogc:PropertyName>apiso:Type</ogc:PropertyName>"
"<ogc:Literal>series</ogc:Literal>"
"</ogc:PropertyIsEqualTo>")
xml_product = (
"<ogc:PropertyIsEqualTo>"
"<ogc:PropertyName>{property}</ogc:PropertyName>"
"<ogc:Literal>{product}</ogc:Literal>"
"</ogc:PropertyIsEqualTo>")
xml_begin = (
"<ogc:PropertyIsGreaterThanOrEqualTo>"
"<ogc:PropertyName>apiso:TempExtent_begin</ogc:PropertyName>"
"<ogc:Literal>{start}</ogc:Literal>"
"</ogc:PropertyIsGreaterThanOrEqualTo>")
xml_end = (
"<ogc:PropertyIsLessThanOrEqualTo>"
"<ogc:PropertyName>apiso:TempExtent_end</ogc:PropertyName>"
"<ogc:Literal>{end}</ogc:Literal>"
"</ogc:PropertyIsLessThanOrEqualTo>")
xml_bbox = (
"<ogc:BBOX>"
"<ogc:PropertyName>ows:BoundingBox</ogc:PropertyName>"
"<gml:Envelope>"
"<gml:lowerCorner>{bbox.x1} {bbox.y1}</gml:lowerCorner>"
"<gml:upperCorner>{bbox.x2} {bbox.y2}</gml:upperCorner>"
"</gml:Envelope>"
"</ogc:BBOX>")
| [
"[email protected]"
] | |
8630e43fdef88303ac3890a023c2a9d9ba234303 | f062af64ce156719203b79de9c2502b265af27de | /tensorflow_datasets/image_classification/imagenet2012_fewshot.py | 8ceb0f003db906d5f9ee337a88de732cb52626d6 | [
"Apache-2.0"
] | permissive | tensorflow/datasets | d0c58f3db7ce06347671558b9e5a41e12e6913ce | 41ae3cf1439711ed2f50f99caa0e6702082e6d37 | refs/heads/master | 2023-08-31T03:23:16.581638 | 2023-08-30T17:25:34 | 2023-08-30T17:29:38 | 148,221,325 | 4,224 | 1,738 | Apache-2.0 | 2023-09-14T14:04:22 | 2018-09-10T21:27:22 | Python | UTF-8 | Python | false | false | 938 | py | # coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset definition for imagenet2012_fewshot.
DEPRECATED!
If you want to use the Imagenet2012Fewshot dataset builder class, use:
tfds.builder_cls('imagenet2012_fewshot')
"""
from tensorflow_datasets.core import lazy_builder_import
Imagenet2012Fewshot = lazy_builder_import.LazyBuilderImport(
'imagenet2012_fewshot'
)
| [
"[email protected]"
] | |
c6f1362845b8706526e3c4be24be5c5c66831c22 | 2a0865c583a12c66fdd1e7a62535b3e35482d37b | /CarAI/joshua_work/old/tutorial/Code/04_code/Lights/src/Application.py | cd596a8094bb301022e4217d73fa06d9531e57e5 | [] | no_license | MyAusweis/UnrealAI | fe4a6df2859143cd4ca66a063016fc4d22d62bb7 | 9e5ad6b93df7ecf2293de10d41f09969c42404b3 | refs/heads/master | 2022-02-11T12:43:52.129313 | 2018-07-01T22:08:23 | 2018-07-01T22:08:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | from direct.showbase.ShowBase import ShowBase
from direct.actor.Actor import Actor
from panda3d.core import *
class Application(ShowBase):
def __init__(self):
ShowBase.__init__(self)
self.panda = Actor("panda", {"walk": "panda-walk"})
self.panda.reparentTo(render)
self.panda.loop("walk")
cm = CardMaker("plane")
cm.setFrame(-10, 10, -10, 10)
plane = render.attachNewNode(cm.generate())
plane.setP(270)
self.cam.setPos(0, -40, 6)
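        # Lighting setup: the ambient and directional lights affect the whole scene,
        # the point light is applied to the panda only, and the red spotlight casts
        # shadows (enabled through the shader generator via setShaderAuto below).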
ambLight = AmbientLight("ambient")
ambLight.setColor(Vec4(0.2, 0.1, 0.1, 1.0))
ambNode = render.attachNewNode(ambLight)
render.setLight(ambNode)
dirLight = DirectionalLight("directional")
dirLight.setColor(Vec4(0.1, 0.4, 0.1, 1.0))
dirNode = render.attachNewNode(dirLight)
dirNode.setHpr(60, 0, 90)
render.setLight(dirNode)
pntLight = PointLight("point")
pntLight.setColor(Vec4(0.8, 0.8, 0.8, 1.0))
pntNode = render.attachNewNode(pntLight)
pntNode.setPos(0, 0, 15)
self.panda.setLight(pntNode)
sptLight = Spotlight("spot")
sptLens = PerspectiveLens()
sptLight.setLens(sptLens)
sptLight.setColor(Vec4(1.0, 0.0, 0.0, 1.0))
sptLight.setShadowCaster(True)
sptNode = render.attachNewNode(sptLight)
sptNode.setPos(-10, -10, 20)
sptNode.lookAt(self.panda)
render.setLight(sptNode)
render.setShaderAuto()
| [
"[email protected]"
] | |
c3d153ca37a314d7c408e5e91912577ac96dcadd | 1c69aaf4ff5c9bbabd4e4e3486e3f442808f96ea | /models/r4/datarequirement.py | 937eab9daf208c74b3db857de895f03fdab3d00f | [] | no_license | glow-mdsol/devday-boston-clinical-research | 72565289b27e9d6105640ec14749e07d7bc14014 | 560a8141bc3bd1ae5a31b110e82863e25b4ce9f8 | refs/heads/master | 2020-03-20T00:48:32.862642 | 2018-06-20T15:33:29 | 2018-06-20T15:33:29 | 137,056,522 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,253 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.3.0 (http://hl7.org/fhir/StructureDefinition/DataRequirement) on 2018-05-12.
# 2018, SMART Health IT.
from . import element
class DataRequirement(element.Element):
""" Describes a required data item.
Describes a required data item for evaluation in terms of the type of data,
and optional code or date-based filters of the data.
"""
resource_type = "DataRequirement"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.codeFilter = None
""" What codes are expected.
List of `DataRequirementCodeFilter` items (represented as `dict` in JSON). """
self.dateFilter = None
""" What dates/date ranges are expected.
List of `DataRequirementDateFilter` items (represented as `dict` in JSON). """
self.limit = None
""" Number of results.
Type `int`. """
self.mustSupport = None
""" Indicates that specific structure elements are referenced by the
knowledge module.
List of `str` items. """
self.profile = None
""" The profile of the required data.
List of `str` items. """
self.sort = None
""" Order of the results.
List of `DataRequirementSort` items (represented as `dict` in JSON). """
self.subjectCodeableConcept = None
""" E.g. Patient, Practitioner, RelatedPerson, Organization, Location,
Device.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.subjectReference = None
""" E.g. Patient, Practitioner, RelatedPerson, Organization, Location,
Device.
Type `FHIRReference` (represented as `dict` in JSON). """
self.type = None
""" The type of the required data.
Type `str`. """
super(DataRequirement, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DataRequirement, self).elementProperties()
js.extend([
("codeFilter", "codeFilter", DataRequirementCodeFilter, True, None, False),
("dateFilter", "dateFilter", DataRequirementDateFilter, True, None, False),
("limit", "limit", int, False, None, False),
("mustSupport", "mustSupport", str, True, None, False),
("profile", "profile", str, True, None, False),
("sort", "sort", DataRequirementSort, True, None, False),
("subjectCodeableConcept", "subjectCodeableConcept", codeableconcept.CodeableConcept, False, "subject", False),
("subjectReference", "subjectReference", fhirreference.FHIRReference, False, "subject", False),
("type", "type", str, False, None, True),
])
return js
class DataRequirementCodeFilter(element.Element):
""" What codes are expected.
Code filters specify additional constraints on the data, specifying the
value set of interest for a particular element of the data. Each code
filter defines an additional constraint on the data, i.e. code filters are
AND'ed, not OR'ed.
"""
resource_type = "DataRequirementCodeFilter"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" What code is expected.
List of `Coding` items (represented as `dict` in JSON). """
self.path = None
""" The code-valued attribute of the filter.
Type `str`. """
self.valueSetCanonical = None
""" Valueset for the filter.
Type `str`. """
self.valueSetUri = None
""" Valueset for the filter.
Type `str`. """
super(DataRequirementCodeFilter, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DataRequirementCodeFilter, self).elementProperties()
js.extend([
("code", "code", coding.Coding, True, None, False),
("path", "path", str, False, None, True),
("valueSetCanonical", "valueSetCanonical", str, False, "valueSet", False),
("valueSetUri", "valueSetUri", str, False, "valueSet", False),
])
return js
class DataRequirementDateFilter(element.Element):
""" What dates/date ranges are expected.
Date filters specify additional constraints on the data in terms of the
applicable date range for specific elements. Each date filter specifies an
additional constraint on the data, i.e. date filters are AND'ed, not OR'ed.
"""
resource_type = "DataRequirementDateFilter"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.path = None
""" The date-valued attribute of the filter.
Type `str`. """
self.valueDateTime = None
""" The value of the filter, as a Period, DateTime, or Duration value.
Type `FHIRDate` (represented as `str` in JSON). """
self.valueDuration = None
""" The value of the filter, as a Period, DateTime, or Duration value.
Type `Duration` (represented as `dict` in JSON). """
self.valuePeriod = None
""" The value of the filter, as a Period, DateTime, or Duration value.
Type `Period` (represented as `dict` in JSON). """
super(DataRequirementDateFilter, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DataRequirementDateFilter, self).elementProperties()
js.extend([
("path", "path", str, False, None, True),
("valueDateTime", "valueDateTime", fhirdate.FHIRDate, False, "value", False),
("valueDuration", "valueDuration", duration.Duration, False, "value", False),
("valuePeriod", "valuePeriod", period.Period, False, "value", False),
])
return js
class DataRequirementSort(element.Element):
""" Order of the results.
Specifies the order of the results to be returned.
"""
resource_type = "DataRequirementSort"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.direction = None
""" ascending | descending.
Type `str`. """
self.path = None
""" The name of the attribute to perform the sort.
Type `str`. """
super(DataRequirementSort, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DataRequirementSort, self).elementProperties()
js.extend([
("direction", "direction", str, False, None, True),
("path", "path", str, False, None, True),
])
return js
from . import codeableconcept
from . import coding
from . import duration
from . import fhirdate
from . import fhirreference
from . import period
| [
"[email protected]"
] | |
79dbda9c85e5d684dfa0a8d30200484398c05def | c83473c2f9b63429f40e8a4806ab49305815c81d | /feature_cross_script/feature_cross_run.py | ba39e49faa933a1e1c10cfa9a816e7e26ae97afd | [] | no_license | pelinbalci/machinelearning | f8f84cda07a2ae87f23598188a6c148badb6e15f | 33e9786ea49f114c24c02dbf24e33434d0421f65 | refs/heads/master | 2022-11-15T19:55:46.633659 | 2020-07-05T18:38:54 | 2020-07-05T18:38:54 | 273,779,533 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,798 | py | from feature_cross_script.feature_cross import import_dataset, scale_label, shuffle_df, create_model, train_model, \
plot_the_loss_curve, create_feature_columns, create_feature_layer, create_bucket_features, create_crossed_feature,\
evaluate_model
from parameters import learning_rate, epochs, batch_size, label_name, resolution_in_degrees
test_path = '/Users/pelin.balci/PycharmProjects/machinelearning/inputs/california_housing_test.csv'
train_path = '/Users/pelin.balci/PycharmProjects/machinelearning/inputs/california_housing_train.csv'
train_df = import_dataset(train_path)
test_df = import_dataset(test_path)
train_df = scale_label(train_df)
test_df = scale_label(test_df)
shuffled_train_df = shuffle_df(train_df)
# Use floating latitude and longitude vectors separately:
feature_columns = create_feature_columns()
fp_feature_layer = create_feature_layer(feature_columns)
# Bucketize them into 10 integer points, we still have two separate vectors:
feature_columns, buckets_feature_layer = create_bucket_features(train_df, resolution_in_degrees)
''' Bucket representation outperformed floating-point representations. '''
# In real life we have two dimension vectors for latitude and longitude, cross them:
feature_cross_feature_layer = create_crossed_feature(feature_columns)
''' Representing these features as a feature cross produced much lower loss values than
representing these features as buckets'''
# Create and compile the model's topography.
my_model = create_model(learning_rate, feature_cross_feature_layer)
# Train the model on the training set.
epochs, rmse = train_model(my_model, train_df, epochs, batch_size, label_name)
plot_the_loss_curve(epochs, rmse)
evaluation_test_result = evaluate_model(my_model, test_df, label_name, batch_size)
print('done')
| [
"[email protected]"
] | |
22c64eb6e9eab3d47ae40216f2afd52aa7f58a5a | 1000884faf988644d9abe02525c885307fd36f98 | /day17/about_md/about_md/settings.py | d507ca2e1f90a0399406b7229861aae74f267488 | [
"MIT"
] | permissive | gengna92/PythonProjects | e6f88eb36f636420fbede8e204490c0b3e4c24fc | 12d223eb1ec8f90992ea87df79f10ea8c745c4cb | refs/heads/master | 2021-08-27T20:48:26.675942 | 2021-08-14T06:59:34 | 2021-08-14T06:59:34 | 166,172,714 | 0 | 0 | MIT | 2021-08-14T07:00:08 | 2019-01-17T06:28:52 | HTML | UTF-8 | Python | false | false | 3,263 | py | """
Django settings for about_md project.
Generated by 'django-admin startproject' using Django 1.11.20.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o)btb@#(-2p*my0wc^qa#zj5uj)tf0$6-ox323m)*t3=(5+2ne'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app01.apps.App01Config',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'app01.my_middleware.middlewares.MD1',
'app01.my_middleware.middlewares.MD2',
]
ROOT_URLCONF = 'about_md.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'about_md.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
f8eccd51f10bf0fadf99e4d474c60d6a0231a31e | a782e8b77eb9a32ffb2c3f417125553693eaee86 | /scripts/fuzzing/check_fuzzer.py | bcb62977c4837b69f26a61f593dc9e38d7478b1d | [
"BSD-3-Clause"
] | permissive | xyuan/fuchsia | 9e5251517e88447d3e4df12cf530d2c3068af290 | db9b631cda844d7f1a1b18cefed832a66f46d56c | refs/heads/master | 2022-06-30T17:53:09.241350 | 2020-05-13T12:28:17 | 2020-05-13T12:28:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,737 | py | #!/usr/bin/env python2.7
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import sys
from lib.args import ArgParser
from lib.device import Device
from lib.fuzzer import Fuzzer
from lib.host import Host
def main():
parser = ArgParser(
'Reports status for the fuzzer matching NAME if ' +
'provided, or for all running fuzzers. Status includes execution ' +
'state, corpus size, and number of artifacts.')
parser.require_name(False)
args = parser.parse_args()
host = Host.from_build()
device = Device.from_args(host, args)
fuzzers = Fuzzer.filter(host.fuzzers, args.name)
pids = device.getpids()
silent = True
for pkg, tgt in fuzzers:
fuzzer = Fuzzer(device, pkg, tgt)
if not args.name and tgt not in pids:
continue
silent = False
if tgt in pids:
print(str(fuzzer) + ': RUNNING')
else:
print(str(fuzzer) + ': STOPPED')
print(' Output path: ' + fuzzer.data_path())
print(
' Corpus size: %d inputs / %d bytes' % fuzzer.measure_corpus())
artifacts = fuzzer.list_artifacts()
if len(artifacts) == 0:
print(' Artifacts: None')
else:
print(' Artifacts: ' + artifacts[0])
for artifact in artifacts[1:]:
print(' ' + artifact)
if silent:
print(
'No fuzzers are running. Include \'name\' to check specific ' +
'fuzzers.')
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
fb5988d10e71565e8e3ad8c771429b17c9cbe261 | 16fa36449c5b7843923ab40fe37e0563b0f811cf | /r05.py | fe6bda675e4ac8d9443bddda2d5ecdb8a5937750 | [
"Apache-2.0"
] | permissive | netlabcode/reader | 1d34787c3d619af4d28c08f989e8c0976f18773a | 9d42bef6ccb35266abec87db5a2df9bc9d77c355 | refs/heads/main | 2023-04-17T08:45:15.397981 | 2021-05-06T05:09:10 | 2021-05-06T05:09:10 | 354,858,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,139 | py | import psycopg2
from datetime import datetime
import binascii
import _thread
import time
import socket
PORT1 = 8805
def serverX():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s1:
s1.bind(('',PORT1))
s1.listen()
conn1, addr = s1.accept()
with conn1:
print('Server Substation 5 from:',addr)
conn = psycopg2.connect(host="131.180.165.7",database="CRoF",user="postgres", password="crpg")
cursor = conn.cursor()
#Value id 34-41
cursor.execute('''SELECT value from objects WHERE id=34''')
result = cursor.fetchone()
record1 = result[0]
cursor.execute('''SELECT value from objects WHERE id=35''')
result = cursor.fetchone()
record2 = result[0]
cursor.execute('''SELECT value from objects WHERE id=36''')
result = cursor.fetchone()
record3 = result[0]
cursor.execute('''SELECT value from objects WHERE id=37''')
result = cursor.fetchone()
record4 = result[0]
cursor.execute('''SELECT value from objects WHERE id=38''')
result = cursor.fetchone()
record5 = result[0]
cursor.execute('''SELECT value from objects WHERE id=39''')
result = cursor.fetchone()
record6 = result[0]
cursor.execute('''SELECT value from objects WHERE id=40''')
result = cursor.fetchone()
record7 = result[0]
cursor.execute('''SELECT value from objects WHERE id=41''')
result = cursor.fetchone()
record8 = result[0]
#Value code
cursor.execute('''SELECT code from objects WHERE id=34''')
result = cursor.fetchone()
r1 = result[0]
cursor.execute('''SELECT code from objects WHERE id=35''')
result = cursor.fetchone()
r2 = result[0]
cursor.execute('''SELECT code from objects WHERE id=36''')
result = cursor.fetchone()
r3 = result[0]
cursor.execute('''SELECT code from objects WHERE id=37''')
result = cursor.fetchone()
r4 = result[0]
cursor.execute('''SELECT code from objects WHERE id=38''')
result = cursor.fetchone()
r5 = result[0]
cursor.execute('''SELECT code from objects WHERE id=39''')
result = cursor.fetchone()
r6 = result[0]
cursor.execute('''SELECT code from objects WHERE id=40''')
result = cursor.fetchone()
r7 = result[0]
cursor.execute('''SELECT code from objects WHERE id=41''')
result = cursor.fetchone()
r8 = result[0]
while True:
data = "a"
dataxy = data.encode()
try:
#Format: mu01_id+value
cursor.execute('''SELECT value from objects WHERE id=34''')
result = cursor.fetchone()
if record1 != result[0]:
print(result[0])
string = "mu01_"+str(r1)+"+"+str(result[0])
datax = string.encode()
conn1.sendall(datax)
print(string)
record1 = result[0]
cursor.execute('''SELECT value from objects WHERE id=35''')
result = cursor.fetchone()
if record2 != result[0]:
print(result[0])
string = "mu01_"+str(r2)+"+"+str(result[0])
datax = string.encode()
conn1.sendall(datax)
print(string)
record2 = result[0]
cursor.execute('''SELECT value from objects WHERE id=36''')
result = cursor.fetchone()
if record3 != result[0]:
print(result[0])
string = "mu02_"+str(r3)+"+"+str(result[0])
datax = string.encode()
conn1.sendall(datax)
print(string)
record3 = result[0]
cursor.execute('''SELECT value from objects WHERE id=37''')
result = cursor.fetchone()
if record4 != result[0]:
print(result[0])
string = "mu02_"+str(r4)+"+"+str(result[0])
datax = string.encode()
conn1.sendall(datax)
print(string)
record4 = result[0]
cursor.execute('''SELECT value from objects WHERE id=38''')
result = cursor.fetchone()
if record5 != result[0]:
print(result[0])
string = "mu02_"+str(r5)+"+"+str(result[0])
datax = string.encode()
conn1.sendall(datax)
print(string)
record5 = result[0]
cursor.execute('''SELECT value from objects WHERE id=39''')
result = cursor.fetchone()
if record6 != result[0]:
print(result[0])
string = "mu05_"+str(r6)+"+"+str(result[0])
datax = string.encode()
conn1.sendall(datax)
print(string)
record6 = result[0]
cursor.execute('''SELECT value from objects WHERE id=40''')
result = cursor.fetchone()
if record7 != result[0]:
print(result[0])
string = "mu03_"+str(r7)+"+"+str(result[0])
datax = string.encode()
conn1.sendall(datax)
print(string)
record7 = result[0]
cursor.execute('''SELECT value from objects WHERE id=41''')
result = cursor.fetchone()
if record8 != result[0]:
print(result[0])
string = "mu03_"+str(r8)+"+"+str(result[0])
datax = string.encode()
conn1.sendall(datax)
print(string)
record8 = result[0]
conn1.sendall(dataxy)
#print(record1)
time.sleep(1)
except:
conn1.close()
conn.close()
print("Connection Close Substation 5")
break
conn1.close()
print("Restart Server Substation 6")
conn.close()
s1.close()
time.sleep(1)
serverX()
serverX()
| [
"[email protected]"
] | |
9953913a4f3c4ad0d5c3c64721526baac7fcac32 | 0f0530b06a59fe6cfaa74b0030d892256e33c666 | /aioworkers/net/web/client.py | 2734c1f7df6ac6cc16ae2630104baf89ad73f93e | [
"Apache-2.0"
] | permissive | nicoddemus/aioworkers | c269b470be5be0c2a59fb2a91d3a285f54125735 | 4ab85064844dc28141833d1348989d8c891f3d7d | refs/heads/master | 2022-12-06T21:22:29.976784 | 2020-08-22T11:42:53 | 2020-08-22T11:43:24 | 291,555,920 | 0 | 0 | Apache-2.0 | 2020-08-30T21:37:29 | 2020-08-30T21:37:28 | null | UTF-8 | Python | false | false | 3,021 | py | import logging
import urllib.error
import urllib.request
from http.client import HTTPResponse
from typing import Any, Awaitable, Callable, Mapping, Optional, Tuple, Union
from aioworkers.core.base import ExecutorEntity
from aioworkers.http import URL
logger = logging.getLogger(__name__)
class Request:
def __init__(self, session: 'Session', *args, **kwargs):
self._session = session
self._request = urllib.request.Request(*args, **kwargs)
self._response = None # type: Optional[Response]
async def __aenter__(self) -> 'Response':
logger.info('Request %r', self._request)
try:
response = await self._session.run(
self._session.opener.open, self._request,
)
except urllib.error.HTTPError as e:
response = e
logger.info('Response %r', response)
self._response = Response(response, self._session)
return self._response
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self._response.close()
class Response:
def __init__(
self, response: HTTPResponse,
session: 'Session',
):
self._response = response
self._session = session
@property
def status(self):
return self._response.status
@property
def reason(self):
return self._response.reason
@property
def headers(self):
return self._response.headers
async def read(self) -> bytes:
return await self._session.run(self._response.read)
def isclosed(self):
return self._response.isclosed()
async def close(self):
return await self._session.run(self._response.close)
class Session:
def __init__(
self,
runner: Callable[..., Awaitable[Any]],
headers: Mapping = None,
conn_timeout: float = 60,
read_timeout: float = 60,
handlers: Optional[Tuple[urllib.request.BaseHandler]] = None,
):
self.run = runner
self._headers = headers
self._conn_timeout = conn_timeout
self._read_timeout = read_timeout
if handlers is None:
handlers = (
urllib.request.HTTPCookieProcessor(),
)
self.opener = urllib.request.build_opener(*handlers)
if headers:
if isinstance(headers, Mapping):
self.opener.addheaders = list(headers.items())
else:
self.opener.addheaders = list(headers)
@classmethod
def from_entity(cls, entity: ExecutorEntity, **kwargs) -> 'Session':
kwargs.update(
runner=entity.run_in_executor,
)
return cls(**kwargs)
def request(self, url: Union[str, URL], method='get', **kwargs) -> Request:
if isinstance(url, URL):
url = str(url)
kwargs.update(
url=url,
method=method.upper(),
)
return Request(self, **kwargs)
async def close(self):
self.opener.close()
| [
"[email protected]"
] | |
a87f5d3a4d2ddee0778d8d514182dbd29955b027 | eb33ebb7c144cce5a6a1b039a2cb5dd331bd7863 | /asl/datasets/__init__.py | dcf753cb829208644b2265248b2084a642addeb7 | [] | no_license | zenna/asl | aeaa2c3cb0f204cf42e03528765d660bd3799ea8 | 6753eb29f3b1826c40df2eae1dff0dad34629423 | refs/heads/master | 2021-09-10T12:11:22.304508 | 2018-02-08T16:02:10 | 2018-02-08T16:02:10 | 59,302,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | from .omniglot import * | [
"[email protected]"
] | |
c38a71aa6745843566b5e5e68584da67b3d9b12f | 1ba8794a7e38c19fda1cf3a02a4e55004a4f99ec | /pandas/tests/scalar/test_na_scalar.py | e68e49814245f91df964719c373d4f4943563f87 | [
"BSD-3-Clause"
] | permissive | souravs17031999/pandas | 51003c705c12f2f78d6c264c4333aabf5da7d312 | 8cdbebd6be92d317e0d21679c5891a10dbf8efe7 | refs/heads/master | 2020-09-24T13:46:40.314995 | 2019-12-04T05:40:28 | 2019-12-04T05:40:28 | 225,654,387 | 1 | 0 | BSD-3-Clause | 2019-12-04T03:13:02 | 2019-12-03T15:33:22 | Python | UTF-8 | Python | false | false | 2,751 | py | import numpy as np
import pytest
from pandas._libs.missing import NA
from pandas.core.dtypes.common import is_scalar
import pandas as pd
import pandas.util.testing as tm
def test_singleton():
assert NA is NA
new_NA = type(NA)()
assert new_NA is NA
def test_repr():
assert repr(NA) == "NA"
assert str(NA) == "NA"
def test_truthiness():
with pytest.raises(TypeError):
bool(NA)
with pytest.raises(TypeError):
not NA
def test_hashable():
assert hash(NA) == hash(NA)
d = {NA: "test"}
assert d[NA] == "test"
def test_arithmetic_ops(all_arithmetic_functions):
op = all_arithmetic_functions
for other in [NA, 1, 1.0, "a", np.int64(1), np.nan]:
if op.__name__ == "rmod" and isinstance(other, str):
continue
if op.__name__ in ("divmod", "rdivmod"):
            result = op(NA, other)
            assert result[0] is NA and result[1] is NA
else:
assert op(NA, other) is NA
def test_comparison_ops():
for other in [NA, 1, 1.0, "a", np.int64(1), np.nan]:
assert (NA == other) is NA
assert (NA != other) is NA
assert (NA > other) is NA
assert (NA >= other) is NA
assert (NA < other) is NA
assert (NA <= other) is NA
if isinstance(other, np.int64):
# for numpy scalars we get a deprecation warning and False as result
# for equality or error for larger/lesser than
continue
assert (other == NA) is NA
assert (other != NA) is NA
assert (other > NA) is NA
assert (other >= NA) is NA
assert (other < NA) is NA
assert (other <= NA) is NA
def test_unary_ops():
assert +NA is NA
assert -NA is NA
assert abs(NA) is NA
assert ~NA is NA
def test_logical_and():
assert NA & True is NA
assert True & NA is NA
assert NA & False is False
assert False & NA is False
assert NA & NA is NA
with pytest.raises(TypeError):
NA & 5
def test_logical_or():
assert NA | True is True
assert True | NA is True
assert NA | False is NA
assert False | NA is NA
assert NA | NA is NA
with pytest.raises(TypeError):
NA | 5
def test_logical_xor():
assert NA ^ True is NA
assert True ^ NA is NA
assert NA ^ False is NA
assert False ^ NA is NA
assert NA ^ NA is NA
with pytest.raises(TypeError):
NA ^ 5
def test_logical_not():
assert ~NA is NA
def test_is_scalar():
assert is_scalar(NA) is True
def test_isna():
assert pd.isna(NA) is True
assert pd.notna(NA) is False
def test_series_isna():
s = pd.Series([1, NA], dtype=object)
expected = pd.Series([False, True])
tm.assert_series_equal(s.isna(), expected)
| [
"[email protected]"
] | |
d933617badf4c3b39e324ad69a7ee50c2c10378b | 5496b9682dec06925f3572e64d7f1eb48d78ebe1 | /src/advection_scenarios/create_grid_spacing.py | ab9a14e171f5c9db216a14e05f9a775bf97de885 | [] | no_license | VictorOnink/Lagrangian-Transport-Scenarios | 64bec8b992e2909a05b0258524dbae25f967ea29 | 586bcecc42d6a7f4f299507da8f0cb29c8d71a2e | refs/heads/master | 2023-04-14T12:22:29.309172 | 2022-07-11T18:46:38 | 2022-07-11T18:46:38 | 297,894,637 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,595 | py | import settings
import utils
import numpy as np
import xarray
import progressbar
def create_grid_spacing(output_name: str, grid: np.array, lon: np.array, lat: np.array):
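    # Spacing between neighbouring grid points in degrees; the last row and column
    # are later filled by copying their neighbours (see fill_last below).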
grid_lon_spacing = np.zeros(grid.shape)
grid_lat_spacing = np.zeros(grid.shape)
for lat_step in progressbar.progressbar(range(grid.shape[0] - 1)):
for lon_step in range(grid.shape[1] - 1):
grid_lon_spacing[lat_step, lon_step] = np.abs(lon[lon_step] - lon[lon_step + 1])
grid_lat_spacing[lat_step, lon_step] = np.abs(lat[lat_step] - lat[lat_step + 1])
grid_lon_spacing = fill_last(grid_lon_spacing)
grid_lat_spacing = fill_last(grid_lat_spacing)
# Saving the entire grid spacing fields
coords = [('time', np.array([0])), ('lat', lat), ('lon', lon)]
lon_space = xarray.DataArray(grid_lon_spacing[np.newaxis, :, :], coords=coords)
lat_space = xarray.DataArray(grid_lat_spacing[np.newaxis, :, :], coords=coords)
dcoo = {'time': np.array([0]), 'lat': lat, 'lon': lon}
dset = xarray.Dataset({'lon_spacing': lon_space, 'lat_spacing': lat_space}, coords=dcoo)
dset.to_netcdf(output_name)
# Checks to see if the grid spacing calculation works as expected
utils.print_statement("The maximum lon spacing is {}, and the minimum is {}".format(grid_lon_spacing.max(), grid_lon_spacing.min()))
utils.print_statement("The maximum lat spacing is {}, and the minimum is {}".format(grid_lat_spacing.max(), grid_lat_spacing.min()))
def fill_last(array: np.array):
array[:, -1] = array[:, -2]
array[-1, :] = array[-2, :]
return array
| [
"[email protected]"
] | |
8475480405ab57c758c966b738b679c6d1f6aef0 | b9e99a828952ffeab9767e625c0061cb3ea5b670 | /Python编程从入门到实践/data_visual/csv_test/highs_lows.py | 2d91031ecfd543481f909f68a6c6648d8d98b679 | [] | no_license | ZGA101421/Python3_Project | 95d95e23858ef92f6825f018605089c105303ad3 | fa30f876fd13890743bc81d1521534c340575132 | refs/heads/master | 2022-04-03T07:03:46.369710 | 2019-12-30T15:22:21 | 2019-12-30T15:22:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,117 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : highs_lows.py
@Time : 2019/04/06 22:32:59
@Author : leacoder
@Version : 1.0
@Contact : [email protected]
@License :
@Desc    :   Process CSV weather data and plot the daily high/low temperatures with matplotlib
'''
# here put the import lib
# The csv module is part of the Python standard library
import csv
from matplotlib import pyplot as plt
# The datetime module handles the dates
from datetime import datetime
# Read the dates, daily highs and daily lows from the file
filename = 'death_valley_2014.csv'
with open(filename) as f:
    # Create a reader object associated with the file
reader = csv.reader(f)
    # The csv module provides next(); calling it with the reader object returns the next line of the file. It is called only once here, so we get the first line, which contains the file header.
header_row = next(reader)
dates, highs, lows = [], [], []
    for row in reader:  # iterate over the remaining rows of the file
        try:  # error checking
current_date = datetime.strptime(row[0], "%Y-%m-%d") # '2014-7-1
high = int(row[1])
low = int(row[3])
except ValueError:
print(current_date, 'missing data')
else:
dates.append(current_date)
highs.append(high)
lows.append(low)
# Plot the data
fig = plt.figure(dpi=123, figsize=(10, 6))
'''
plot(*args[, scalex, scaley, data])
Plot y versus x as lines and/or markers.
alpha: float Set the alpha value used for blending - not supported on all backends.
'''
plt.plot(dates, highs, c='red', alpha=0.5)  # plot the daily highs
plt.plot(dates, lows, c='blue', alpha=0.5)  # plot the daily lows
'''
fill_between(x, y1[, y2, where, ...])
Fill the area between two horizontal curves.
'''
plt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)
# Format the plot
plt.title("Daily high temperatures - 2014", fontsize=24)
plt.xlabel('', fontsize=16)
'''
autofmt_xdate(self, bottom=0.2, rotation=30, ha='right', which=None)
Date ticklabels often overlap, so it is useful to rotate them and right align them.
bottom : scalar
The bottom of the subplots for subplots_adjust().
rotation : angle in degrees
The rotation of the xtick labels.
ha : string
The horizontal alignment of the xticklabels.
which : {None, 'major', 'minor', 'both'}
Selects which ticklabels to rotate. Default is None which works the same as major.
'''
fig.autofmt_xdate()
title = "Daily high and low temperatures - 2014\nDeath Valley, CA"
plt.title(title, fontsize=24)
'''
tick_params([axis])
    Change the appearance of ticks, tick labels, and gridlines.
axis : {'x', 'y', 'both'}, optional
Which axis to apply the parameters to.
which : {'major', 'minor', 'both'}
Default is 'major'; apply arguments to which ticks.
'''
plt.tick_params(axis='both', which='major', labelsize=16)
plt.show()
| [
"[email protected]"
] | |
504f5eee881638bdd210fa2544365df828b26b6e | ca2b69402afa6149d3b4995c790e9462ecd245b3 | /sandbox/static/__init__.py | af788b2604708cd27c0678d84780a7588ddabfbc | [] | no_license | rmorshea/idom-sandbox | 9f3d6a8b65312d37e774aafa68395b7653978edf | 77e885e70254e3c37c4c3e322c4a6b5edc03b223 | refs/heads/master | 2020-07-06T00:02:23.564898 | 2019-09-09T16:34:35 | 2019-09-09T16:34:35 | 202,823,384 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | from .view import SandboxView
from .server import SandboxServer
| [
"[email protected]"
] | |
01bd12889f388d73cf9422ad1c2e2a22e6480f55 | c92a60d7968130cf21b20a943976c0e1929b6eb8 | /apps/yoga_schedule/apps.py | 0ae9c7f6dfa79f96758b668ccb0054d74c3d34e7 | [] | no_license | BeAnhTran/yoga-center-website | efb40103f343b9be627ce926156e66fe9f985435 | 8b7532f103a73a467cd903923f7cd2ccfc09d949 | refs/heads/master | 2022-11-25T09:51:27.000385 | 2020-08-03T01:44:22 | 2020-08-03T01:44:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | from django.apps import AppConfig
class YogaScheduleConfig(AppConfig):
name = 'yoga schedule'
| [
"[email protected]"
] | |
5f939ea970bddbb00c0137ac68aa23127fa09de8 | affe80fe068100199dcf64e16c305d1e2b8ca0bf | /official/modeling/progressive/train_lib_test.py | a64b6da3fd504b3c60317df44a150dc891cb416b | [
"Apache-2.0"
] | permissive | farshbafdoustar/models | 2c8c474e2e6f0bb38e1679282e8b64c39f58d3b1 | 3a2e407ce3871551b0074bcf10a0d6ee180bbdb2 | refs/heads/master | 2023-01-31T19:11:23.084582 | 2020-12-19T09:26:44 | 2020-12-19T09:26:44 | 322,809,980 | 0 | 0 | Apache-2.0 | 2020-12-19T09:27:09 | 2020-12-19T09:22:00 | Python | UTF-8 | Python | false | false | 5,807 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the progressive train_lib."""
import os
from absl import flags
from absl.testing import parameterized
import dataclasses
import orbit
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.common import flags as tfm_flags
# pylint: disable=unused-import
from official.common import registry_imports
# pylint: enable=unused-import
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import optimization
from official.modeling.hyperparams import params_dict
from official.modeling.progressive import policies
from official.modeling.progressive import train_lib
from official.modeling.progressive import trainer as prog_trainer_lib
from official.utils.testing import mock_task
FLAGS = flags.FLAGS
tfm_flags.define_flags()
@dataclasses.dataclass
class ProgTaskConfig(cfg.TaskConfig):
pass
@task_factory.register_task_cls(ProgTaskConfig)
class ProgMockTask(policies.ProgressivePolicy, mock_task.MockTask):
"""Progressive task for testing."""
def __init__(self, params: cfg.TaskConfig, logging_dir: str = None):
mock_task.MockTask.__init__(
self, params=params, logging_dir=logging_dir)
policies.ProgressivePolicy.__init__(self)
def num_stages(self):
return 2
def num_steps(self, stage_id):
return 2 if stage_id == 0 else 4
def get_model(self, stage_id, old_model=None):
del stage_id, old_model
return self.build_model()
def get_optimizer(self, stage_id):
"""Build optimizer for each stage."""
params = optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 0.01,
'end_learning_rate': 0.0,
'power': 1.0,
'decay_steps': 10,
},
},
'warmup': {
'polynomial': {
'power': 1,
'warmup_steps': 2,
},
'type': 'polynomial',
}
})
opt_factory = optimization.OptimizerFactory(params)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
return optimizer
def get_train_dataset(self, stage_id):
del stage_id
strategy = tf.distribute.get_strategy()
return orbit.utils.make_distributed_dataset(
strategy, self.build_inputs, None)
def get_eval_dataset(self, stage_id):
del stage_id
strategy = tf.distribute.get_strategy()
return orbit.utils.make_distributed_dataset(
strategy, self.build_inputs, None)
class TrainTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(TrainTest, self).setUp()
self._test_config = {
'trainer': {
'checkpoint_interval': 10,
'steps_per_loop': 10,
'summary_interval': 10,
'train_steps': 10,
'validation_steps': 5,
'validation_interval': 10,
'continuous_eval_timeout': 1,
'optimizer_config': {
'optimizer': {
'type': 'sgd',
},
'learning_rate': {
'type': 'constant'
}
}
},
}
@combinations.generate(
combinations.combine(
distribution_strategy=[
strategy_combinations.default_strategy,
strategy_combinations.cloud_tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
mode='eager',
flag_mode=['train', 'eval', 'train_and_eval'],
run_post_eval=[True, False]))
def test_end_to_end(self, distribution_strategy, flag_mode, run_post_eval):
model_dir = self.get_temp_dir()
experiment_config = cfg.ExperimentConfig(
trainer=prog_trainer_lib.ProgressiveTrainerConfig(),
task=ProgTaskConfig())
experiment_config = params_dict.override_params_dict(
experiment_config, self._test_config, is_strict=False)
with distribution_strategy.scope():
task = task_factory.get_task(experiment_config.task,
logging_dir=model_dir)
_, logs = train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=flag_mode,
params=experiment_config,
model_dir=model_dir,
run_post_eval=run_post_eval)
if run_post_eval:
self.assertNotEmpty(logs)
else:
self.assertEmpty(logs)
if flag_mode == 'eval':
return
self.assertNotEmpty(
tf.io.gfile.glob(os.path.join(model_dir, 'checkpoint')))
# Tests continuous evaluation.
_, logs = train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode='continuous_eval',
params=experiment_config,
model_dir=model_dir,
run_post_eval=run_post_eval)
print(logs)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
d90e9983e678c614e1ad4d95be16bb3c39cb1ccb | 6ccb55befcbc69caa351b8337fdd40e55dbb802f | /venv/bin/update-tld-names | f14760f6fbf606e71bb165ba44b46f3f3723847d | [] | no_license | FiacreT/M-moire | cc0791cbf98bf565ea637e6ec409611bcc596c57 | 4089755191ffc848614247e98bbb641c1933450d | refs/heads/master | 2022-12-12T21:55:23.679854 | 2019-09-06T23:28:03 | 2019-09-06T23:28:03 | 187,702,532 | 2 | 2 | null | 2022-12-08T01:04:58 | 2019-05-20T19:39:21 | Python | UTF-8 | Python | false | false | 192 | #!/home/fiacre/Python-Project/datasploit-master/venv/bin/python2
# We should have absolute imports here
from tld.commands.update_tld_names import main
if __name__ == "__main__":
main()
| [
"[email protected]"
] | ||
c8d425241dc48565765bc90cd2afed9745c89bb4 | ddb185b0cf581d85a1dd733a6d1e5d027ba3e0ca | /phase1/993.py | 9933eaf67c3581eb880d47702addd2ff933ea70c | [] | no_license | GavinPHR/code | 8a319e1223a307e755211b7e9b34c5abb00b556b | b1d8d49633db362bbab246c0cd4bd28305964b57 | refs/heads/master | 2020-05-16T04:09:19.026207 | 2020-04-30T10:00:06 | 2020-04-30T10:00:06 | 182,766,600 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | # Cousins in Binary Tree
from binarytree import TreeNode, makeTree
import collections
class Solution:
def isCousins(self, root: TreeNode, x: int, y: int) -> bool:
if root.val == x or root.val == y:
return False
def re(root, val, level):
if not root:
return
if root.left and root.left.val == val:
return (level + 1, root.val)
if root.right and root.right.val == val:
return (level + 1, root.val)
l = re(root.left, val, level + 1)
r = re(root.right, val, level + 1)
return l if l else r
a = re(root, x, 0)
b = re(root, y, 0)
if a[0] != b[0]:
return False
if a[1] == b[1]:
return False
return True
if __name__ == '__main__':
root = makeTree([1,2,3,None,4,None,5])
s = Solution()
print(s.isCousins(root, 4, 5)) | [
"[email protected]"
] | |
ff7d9e4d8bf6c338b93d2ca521b0c02ba9e27b95 | 109d501eeb83981c058bf1e01f10c7851f3866b5 | /people/forms.py | 22d0e3360774fbe291e21dbe27ebd94aaac1f2fb | [] | no_license | alikhundmiri/revenue_source_directory | 028f8164d7a6a5d62f24b1a0214664718c866291 | 636311269557352d901e47f847b044dc2a7545dc | refs/heads/master | 2022-12-16T20:51:44.327649 | 2018-09-24T13:50:17 | 2018-09-24T13:50:17 | 137,063,499 | 0 | 0 | null | 2022-12-08T02:14:33 | 2018-06-12T11:43:40 | JavaScript | UTF-8 | Python | false | false | 1,237 | py | from django import forms
from django.core.validators import validate_email
from .models import contact_details
# Form for accepting new interviews
class InterviewRequestForm(forms.ModelForm):
class Meta:
model = contact_details
fields = [
'contact_form',
'contact',
]
def clean_contact(self):
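		# 'contact_form' is declared before 'contact' in Meta.fields, so its cleaned
		# value is already available when this field-level clean runs.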
this_contact = self.cleaned_data.get('contact')
this_contact_form = self.cleaned_data.get('contact_form')
if this_contact_form == 'email':
try:
validate_email(this_contact)
except forms.ValidationError:
raise forms.ValidationError("Please enter a Valid Email address")
existing_contact = contact_details.objects.filter(contact=this_contact, contact_form=this_contact_form)
if existing_contact:
raise forms.ValidationError("You already submitted a request with these credentials")
else:
return this_contact
def __init__(self, *args , **kwargs):
super(InterviewRequestForm, self).__init__(*args, **kwargs)
self.fields["contact_form"].help_text = "Select a social media to get in contact with you, example: Twitter, or an email"
self.fields["contact"].help_text = "Enter your User ID or e-mail"
self.fields["contact"].label = "User ID"
self.fields["contact_form"].label = "Platform"
| [
"[email protected]"
] | |
915041a61b6a71d83948b1377ec6af9eacdb1a07 | 89213af925471c5954a12d0fe5bb47dfd988c351 | /tree/0199_right_side_view_BT.py | b9c870d5a7eb7f463f9325d660bcbb7aa25e8deb | [] | no_license | seanchen513/leetcode | be554dd668221b6d03c598090d6684165bc512c5 | 4723a64b00502c824bb9b848a1737478096aa3e1 | refs/heads/master | 2021-11-10T11:50:16.674255 | 2021-11-10T02:57:02 | 2021-11-10T02:57:02 | 237,393,266 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,972 | py | """
199. Binary Tree Right Side View
Medium
Given a binary tree, imagine yourself standing on the right side of it, return the values of the nodes you can see ordered from top to bottom.
Example:
Input: [1,2,3,null,5,null,4]
Output: [1, 3, 4]
Explanation:
1 <---
/ \
2 3 <---
\ \
5 4 <---
"""
#import sys
#sys.path.insert(1, '../tree/')
from binary_tree import TreeNode, print_tree, array_to_bt_lc
from typing import List
###############################################################################
"""
Solution 1: BFS and take last element of each level.
O(n) time
O(n) extra space (due to last level)
O(h) space for output
"""
class Solution:
def rightSideView(self, root: TreeNode) -> List[int]:
if not root:
return []
view = []
level = [root]
while level:
view += [level[-1].val]
next_level = []
for node in level:
if node.left:
next_level.append(node.left)
if node.right:
next_level.append(node.right)
level = next_level
return view
###############################################################################
"""
Solution 2: modified postorder traversal (LRP, left-right-parent).
For each level, the rightmost element is visited first.
O(n) time
O(h) space for recursion stack
O(h) space for dict and output
"""
class Solution2:
def rightSideView(self, root: TreeNode) -> List[int]:
def dfs(node, depth=0):
if not node:
return
dfs(node.right, depth + 1)
dfs(node.left, depth + 1)
if depth not in view:
view[depth] = node.val
view = {}
dfs(root)
return [view[d] for d in range(len(view))]
###############################################################################
if __name__ == "__main__":
def test(arr, comment=None):
root = array_to_bt_lc(arr)
solutions = [Solution(), Solution2()]
res = [s.rightSideView(root) for s in solutions]
print("="*80)
if comment:
print(comment, "\n")
print(arr, "\n")
print_tree(root)
print(f"\nSolutions: {res}")
comment = "Tree w/ depth 1"
arr = [1]
test(arr, comment)
comment = "Tree w/ depth 2"
arr = [1, 2,3]
test(arr, comment)
comment = "Tree w/ depth 3"
arr = [1, 2,3, 4,5,6,7]
test(arr, comment)
comment = ""
arr = [1,None,3,None,7]
test(arr, comment)
comment = ""
arr = [1,2,None,4]
test(arr, comment)
comment = ""
arr = [1, 2,3, 4,None,None,7, 8,None,None,15]
test(arr, comment)
comment = "LC example 1; answer = [1, 3, 4]"
arr = [1, 2,3, None,5,None,4]
test(arr, comment)
| [
"[email protected]"
] | |
d8c4edc6e935f0f2cb02543ebdf08f69695c523c | d77a0d5a18af141d36005eba1769f7384f5ce1d4 | /mDataAn_venv/Lib/site-packages/numpy/testing/tests/test_decorators.py | 69c1c9ad4a3558ad02edf36dd0e0855c6e0df6d9 | [] | no_license | LukasPolon/MData | 32d756d0df8c8847cf45b8def6e5ef760963d895 | 2178a0b2f60c4c638fd696a6e11b0ef801724bf4 | refs/heads/master | 2022-12-11T15:02:07.528855 | 2018-01-07T16:22:58 | 2018-01-07T16:22:58 | 99,687,079 | 1 | 0 | null | 2021-06-01T22:04:39 | 2017-08-08T11:51:11 | Python | UTF-8 | Python | false | false | 4,493 | py | from __future__ import division, absolute_import, print_function
import warnings
from numpy.testing import (dec, assert_, assert_raises, run_module_suite,
SkipTest, KnownFailureException)
def test_slow():
@dec.slow
def slow_func(x, y, z):
pass
assert_(slow_func.slow)
def test_setastest():
@dec.setastest()
def f_default(a):
pass
@dec.setastest(True)
def f_istest(a):
pass
@dec.setastest(False)
def f_isnottest(a):
pass
assert_(f_default.__test__)
assert_(f_istest.__test__)
assert_(not f_isnottest.__test__)
class DidntSkipException(Exception):
pass
def test_skip_functions_hardcoded():
@dec.skipif(True)
def f1(x):
raise DidntSkipException
try:
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except SkipTest:
pass
@dec.skipif(False)
def f2(x):
raise DidntSkipException
try:
f2('a')
except DidntSkipException:
pass
except SkipTest:
raise Exception('Skipped when not expected to')
def test_skip_functions_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.skipif(skip_tester)
def f1(x):
raise DidntSkipException
try:
skip_flag = 'skip me!'
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except SkipTest:
pass
@dec.skipif(skip_tester)
def f2(x):
raise DidntSkipException
try:
skip_flag = 'five is right out!'
f2('a')
except DidntSkipException:
pass
except SkipTest:
raise Exception('Skipped when not expected to')
def test_skip_generators_hardcoded():
@dec.knownfailureif(True, "This test is known to fail")
def g1(x):
for i in range(x):
yield i
try:
for j in g1(10):
pass
except KnownFailureException:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(False, "This test is NOT known to fail")
def g2(x):
for i in range(x):
yield i
raise DidntSkipException('FAIL')
try:
for j in g2(10):
pass
except KnownFailureException:
raise Exception('Marked incorrectly as known failure')
except DidntSkipException:
pass
def test_skip_generators_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.knownfailureif(skip_tester, "This test is known to fail")
def g1(x):
for i in range(x):
yield i
try:
skip_flag = 'skip me!'
for j in g1(10):
pass
except KnownFailureException:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(skip_tester, "This test is NOT known to fail")
def g2(x):
for i in range(x):
yield i
raise DidntSkipException('FAIL')
try:
skip_flag = 'do not skip'
for j in g2(10):
pass
except KnownFailureException:
raise Exception('Marked incorrectly as known failure')
except DidntSkipException:
pass
def test_deprecated():
@dec.deprecated(True)
def non_deprecated_func():
pass
@dec.deprecated()
def deprecated_func():
import warnings
warnings.warn("TEST: deprecated func", DeprecationWarning)
@dec.deprecated()
def deprecated_func2():
import warnings
warnings.warn("AHHHH")
raise ValueError
@dec.deprecated()
def deprecated_func3():
import warnings
warnings.warn("AHHHH")
# marked as deprecated, but does not raise DeprecationWarning
assert_raises(AssertionError, non_deprecated_func)
# should be silent
deprecated_func()
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # do not propagate unrelated warnings
# fails if deprecated decorator just disables test. See #1453.
assert_raises(ValueError, deprecated_func2)
# warning is not a DeprecationWarning
assert_raises(AssertionError, deprecated_func3)
if __name__ == '__main__':
run_module_suite()
| [
"[email protected]"
] | |
476bac64cef86314952c912a9db75dc57df22e7f | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano3676.py | 87a1e1712ffb762de3c1ab0b52a020fbd9e03258 | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/100000/E376F74E-44AE-E947-B57B-37A12C3E004C.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest3676.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"[email protected]"
] | |
430153ab989c70328c2d747dca69ee952a2d761e | 03ae0dee75698f5012d80b218e71500181cd3e68 | /cd2h_repo_project/utils.py | 62197a6d92d58546d86f4a14193d1f23d3f6e3fb | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | galterlibrary/InvenioRDM-at-NU | 0b5e5043f67d0dc12960c00617ad2b7e14f4f6e8 | 5aff6ac7c428c9a61bdf221627bfc05f2280d1a3 | refs/heads/master | 2020-03-20T14:45:50.950239 | 2019-09-26T19:14:45 | 2019-09-26T19:14:45 | 137,494,704 | 6 | 0 | MIT | 2019-09-26T19:14:47 | 2018-06-15T14:06:06 | Python | UTF-8 | Python | false | false | 630 | py | """General utility functions for any module."""
from flask_principal import AnonymousIdentity, Identity, RoleNeed, UserNeed
def get_identity(user):
"""Returns the identity for a given user instance.
This is needed because we are more explicit then Flask-Principal
and it is MUCH more convenient for tests.
"""
if hasattr(user, 'id'):
identity = Identity(user.id)
identity.provides.add(UserNeed(user.id))
else:
return AnonymousIdentity()
for role in getattr(user, 'roles', []):
identity.provides.add(RoleNeed(role.name))
identity.user = user
return identity
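# Hypothetical usage sketch in a test (the names below are illustrative, not part of this module):
#   identity = get_identity(user)
#   assert some_permission.allows(identity)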
| [
"[email protected]"
] | |
457faf21e9fc6f3d361e17dc6880d7510c0822b5 | 2d0bada349646b801a69c542407279cc7bc25013 | /src/vai_optimizer/example/pruning/tensorflow_v1/resnet50_pruning.py | 3f6b05b1cd1911eaf0f109cd895db49dffccf932 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] | permissive | Xilinx/Vitis-AI | 31e664f7adff0958bb7d149883ab9c231efb3541 | f74ddc6ed086ba949b791626638717e21505dba2 | refs/heads/master | 2023-08-31T02:44:51.029166 | 2023-07-27T06:50:28 | 2023-07-27T06:50:28 | 215,649,623 | 1,283 | 683 | Apache-2.0 | 2023-08-17T09:24:55 | 2019-10-16T21:41:54 | Python | UTF-8 | Python | false | false | 2,145 | py | # MIT License
#
# Copyright (c) 2023 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from tf1_nndct.optimization.pruning import IterativePruningRunner
import tensorflow as tf
import numpy as np
from nets.resnet_v2 import resnet_v2_50
def eval_fn(frozen_graph_def: tf.compat.v1.GraphDef) -> float:
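    # Placeholder metric for this example: a real eval_fn would load the frozen
    # graph, evaluate it on validation data, and return the measured accuracy.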
with tf.compat.v1.Session().as_default() as sess:
return 0.5
def main():
with tf.compat.v1.Session() as sess:
opt = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)
images = tf.convert_to_tensor(np.ones((1, 224, 224, 3), dtype=np.float32))
net, _ = resnet_v2_50(images, 1000)
print(net)
loss = tf.reduce_sum(net)
sess.run(tf.global_variables_initializer())
pruner = IterativePruningRunner("resnet50", sess, {}, ["resnet_v2_50/SpatialSqueeze"])
pruner.ana(eval_fn, gpu_ids=['/GPU:0', '/GPU:1'])
shape_tensors, masks = pruner.prune(sparsity=0.5)
variables = tf.trainable_variables()
sess.run(opt.minimize(loss, var_list=variables))
slim_graph_def = pruner.get_slim_graph_def(shape_tensors, masks)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
17329d67a9eef61295267530f6d4eed9da6ec6a4 | a63d907ad63ba6705420a6fb2788196d1bd3763c | /src/api/datahub/access/tests/modules/collector/no_factory_test.py | 7cb318fa5707ad99aaceecb1edeccc1dd7dd0110 | [
"MIT"
] | permissive | Tencent/bk-base | a38461072811667dc2880a13a5232004fe771a4b | 6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2 | refs/heads/master | 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 | NOASSERTION | 2022-04-02T10:30:56 | 2021-06-29T06:10:01 | Python | UTF-8 | Python | false | false | 3,992 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import httpretty
import pytest
from datahub.access.collectors.base_collector import BaseAccessTask
from datahub.access.tests.fixture import conftest
from datahub.access.tests.fixture.access import * # noqa
from datahub.access.tests.utils import post
param = {
"bk_app_code": "bk_dataweb",
"bk_username": "admin",
"data_scenario": "unknow",
"bk_biz_id": 591,
"description": "xx",
"access_raw_data": {
"raw_data_name": "log_new_00011",
"maintainer": "xxxx",
"raw_data_alias": "asdfsaf",
"data_source": "svr",
"data_encoding": "UTF-8",
"sensitivity": "private",
"description": "xx",
},
"access_conf_info": {
"collection_model": {"collection_type": "incr", "start_at": 1, "period": 0},
"filters": {
"delimiter": "|",
"fields": [{"index": 1, "op": "=", "logic_op": "and", "value": "111"}],
},
"resource": {
"scope": [
{
"module_scope": [{"bk_obj_id": "set", "bk_inst_id": 123}],
"host_scope": [{"bk_cloud_id": 1, "ip": "x.x.x.x"}],
"scope_config": {
"paths": [
{
"path": ["/tmp/*.log", "/tmp/*.l", "/tmp/*.aaaz"],
"system": "linux",
}
]
},
}
]
},
},
}
@pytest.mark.django_db
def test_error_factory_deploy_plan():
httpretty.enable()
httpretty.reset()
conftest.mock_user_perm("admin")
conftest.mock_app_perm("bk_dataweb")
conftest.mock_create_data_id()
conftest.mock_get_data_id("unknow")
conftest.mock_collector_hub_deploy_plan()
url = "/v3/access/deploy_plan/"
res = post(url, param)
assert not res["result"]
assert res["code"] == "1577209"
@pytest.mark.django_db
def test_error_factory_deploy():
httpretty.enable()
httpretty.reset()
conftest.mock_user_perm("admin")
conftest.mock_app_perm("bk_dataweb")
url = "/v3/access/collector/unknow/deploy/"
res = post(url, param)
assert not res["result"]
assert res["code"] == "1577209"
@pytest.mark.usefixtures("init_task_log")
@pytest.mark.django_db
def test_task_log():
conftest.mock_user_perm("admin")
conftest.mock_app_perm("bk_dataweb")
task = BaseAccessTask(task_id=100)
task.log("test debug", level="debug", task_log=False, time=None)
| [
"[email protected]"
] | |
ff370d8533a9be416e4441f7a342c23059b406b2 | 973713f993166b1d0c2063f6e84361f05803886d | /Day01-15/09_exercise_8.py | bcd5d38fd6e777cae500b75373d3bdd0c6b4c445 | [
"MIT"
] | permissive | MaoningGuan/Python-100-Days | 20ad669bcc0876b5adfbf2c09b4d25fd4691061a | d36e49d67a134278455438348efc41ffb28b778a | refs/heads/master | 2022-11-17T12:24:45.436100 | 2020-07-18T02:24:42 | 2020-07-18T02:24:42 | 275,157,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,995 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
扑克游戏
"""
import random
class Card(object):
"""一张牌"""
def __init__(self, suite, face):
self._suite = suite
self._face = face
@property
def face(self):
return self._face
@property
def suite(self):
return self._suite
def __str__(self):
if self._face == 1:
face_str = 'A'
elif self._face == 11:
face_str = 'J'
elif self._face == 12:
face_str = 'Q'
elif self._face == 13:
face_str = 'K'
else:
face_str = str(self._face)
return '%s%s' % (self._suite, face_str)
def __repr__(self):
return self.__str__()
class Poker(object):
"""一副牌"""
def __init__(self):
self._cards = [Card(suite, face)
for suite in '♠♥♣♦'
for face in range(1, 14)]
self._current = 0
@property
def cards(self):
return self._cards
def shuffle(self):
"""洗牌(随机乱序"""
self._current = 0
        random.shuffle(self._cards)  # shuffle the card list in place
@property
def next(self):
"""发牌"""
card = self._cards[self._current]
self._current += 1
return card
@property
def has_next(self):
"""还有没有牌"""
return self._current < len(self._cards)
class Player(object):
"""玩家"""
def __init__(self, name):
self._name = name
self._cards_on_hand = []
@property
def name(self):
return self._name
@property
def cards_on_hand(self):
return self._cards_on_hand
def get(self, card):
"""摸牌"""
self._cards_on_hand.append(card)
def arrange(self, card_key):
"""玩家整理手上的牌"""
self._cards_on_hand.sort(key=card_key)
# Sort key: first by suite, then by face value
def get_key(card):
return (card.suite, card.face)
def main():
p = Poker()
p.shuffle()
players = [Player('东邪'), Player('西毒'), Player('南帝'), Player('北丐')]
    game_name = 'Blackjack (21)'
cards_number = 2
for _ in range(cards_number):
for player in players:
player.get(p.next)
print(game_name + ':')
cards_number = []
for player in players:
print(player.name + ':', end=' ')
player.arrange(get_key)
print(player.cards_on_hand)
        # Compute the player's blackjack total: J/Q/K count as 10, and an ace counts as 11 when that does not bust the hand.
pokers_sum = 0
cards = player.cards_on_hand
card_0 = cards[0].face
card_1 = cards[1].face
if 1 not in [card_0, card_1]:
if card_0 in [11, 12, 13]:
card_0 = 10
if card_1 in [11, 12, 13]:
card_1 = 10
pokers_sum = card_0 + card_1
else:
if card_0 == 1 and card_1 == 1:
pokers_sum = 12
else:
if card_0 == 1:
if card_1 in [11, 12, 13]:
card_1 = 10
pokers_sum = card_0 + card_1
number = card_1 + 11
if pokers_sum < number <= 21:
pokers_sum = number
elif card_1 == 1:
if card_0 in [11, 12, 13]:
card_0 = 10
pokers_sum = card_0 + card_1
number = card_0 + 11
if pokers_sum < number <= 21:
pokers_sum = number
cards_number.append(pokers_sum)
print(cards_number)
max_card_num = 0
player = -1
for index, card_number in enumerate(cards_number):
if max_card_num < card_number <= 21:
max_card_num = card_number
player = players[index]
if max_card_num != 0:
        print(f'The winner is {player.name} with {max_card_num} points')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a60c7006ec0c959e30a9e86876873f261277a809 | aa6e1dd07a71a73bc08574b76f9e57a3ce8c8286 | /077.Test_BeeWare_windows/beeware-tutorial/beeware-venv/Lib/site-packages/git/refs/reference.py | aaa9b63fe7fa10fe82bb3632e3410b491b042f8e | [
"MIT"
] | permissive | IvanaXu/PyTools | 0aff5982f50bb300bfa950405192c78473b69537 | 358ae06eef418fde35f424909d4f13049ca9ec7b | refs/heads/master | 2023-06-07T21:45:44.242363 | 2023-06-06T16:00:25 | 2023-06-06T16:00:25 | 163,940,845 | 60 | 8 | MIT | 2022-12-23T02:49:05 | 2019-01-03T07:54:16 | Python | UTF-8 | Python | false | false | 4,408 | py | from git.util import (
LazyMixin,
Iterable,
)
from .symbolic import SymbolicReference
__all__ = ["Reference"]
#{ Utilities
def require_remote_ref_path(func):
"""A decorator raising a TypeError if we are not a valid remote, based on the path"""
def wrapper(self, *args):
if not self.is_remote():
raise ValueError("ref path does not point to a remote reference: %s" % self.path)
return func(self, *args)
# END wrapper
wrapper.__name__ = func.__name__
return wrapper
#}END utilities
class Reference(SymbolicReference, LazyMixin, Iterable):
"""Represents a named reference to any object. Subclasses may apply restrictions though,
i.e. Heads can only point to commits."""
__slots__ = ()
_points_to_commits_only = False
_resolve_ref_on_create = True
_common_path_default = "refs"
def __init__(self, repo, path, check_path=True):
"""Initialize this instance
:param repo: Our parent repository
:param path:
Path relative to the .git/ directory pointing to the ref in question, i.e.
refs/heads/master
:param check_path: if False, you can provide any path. Otherwise the path must start with the
default path prefix of this type."""
if check_path and not path.startswith(self._common_path_default + '/'):
raise ValueError("Cannot instantiate %r from path %s" % (self.__class__.__name__, path))
super(Reference, self).__init__(repo, path)
def __str__(self):
return self.name
#{ Interface
def set_object(self, object, logmsg=None): # @ReservedAssignment
"""Special version which checks if the head-log needs an update as well
:return: self"""
oldbinsha = None
if logmsg is not None:
head = self.repo.head
if not head.is_detached and head.ref == self:
oldbinsha = self.commit.binsha
# END handle commit retrieval
# END handle message is set
super(Reference, self).set_object(object, logmsg)
if oldbinsha is not None:
# /* from refs.c in git-source
# * Special hack: If a branch is updated directly and HEAD
# * points to it (may happen on the remote side of a push
# * for example) then logically the HEAD reflog should be
# * updated too.
# * A generic solution implies reverse symref information,
# * but finding all symrefs pointing to the given branch
# * would be rather costly for this rare event (the direct
# * update of a branch) to be worth it. So let's cheat and
# * check with HEAD only which should cover 99% of all usage
# * scenarios (even 100% of the default ones).
# */
self.repo.head.log_append(oldbinsha, logmsg)
# END check if the head
return self
# NOTE: Don't have to overwrite properties as the will only work without a the log
@property
def name(self):
""":return: (shortest) Name of this reference - it may contain path components"""
# first two path tokens are can be removed as they are
# refs/heads or refs/tags or refs/remotes
tokens = self.path.split('/')
if len(tokens) < 3:
return self.path # could be refs/HEAD
return '/'.join(tokens[2:])
@classmethod
def iter_items(cls, repo, common_path=None):
"""Equivalent to SymbolicReference.iter_items, but will return non-detached
references as well."""
return cls._iter_items(repo, common_path)
#}END interface
#{ Remote Interface
@property
@require_remote_ref_path
def remote_name(self):
"""
:return:
Name of the remote we are a reference of, such as 'origin' for a reference
named 'origin/master'"""
tokens = self.path.split('/')
# /refs/remotes/<remote name>/<branch_name>
return tokens[2]
@property
@require_remote_ref_path
def remote_head(self):
""":return: Name of the remote head itself, i.e. master.
:note: The returned name is usually not qualified enough to uniquely identify
a branch"""
tokens = self.path.split('/')
return '/'.join(tokens[3:])
#} END remote interface
| [
"[email protected]"
] | |
bce5d26895cbbaa98ad04b69f939816ec032ddc7 | 7feebb0a6c7751ad2b1870efd63c85c2d8f670bb | /txaioetcd/_client_aio.py | 6de2f28359bb6d9ba8c403a216e4fef45e309398 | [
"MIT"
] | permissive | om26er/txaio-etcd | edbcfe65ac19c8f4326944f0b0bcae986e7aa3fe | 0ed71ba01ab13acebf874ddf650f880bb1e676a5 | refs/heads/master | 2021-01-02T23:36:01.330887 | 2017-09-14T18:28:21 | 2017-09-14T18:28:21 | 99,502,890 | 0 | 0 | null | 2017-08-06T17:36:17 | 2017-08-06T17:36:17 | null | UTF-8 | Python | false | false | 2,579 | py | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
__all__ = (
'Client',
)
class _None(object):
pass
class Client(object):
"""
etcd asyncio client that talks to the gRPC HTTP gateway endpoint of etcd v3.
See: https://coreos.com/etcd/docs/latest/dev-guide/apispec/swagger/rpc.swagger.json
"""
def __init__(self, loop, url):
pass
def status(self):
raise Exception('not implemented')
def set(self, key, value, lease=None, return_previous=None):
raise Exception('not implemented')
def get(self,
key,
count_only=None,
keys_only=None,
limit=None,
max_create_revision=None,
min_create_revision=None,
min_mod_revision=None,
revision=None,
serializable=None,
sort_order=None,
sort_target=None):
raise Exception('not implemented')
def delete(self, key, return_previous=None):
raise Exception('not implemented')
def watch(self, keys, on_watch, start_revision=None):
raise Exception('not implemented')
def submit(self, txn):
raise Exception('not implemented')
def lease(self, time_to_live, lease_id=None):
raise Exception('not implemented')
| [
"[email protected]"
] | |
20803dc548bd9f4077e54216bfe95ec8e10adf13 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /gfsa/training/simple_train.py | d352630a277e5c92d7087e72fc751cf46dc4ca7d | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,569 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple training runner."""
from absl import app
from absl import flags
import gin
from gfsa.training import simple_runner
flags.DEFINE_string("train_log_dir", None, "Path to log directory.")
flags.DEFINE_string("train_artifacts_dir", None,
"Path to save params and other artifacts.")
flags.DEFINE_multi_string("gin_files", [], "Gin config files to use.")
flags.DEFINE_multi_string("gin_include_dirs", [],
"Directories to search when resolving gin includes.")
flags.DEFINE_multi_string(
"gin_bindings", [],
"Gin bindings to override the values set in the config files.")
flags.DEFINE_enum("task", None, {"maze", "edge_supervision", "var_misuse"},
"Task to run.")
FLAGS = flags.FLAGS
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
# pylint:disable=g-import-not-at-top
if FLAGS.task == "maze":
from gfsa.training import train_maze_lib
train_fn = train_maze_lib.train
elif FLAGS.task == "edge_supervision":
from gfsa.training import train_edge_supervision_lib
train_fn = train_edge_supervision_lib.train
elif FLAGS.task == "var_misuse":
from gfsa.training import train_var_misuse_lib
train_fn = train_var_misuse_lib.train
else:
raise ValueError(f"Unrecognized task {FLAGS.task}")
# pylint:enable=g-import-not-at-top
print("Setting up Gin configuration")
for include_dir in FLAGS.gin_include_dirs:
gin.add_config_file_search_path(include_dir)
gin.bind_parameter("simple_runner.training_loop.artifacts_dir",
FLAGS.train_artifacts_dir)
gin.bind_parameter("simple_runner.training_loop.log_dir", FLAGS.train_log_dir)
gin.parse_config_files_and_bindings(
FLAGS.gin_files,
FLAGS.gin_bindings,
finalize_config=False,
skip_unknown=False)
gin.finalize()
train_fn(runner=simple_runner)
if __name__ == "__main__":
app.run(main)
| [
"[email protected]"
] | |
f86f0b7160e46973c357dad4d8d75a61687f58b3 | ff413ecba8eb6a3f8afc225bd1339abac453202c | /project/admin_bot/keyboards/adding_products_to_package_kb.py | fb360b13d90fd94339878386bb1388ab12bda04c | [] | no_license | Artvell/bot | 561b614fde5d19335736ac390e35814afd6b6180 | 0b85a5efc4c302f522bf23a23fbbbc8a9efc7008 | refs/heads/main | 2023-08-10T17:10:21.500433 | 2021-09-11T12:54:32 | 2021-09-11T12:54:32 | 405,372,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup
def adding_kb(package_id):
keyboard = InlineKeyboardMarkup()
keyboard.add(InlineKeyboardButton("Добавить",switch_inline_query_current_chat=""))
keyboard.add(InlineKeyboardButton("Закончить добавление",callback_data=f"end_adding_{package_id}"))
return keyboard | [
"[email protected]"
] | |
bb9b72d22626e0d25fcc15eb9591c80556396118 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02699/s167779272.py | 5ae2267cef04e2bb394c6351bcb67a32ba3d4e70 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | def main():
S, W = list(map(int, input().split()))
print("unsafe" if S <= W else "safe")
pass
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
6154ea85af213859285844a8e9cbf8f6b018ff55 | d37f798101bc6cc795b3ff7e5f9444ff30b4cd83 | /kubernetes/client/models/v1alpha1_parent_reference.py | 18f7447520f6057f4d3b3f73bc21bd1645ac457f | [
"Apache-2.0"
] | permissive | MorningSong/python | bdd8b9d60b7c2185457fc1bbbc64d098f9682981 | ae7b5ddd219fe09b6ed0be715dcca3377a029584 | refs/heads/master | 2023-08-30T14:41:41.582335 | 2023-08-23T16:15:28 | 2023-08-23T16:15:28 | 139,396,247 | 0 | 0 | Apache-2.0 | 2023-09-14T00:11:24 | 2018-07-02T05:47:43 | Python | UTF-8 | Python | false | false | 6,766 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.27
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1alpha1ParentReference(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'group': 'str',
'name': 'str',
'namespace': 'str',
'resource': 'str',
'uid': 'str'
}
attribute_map = {
'group': 'group',
'name': 'name',
'namespace': 'namespace',
'resource': 'resource',
'uid': 'uid'
}
def __init__(self, group=None, name=None, namespace=None, resource=None, uid=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1ParentReference - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._group = None
self._name = None
self._namespace = None
self._resource = None
self._uid = None
self.discriminator = None
if group is not None:
self.group = group
if name is not None:
self.name = name
if namespace is not None:
self.namespace = namespace
if resource is not None:
self.resource = resource
if uid is not None:
self.uid = uid
@property
def group(self):
"""Gets the group of this V1alpha1ParentReference. # noqa: E501
Group is the group of the object being referenced. # noqa: E501
:return: The group of this V1alpha1ParentReference. # noqa: E501
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this V1alpha1ParentReference.
Group is the group of the object being referenced. # noqa: E501
:param group: The group of this V1alpha1ParentReference. # noqa: E501
:type: str
"""
self._group = group
@property
def name(self):
"""Gets the name of this V1alpha1ParentReference. # noqa: E501
Name is the name of the object being referenced. # noqa: E501
:return: The name of this V1alpha1ParentReference. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1alpha1ParentReference.
Name is the name of the object being referenced. # noqa: E501
:param name: The name of this V1alpha1ParentReference. # noqa: E501
:type: str
"""
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1alpha1ParentReference. # noqa: E501
Namespace is the namespace of the object being referenced. # noqa: E501
:return: The namespace of this V1alpha1ParentReference. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1alpha1ParentReference.
Namespace is the namespace of the object being referenced. # noqa: E501
:param namespace: The namespace of this V1alpha1ParentReference. # noqa: E501
:type: str
"""
self._namespace = namespace
@property
def resource(self):
"""Gets the resource of this V1alpha1ParentReference. # noqa: E501
Resource is the resource of the object being referenced. # noqa: E501
:return: The resource of this V1alpha1ParentReference. # noqa: E501
:rtype: str
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this V1alpha1ParentReference.
Resource is the resource of the object being referenced. # noqa: E501
:param resource: The resource of this V1alpha1ParentReference. # noqa: E501
:type: str
"""
self._resource = resource
@property
def uid(self):
"""Gets the uid of this V1alpha1ParentReference. # noqa: E501
UID is the uid of the object being referenced. # noqa: E501
:return: The uid of this V1alpha1ParentReference. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this V1alpha1ParentReference.
UID is the uid of the object being referenced. # noqa: E501
:param uid: The uid of this V1alpha1ParentReference. # noqa: E501
:type: str
"""
self._uid = uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1ParentReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1ParentReference):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
eafd3efda8542b0d4645d3e90c82145723998525 | 2fb738f3bdabebf32296150405486377dba7812b | /nuitka/freezer/DependsExe.py | 18433052e9e87ad3cdd5729630829cc4c783b534 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | goslion/Nuitka | 31f62c9083a1eaec104d64eeebc0b9fb50560812 | 4a30a987b1586271c31822f574ca2584d1107212 | refs/heads/master | 2023-03-03T08:06:13.420278 | 2021-02-07T22:06:13 | 2021-02-07T22:06:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,923 | py | # Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Interface to depends.exe on Windows.
We use depends.exe to investigate needed DLLs of Python DLLs.
"""
from nuitka.Options import assumeYesForDownloads
from nuitka.utils.Download import getCachedDownload
from nuitka.utils.Utils import getArchitecture
def getDependsExePath():
"""Return the path of depends.exe (for Windows).
Will prompt the user to download if not already cached in AppData
directory for Nuitka.
"""
if getArchitecture() == "x86":
depends_url = "http://dependencywalker.com/depends22_x86.zip"
else:
depends_url = "http://dependencywalker.com/depends22_x64.zip"
return getCachedDownload(
url=depends_url,
is_arch_specific=True,
binary="depends.exe",
flatten=True,
specifity="", # Note: If there ever was an update, put version here.
message="""\
Nuitka will make use of Dependency Walker (http://dependencywalker.com) tool
to analyze the dependencies of Python extension modules.""",
reject="Nuitka does not work in --standalone on Windows without.",
assume_yes_for_downloads=assumeYesForDownloads(),
)
| [
"[email protected]"
] |