filename (stringlengths 13-19) | text (stringlengths 134-1.04M)
---|---|
the-stack_0_19090 | from typing import Tuple, FrozenSet
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
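# These helpers build the remaining comparison and implication operators from the
# msat_make_leq / msat_make_not / msat_make_or primitives imported above:
#   lt(a, b)  = not geq(a, b)        geq(a, b) = leq(b, a)
#   gt(a, b)  = not leq(a, b)        impl(a, b) = or(not a, b)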
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
# r' = r
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
# i >= l -> i' = 0 & l' = l + 1 & !inc_i'
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
# (G F inc_i) -> ! G F r > i
G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
r_gt_i = msat_make_gt(menv, r, i)
n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
loc = Location(env, mgr.Not(inc_i))
loc.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc = Location(env, mgr.LE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Minus(r, n1)))
h_r = Hint("h_r1", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc0 = Location(env, mgr.GE(r, n0), mgr.GE(i, n0),
stutterT=mgr.Equals(x_r, mgr.Plus(r, i)))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r3", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i2", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0),
stutterT=mgr.Equals(x_i, mgr.Plus(i, l)))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i3", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l2", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
return frozenset(res)
|
the-stack_0_19092 | from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class MariaDBGeoBackupEnabled(BaseResourceValueCheck):
def __init__(self):
name = "Ensure that MariaDB server enables geo-redundant backups"
id = "CKV_AZURE_129"
supported_resources = ['azurerm_mariadb_server']
categories = [CheckCategories.BACKUP_AND_RECOVERY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return "geo_redundant_backup_enabled"
check = MariaDBGeoBackupEnabled()
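# Minimal illustration (assumed conf shape -- checkov's HCL parser wraps attribute values
# in lists; this is a sketch, not checkov's real test harness):
# check.scan_resource_conf({'geo_redundant_backup_enabled': [True]})   # -> CheckResult.PASSED
# check.scan_resource_conf({})                                         # -> CheckResult.FAILED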
|
the-stack_0_19094 | # -*- coding: utf-8 -*-
"""
test.apis
~~~~~~~~~
:author: Dave Caraway
:copyright: (c) 2014-2015, Fog Mine LLC
:license: Proprietary, see LICENSE for more details.
templated from https://github.com/ryanolson/cookiecutter-webapp
"""
from app import api
class SecureView(api.BaseView):
@api.secure_endpoint()
def index(self):
return {
"secret": "shhhhhh, keep this quiet",
}
def classy_api(app):
"""Create an Flask-Classy-based API on app"""
bp = api.v1.create_blueprint('test')
SecureView.register(bp)
api.register_blueprint(app, bp)
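# Hypothetical wiring sketch (the real app factory lives in the `app` package and is not
# shown here; `create_app` is an assumed name):
# from app import create_app
# application = create_app()
# classy_api(application)   # mounts SecureView under the versioned 'test' blueprint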
|
the-stack_0_19096 | import json
import time
import boto3
import logging
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Key, Attr
# Set up our logger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
# Connect to DynamoDB
dynamodb = boto3.resource('dynamodb')
pictures_table = dynamodb.Table('fg-pictures-table')
def last_month():
seconds_in_day = 86400
days_in_four_weeks = 28
return int(time.time() - (seconds_in_day * days_in_four_weeks))
def lambda_handler(event, context):
# Request parsing
params = event.get('queryStringParameters', None)
start_key = params.get('key', None) if params else None
# Response formatting
status_code = 200
body = {}
# Scan db items
try:
logger.info('Scanning feed pictures')
# Scan parameters
scan_kwargs = {
'FilterExpression': Attr('modifiedDate').gte(str(last_month()))
}
if start_key:
scan_kwargs['ExclusiveStartKey'] = start_key
response = pictures_table.scan(**scan_kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
            logger.warning(response['Error']['Message'])
status_code = response['ResponseMetadata']['HTTPStatusCode']
body['errors'] = [ response['Error']['Message'] ]
else:
logger.info('Scanned feed pictures')
body['nextKey'] = response.get('LastEvaluatedKey', None)
body['items'] = response.get('Items', [])
body['items'].sort(key=lambda x: x['modifiedDate'], reverse=True)
except ClientError as e:
        logger.warning(e.response['Error']['Message'])
status_code = e.response['ResponseMetadata']['HTTPStatusCode']
body['errors'] = [ e.response['Error']['Message'] ]
return {
'statusCode': status_code,
'body': json.dumps(body)
}
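# Local invocation sketch (illustrative only): the event shape mirrors the parsing above,
# but actually running it needs AWS credentials and the 'fg-pictures-table' DynamoDB table.
# if __name__ == '__main__':
#     print(lambda_handler({'queryStringParameters': {'key': None}}, None))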
|
the-stack_0_19097 | import collections
class Solution:
def numFactoredBinaryTrees(self, A):
"""
:type A: List[int]
:rtype: int
"""
A.sort()
nums, res, trees, factors = set(A), 0, {}, collections.defaultdict(set)
for i, num in enumerate(A):
for n in A[:i]:
if num % n == 0 and num // n in nums: factors[num].add(n)
for root in A:
trees[root] = 1
for fac in factors[root]: trees[root] += trees[fac] * trees[root // fac]
        return sum(trees.values()) % ((10 ** 9) + 7)
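# A small usage sketch (added for illustration, not part of the original solution):
if __name__ == "__main__":
    print(Solution().numFactoredBinaryTrees([2, 4, 5, 10]))  # prints 7
 |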
the-stack_0_19102 | # -*- coding: utf-8 -*-
class HttpResponse(object):
"""Information about an HTTP Response including its status code, returned
headers, and raw body
Attributes:
status_code (int): The status code response from the server that
corresponds to this response.
reason_phrase (string): The reason phrase returned by the server.
headers (dict): A dictionary of headers (key : value) that were
returned with the response
text (string): The Raw body of the HTTP Response as a string
request (HttpRequest): The request that resulted in this response.
"""
def __init__(self,
status_code,
reason_phrase,
headers,
text,
request):
"""Constructor for the HttpResponse class
Args:
status_code (int): The response status code.
reason_phrase (string): The response reason phrase.
headers (dict): The response headers.
text (string): The raw body from the server.
request (HttpRequest): The request that resulted in this response.
"""
self.status_code = status_code
self.reason_phrase = reason_phrase
self.headers = headers
self.text = text
self.request = request
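# Construction sketch (illustrative values; `request` would be the originating HttpRequest):
# resp = HttpResponse(200, 'OK', {'Content-Type': 'application/json'}, '{"ok": true}', request)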
|
the-stack_0_19104 | #! /usr/bin/env python
from itertools import product
import numpy as np
import traceback
import os
import sys
sys.path.append('..')
from lib.problem import Problem
from lib.partitioning import FMPartitioning, SpectralClustering
from lib.partitioning.utils import all_partitions_contiguous
from lib.algorithms import NcfEpi
from benchmarks.benchmark_consts import HOLDOUT_PROBLEMS
OUTPUT_CSV = 'grid-search.csv'
LOG_DIR = 'grid-search-logs'
def print_(*args, file=None):
if file is None:
file = sys.stdout
print(*args, file=file)
file.flush()
def grid_search(
problem_name,
topo_fname,
tm_fname,
num_paths_to_sweep=[4],
edge_disjoint_to_sweep=[True, False],
dist_metrics_to_sweep=['inv-cap'],
partition_algos_to_sweep=['fm_partitioning', 'spectral_clustering'],
num_parts_scale_factors_to_sweep=[1, 2, 3, 4]):
problem = Problem.from_file(topo_fname, tm_fname)
assert problem_name == problem.name
print_(problem.name, tm_fname)
traffic_seed = problem.traffic_matrix.seed
total_demand = np.sum(problem.traffic_matrix.tm)
print_('traffic seed: {}'.format(traffic_seed))
print_('traffic matrix model: {}'.format(problem.traffic_matrix.model))
print_('traffic scale factor: {}'.format(
problem.traffic_matrix.scale_factor))
print_('total demand: {}'.format(total_demand))
num_parts_to_sweep = [
sf * int(np.sqrt(len(problem.G.nodes)))
for sf in num_parts_scale_factors_to_sweep
]
for partition_algo, num_partitions_to_set, num_paths, edge_disjoint, dist_metric in product(
partition_algos_to_sweep, num_parts_to_sweep, num_paths_to_sweep,
edge_disjoint_to_sweep, dist_metrics_to_sweep):
if partition_algo == 'fm_partitioning':
partitioner = FMPartitioning(num_partitions_to_set)
elif partition_algo == 'spectral_clustering':
partitioner = SpectralClustering(num_partitions_to_set)
print_(
'\nNCFlow, {} partitioner, {} partitions, {} paths, edge disjoint {}, dist metric {}'
.format(partition_algo, num_partitions_to_set, num_paths,
edge_disjoint, dist_metric))
run_nc_dir = os.path.join(
LOG_DIR, 'ncflow', partition_algo,
'{}-partitions'.format(num_partitions_to_set),
'{}-paths'.format(num_paths),
'edge-disjoint-{}'.format(edge_disjoint),
'{}-dist-metric'.format(dist_metric))
if not os.path.exists(run_nc_dir):
os.makedirs(run_nc_dir)
with open(
os.path.join(
run_nc_dir,
'{}-ncflow-{}_partitioner-{}_partitions-{}_paths-{}_edge_disjoint-{}_dist_metric.txt'
.format(problem.name, partition_algo,
num_partitions_to_set, num_paths, edge_disjoint,
dist_metric)), 'w') as log:
partition_vector = partitioner.partition(problem)
if not all_partitions_contiguous(problem, partition_vector):
print_(
'Topology {}, partitioner {}, num_partitions_to_set {} did not find a valid partition'
.format(topo_fname, partition_algo, num_partitions_to_set))
continue
try:
ncflow = NcfEpi.new_max_flow(num_paths,
edge_disjoint=edge_disjoint,
dist_metric=dist_metric,
out=log)
ncflow.solve(problem, partitioner)
num_partitions = len(np.unique(ncflow._partition_vector))
size_of_largest_partition = partitioner.size_of_largest_partition
runtime = ncflow.runtime_est(14)
total_flow = ncflow.obj_val
with open(OUTPUT_CSV, 'a') as w:
print_('{},{},{},{},{},{},{},{},{},{}'.format(
problem.name, os.path.basename(tm_fname),
partition_algo, num_partitions,
size_of_largest_partition, num_paths, edge_disjoint,
dist_metric, total_flow, runtime),
file=w)
except:
print_(
'TM {}, {} partitioner, {} partitions, {} paths, edge disjoint {}, dist metric {} failed'
.format(tm_fname, partition_algo, num_partitions_to_set,
num_paths, edge_disjoint, dist_metric))
traceback.print_exc(file=sys.stdout)
if __name__ == '__main__':
with open(OUTPUT_CSV, 'a') as w:
print_(
'problem,tm_fname,partition_algo,num_partitions,size_of_largest_partition,num_paths,edge_disjoint,dist_metric,total_flow,runtime',
file=w)
for problem_name, topo_fname, tm_fname in HOLDOUT_PROBLEMS:
grid_search(problem_name, topo_fname, tm_fname)
|
the-stack_0_19105 | from typing import Tuple, Union
import numpy as np
import metrohash
from cached_property import cached_property
import logging
def _leading_zeros64(x: np.uint64, num_bits: int = 64) -> int:
"""
LeadingZeros64 returns the number of leading zero bits in x; the result is 64 for x == 0.
"""
return (np.binary_repr(x, num_bits) + "1").index("1")
def _metro_hash_128(val: bytes, seed: int = 1337):
h: bytes = metrohash.metrohash128(val, seed)
h1, h2 = np.frombuffer(h, dtype=np.uint64, offset=0)
return h1, h2
class HyperMinHash:
"""
HyperMinHash is a sketch for cardinality estimation based on LogLog counting
"""
def __init__(self, p: int = 14, q: int = 6, r: int = 10, c: float = 0.169919487159739093975315012348):
"""
:param p: number of bits for each register
:param q: number of bits for the LogLog hash
:param r: number of bits for the bbit hash
"""
assert p - r >= 0
self.p = p
self.q = q
self.r = r
self._c = c
logging.debug(f"New HyperMinHash({self.m}).")
self.reg = np.zeros(self.m, dtype=np.uint16)
@cached_property
def m(self):
return np.uint32(1 << self.p)
@cached_property
def _max(self):
return np.uint32(64 - self.p)
@cached_property
def _maxX(self):
return np.uint64(np.iinfo(np.uint64).max >> self._max)
@cached_property
def _alpha(self):
return 0.7213 / (1 + 1.079 / np.float64(self.m))
@cached_property
def _2q(self):
return 1 << self.q
@cached_property
def _2r(self):
return 1 << self.r
@cached_property
def _u64_p(self):
return np.uint64(self.p)
@cached_property
def _mr(self):
return np.uint64(64) - np.uint64(self.r)
def lz(self, val: np.uint16) -> np.uint8:
return np.uint8(val >> (16 - self.q))
def _add_hash(self, x: np.uint64, y: np.uint64) -> None:
"""
AddHash takes in a "hashed" value (bring your own hashing)
"""
k = x >> self._max
lz = _leading_zeros64((x << self._u64_p) ^ self._maxX) + 1
sig = y << self._mr >> self._mr
sig = np.uint16(sig)
reg = np.uint16((lz << self.r) | sig)
self.reg[k] = max(self.reg[k], reg)
def add(self, value: Union[bytes, str, int]) -> None:
"""
Add inserts a value into the sketch
"""
if isinstance(value, int):
value = str(value)
if isinstance(value, str):
value = str.encode(value)
logging.debug(f"HyperMinHash.add({value}).")
h1, h2 = _metro_hash_128(value)
self._add_hash(h1, h2)
def extend(self, args):
for arg in args:
self.add(arg)
@staticmethod
def _beta(ez: np.float64) -> np.float64:
zl = np.log(ez + 1)
val = np.polyval([0.00042419, -0.005384159, 0.03738027, -0.09237745, 0.16339839, 0.17393686, 0.070471823, -0.370393911 * ez], zl)
return np.float64(val)
def reg_sum_and_zeros(self) -> Tuple[np.float64, np.float64]:
lz = np.uint8(self.reg >> (16 - self.q))
return \
np.float64((1 / np.power(2, np.float64(lz))).sum()), \
np.float64((lz == 0).sum())
def cardinality(self) -> np.uint64:
"""
Cardinality returns the number of unique elements added to the sketch
"""
sm, ez = self.reg_sum_and_zeros()
res = np.uint64(self._alpha * np.float64(self.m) * (np.float64(self.m) - ez) / (self._beta(ez) + sm))
logging.debug(f"HyperMinHash.cardinality sm={sm}, ez={ez}, res={res}.")
return res
def __len__(self):
return int(self.cardinality())
def merge(self, other: "HyperMinHash") -> "HyperMinHash":
"""
Merge returns a new union sketch of both sk and other
"""
if len(self.reg) != len(other.reg):
raise ValueError(f"self / other have different lengths: {len(self.reg)} / {len(other.reg)}.")
self.reg = np.maximum(self.reg, other.reg)
return self
def similarity(self, other: "HyperMinHash") -> np.float64:
"""
Similarity return a Jaccard Index similarity estimation
"""
c = np.float64(((self.reg != 0) & (self.reg == other.reg)).sum())
n = np.float64(((self.reg != 0) | (other.reg != 0)).sum())
if c == 0:
return np.float64(0)
crd_slf = np.float64(self.cardinality())
crd_otr = np.float64(other.cardinality())
ec = self._approximate_expected_collisions(crd_slf, crd_otr)
# FIXME: must be a better way to pre-detect this
if c < ec:
return np.float64(0)
res = np.float64((c - ec) / n)
logging.debug(f"HyperMinHash.similarity "
f"c={c}, n={n}, crd_slf={crd_slf}, crd_otr={crd_otr}, ec={ec}, res={res}.")
return res
    def _approximate_expected_collisions(self, n: np.float64, m: np.float64) -> np.float64:
if n < m:
n, m = m, n
if n > np.power(2, np.power(2, self.q) + self.r):
return np.iinfo(np.uint64).max
elif n > np.power(2, self.p + 5):
d = (4 * n / m) / np.power((1 + n) / m, 2)
return self._c * np.power(2, self.p - self.r) * d + 0.5
else:
return self._expected_collision(n, m) / np.float64(self.p)
    def _expected_collision(self, n: np.float64, m: np.float64) -> np.float64:
x = np.float64(0)
for i in range(1, self._2q + 1):
for j in range(1, self._2r + 1):
den = np.power(2, self.p + self.r + i - (i == self._2q))
if den <= 0:
b1: np.float64 = np.float64(0)
b2: np.float64 = np.float64(0)
else:
if i != self._2q:
b1: np.float64 = (self._2r + j) / den
b2: np.float64 = (self._2r + j + 1) / den
else:
b1: np.float64 = j / den
b2: np.float64 = (j + 1) / den
prx = np.power(1 - b2, n) - np.power(1 - b1, n)
pry = np.power(1 - b2, m) - np.power(1 - b1, m)
x += (prx * pry)
return (x * np.float64(self.p)) + 0.5
def intersection(self, other: "HyperMinHash") -> np.uint64:
"""
Intersection returns number of intersections between sk and other
"""
sim = self.similarity(other)
return np.uint64((sim * np.float64(self.merge(other).cardinality()) + 0.5))
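# A small self-test sketch (not part of the original module); the printed values are
# estimates, so they are only expected to be close to the true cardinality / Jaccard index.
if __name__ == "__main__":
    a = HyperMinHash()
    b = HyperMinHash()
    a.extend(str(x) for x in range(2000))
    b.extend(str(x) for x in range(1000, 3000))
    print("cardinality of a:", len(a))        # roughly 2000
    print("Jaccard(a, b):", a.similarity(b))  # true value is 1000/3000 = 1/3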
|
the-stack_0_19106 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Tuple
from federatedml.protobuf.generated.feature_selection_meta_pb2 import FeatureSelectionMeta
from federatedml.protobuf.generated.feature_selection_param_pb2 import FeatureSelectionParam, \
FeatureSelectionFilterParam, FeatureValue, LeftCols
from federatedml.protobuf.model_migrate.converter.converter_base import AutoReplace
from federatedml.protobuf.model_migrate.converter.converter_base import ProtoConverterBase
class HeteroFeatureSelectionConverter(ProtoConverterBase):
def convert(self, param: FeatureSelectionParam, meta: FeatureSelectionMeta,
guest_id_mapping: Dict,
host_id_mapping: Dict,
arbiter_id_mapping: Dict
) -> Tuple:
replacer = AutoReplace(guest_id_mapping, host_id_mapping, arbiter_id_mapping)
host_col_name_objs = list(param.host_col_names)
for col_obj in host_col_name_objs:
old_party_id = col_obj.party_id
col_obj.party_id = str(host_id_mapping[int(old_party_id)])
col_names = list(col_obj.col_names)
for idx, col_name in enumerate(col_names):
col_obj.col_names[idx] = replacer.replace(col_name)
filter_results = list(param.results)
new_results = []
for idx, result in enumerate(filter_results):
host_feature_values = list(result.host_feature_values)
new_feature_value_list = []
for this_host in host_feature_values:
feature_values = dict(this_host.feature_values)
new_feature_values = {replacer.replace(k): v for k, v in feature_values.items()}
new_feature_value_list.append(FeatureValue(feature_values=new_feature_values))
left_col_list = list(result.host_left_cols)
new_host_left_col = []
for left_col_obj in left_col_list:
original_cols = [replacer.replace(x) for x in left_col_obj.original_cols]
left_cols = {replacer.replace(k): v for k, v in dict(left_col_obj.left_cols).items()}
new_host_left_col.append(LeftCols(original_cols=original_cols,
left_cols=left_cols))
new_result = FeatureSelectionFilterParam(feature_values=result.feature_values,
host_feature_values=new_feature_value_list,
left_cols=result.left_cols,
host_left_cols=new_host_left_col,
filter_name=result.filter_name)
new_results.append(new_result)
param = FeatureSelectionParam(
results=new_results,
final_left_cols=param.final_left_cols,
col_names=param.col_names,
host_col_names=param.host_col_names,
header=param.header
)
return param, meta
|
the-stack_0_19107 | import unittest
from nose.plugins.attrib import attr
import os
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
from jnpr.junos import Device
from jnpr.junos.utils.fs import FS
from jnpr.junos.exception import RpcError
from mock import patch, MagicMock, call
from lxml import etree
__author__ = "Nitin Kumar, Rick Sherman"
__credits__ = "Jeremy Schulman"
@attr('unit')
class TestFS(unittest.TestCase):
@patch('ncclient.manager.connect')
def setUp(self, mock_connect):
mock_connect.side_effect = self._mock_manager
self.dev = Device(host='1.1.1.1', user='rick', password='password123',
gather_facts=False)
self.dev.open()
self.fs = FS(self.dev)
@patch('jnpr.junos.device.warnings')
def test_cat_wrong_path_return_none(self, mock_warnings):
path = 'test/report'
self.assertEqual(self.fs.cat(path), None)
def test_cat(self):
self.fs._dev.rpc.file_show = MagicMock(side_effect=self._mock_manager)
path = 'test/cat.txt'
self.assertTrue('testing cat functionality' in self.fs.cat(path))
self.fs._dev.rpc.file_show.assert_called_with(filename='test/cat.txt')
def test_cwd(self):
self.fs._dev.rpc.set_cli_working_directory = MagicMock(
side_effect=self._mock_manager)
folder = 'change/directory'
self.assertEqual('change/directory', self.fs.cwd(folder))
self.fs._dev.rpc.set_cli_working_directory.\
assert_called_with(directory='change/directory')
@patch('jnpr.junos.Device.execute')
def test_pwd(self, mock_execute):
mock_execute.side_effect = MagicMock(side_effect=self._mock_manager)
self.fs.pwd()
self.assertEqual(self.fs.pwd(), '/cf/var/home/rick')
@patch('jnpr.junos.device.warnings')
def test_checksum_return_none(self, mock_warnings):
path = 'test/report'
self.assertEqual(self.fs.checksum(path), None)
def test_checksum_unknown_calc(self):
path = 'test/report'
self.assertRaises(ValueError, self.fs.checksum, path=path, calc='abc')
def test_checksum_return_rsp(self):
self.fs.dev.rpc.get_sha256_checksum_information = \
MagicMock(side_effect=self._mock_manager)
path = 'test/checksum'
self.assertEqual(self.fs.checksum(path, 'sha256'), 'xxxx')
self.fs.dev.rpc.get_sha256_checksum_information.\
assert_called_with(path='test/checksum')
def test_stat_calling___decode_file(self):
path = 'test/stat/decode_file'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.stat(path),
{'owner': 'pqr', 'path': '/var/abc.sh',
'permissions': 755,
'permissions_text': '-rwxr-xr-x', 'size': 2,
'ts_date': 'Mar 13 06:54',
'ts_epoc': '1394693680',
'type': 'file'})
def test_stat_calling___decode_dir(self):
path = 'test/stat/decode_dir'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.stat(path),
{'path': '/var', 'type': 'dir', 'file_count': 1,
'size': 2})
def test_stat_return_none(self):
path = 'test/abc'
self.fs.dev.rpc.file_list = MagicMock()
self.fs.dev.rpc.file_list.find.return_value = 'output'
self.assertEqual(self.fs.stat(path), None)
def test_ls_calling___decode_file(self):
path = 'test/stat/decode_file'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.ls(path),
{'owner': 'pqr', 'path': '/var/abc.sh',
'permissions': 755,
'permissions_text': '-rwxr-xr-x', 'size': 2,
'ts_date': 'Mar 13 06:54',
'ts_epoc': '1394693680',
'type': 'file'})
def test_ls_calling___decode_dir(self):
path = 'test/stat/decode_dir'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.ls(path),
{'files':
{'abc': {'permissions_text': 'drwxr-xr-x',
'ts_date': 'Feb 17 15:30',
'ts_epoc': '1392651039',
'owner': 'root', 'path': 'abc',
'size': 2, 'type': 'dir',
'permissions': 555}},
'path': '/var', 'type': 'dir',
'file_count': 1,
'size': 2})
def test_ls_return_none(self):
path = 'test/abc'
self.fs.dev.rpc.file_list = MagicMock()
self.fs.dev.rpc.file_list.find.return_value = 'output'
self.assertEqual(self.fs.ls(path), None)
@patch('jnpr.junos.utils.fs.FS._decode_file')
def test_ls_link_path_false(self, mock_decode_file):
mock_decode_file.get.return_value = False
path = 'test/stat/decode_file'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.fs.ls(path, followlink=False)
mock_decode_file.assert_has_calls([call().get('link')])
def test_ls_brief_true(self):
path = 'test/stat/decode_dir'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.ls(path, brief=True),
{'files': ['abc'], 'path': '/var',
'type': 'dir', 'file_count': 1, 'size': 2})
def test_ls_calling___decode_dir_type_symbolic_link(self):
path = 'test/stat/decode_symbolic_link'
self.fs.dev.rpc.file_list = \
MagicMock(side_effect=self._mock_manager)
self.assertEqual(self.fs.ls(path),
{'files':
{'abc': {'permissions_text': 'drwxr-xr-x',
'ts_date': 'Feb 17 15:30',
'link': 'symlink test',
'ts_epoc': '1392651039',
'owner': 'root', 'path': 'abc',
'size': 2, 'type': 'link',
'permissions': 555}},
'path': '/var', 'type': 'dir', 'file_count': 1,
'size': 2})
def test_rm_return_true(self):
self.fs.dev.rpc.file_delete = MagicMock(return_value=True)
path = 'test/abc'
self.assertTrue(self.fs.rm(path))
self.fs.dev.rpc.file_delete.assert_called_once_with(
path='test/abc')
def test_rm_return_false(self):
path = 'test/abc'
self.fs.dev.rpc.file_delete = MagicMock(return_value=False)
self.assertFalse(self.fs.rm(path))
self.fs.dev.rpc.file_delete.assert_called_once_with(
path='test/abc')
def test_copy_return_true(self):
self.fs.dev.rpc.file_copy = MagicMock()
initial = 'test/abc'
final = 'test/xyz'
self.assertTrue(self.fs.cp(initial, final))
self.fs.dev.rpc.file_copy.assert_called_once_with(
source='test/abc',
destination='test/xyz')
def test_copy_return_false(self):
initial = 'test/abc'
final = 'test/xyz'
self.fs.dev.rpc.file_copy = MagicMock(side_effect=Exception)
self.assertFalse(self.fs.cp(initial, final))
self.fs.dev.rpc.file_copy.assert_called_once_with(
source='test/abc',
destination='test/xyz')
def test_move_return_true(self):
self.fs.dev.rpc.file_rename = MagicMock(return_value=True)
initial = 'test/abc'
final = 'test/xyz'
self.assertTrue(self.fs.mv(initial, final))
self.fs.dev.rpc.file_rename.assert_called_once_with(
source='test/abc',
destination='test/xyz')
def test_move_return_false(self):
initial = 'test/abc'
final = 'test/xyz'
self.fs.dev.rpc.file_rename = MagicMock(return_value=False)
self.assertFalse(self.fs.mv(initial, final))
self.fs.dev.rpc.file_rename.assert_called_once_with(
source='test/abc',
destination='test/xyz')
def test_tgz_return_true(self):
src = 'test/tgz.txt'
dst = 'test/xyz'
self.fs.dev.rpc.file_archive = MagicMock(return_value=True)
self.assertTrue(self.fs.tgz(src, dst))
self.fs.dev.rpc.file_archive.assert_called_once_with(
source='test/tgz.txt',
destination='test/xyz', compress=True)
@patch('jnpr.junos.Device.execute')
def test_tgz_return_error(self, mock_execute):
mock_execute.side_effect = self._mock_manager
src = 'test/tgz.txt'
dst = 'test/xyz'
self.assertTrue('testing tgz' in self.fs.tgz(src, dst))
@patch('jnpr.junos.utils.fs.StartShell')
def test_rmdir(self, mock_StartShell):
path = 'test/rmdir'
print(self.fs.rmdir(path))
calls = [
call().__enter__(),
call().__enter__().run('rmdir test/rmdir'),
call().__exit__(None, None, None)]
mock_StartShell.assert_has_calls(calls)
@patch('jnpr.junos.utils.fs.StartShell')
def test_mkdir(self, mock_StartShell):
path = 'test/mkdir'
print(self.fs.mkdir(path))
calls = [
call().__enter__(),
call().__enter__().run('mkdir -p test/mkdir'),
call().__exit__(None, None, None)]
mock_StartShell.assert_has_calls(calls)
@patch('jnpr.junos.utils.fs.StartShell')
def test_symlink(self, mock_StartShell):
src = 'test/tgz.txt'
dst = 'test/xyz'
print(self.fs.symlink(src, dst))
calls = [
call().__enter__(),
call().__enter__().run('ln -sf test/tgz.txt test/xyz'),
call().__exit__(None, None, None)]
mock_StartShell.assert_has_calls(calls)
@patch('jnpr.junos.Device.execute')
def test_storage_usage(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.fs.storage_usage(),
{'/dev/abc':
{'avail_block': 234234,
'used_blocks': 2346455, 'used_pct': '1',
'mount': '/', 'total_blocks': 567431,
'avail': '2F', 'used': '481M',
'total': '4F'}})
@patch('jnpr.junos.Device.execute')
def test_directory_usage(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.fs.directory_usage(path="/var/tmp", depth=1),
{'/var/tmp': {'blocks': 456076, 'bytes': 233510912,
'size': '223M'},
'/var/tmp/gres-tp': {'blocks': 68, 'bytes': 34816,
'size': '34K'},
'/var/tmp/install': {'blocks': 4, 'bytes': 2048,
'size': '2.0K'},
'/var/tmp/pics': {'blocks': 4, 'bytes': 2048,
'size': '2.0K'},
'/var/tmp/rtsdb': {'blocks': 4, 'bytes': 2048,
'size': '2.0K'},
'/var/tmp/sec-download': {'blocks': 8, 'bytes': 4096,
'size': '4.0K'},
'/var/tmp/vi.recover': {'blocks': 4, 'bytes': 2048,
'size': '2.0K'}}
)
@patch('jnpr.junos.Device.execute')
def test_directory_usage_error(self, mock_execute):
mock_execute.return_value = etree.fromstring("""
<directory-usage-information>
<directory>
<used-space used-blocks="456076">
223M
</used-space>
</directory>
</directory-usage-information>""")
self.assertRaises(
RpcError,
self.fs.directory_usage,
path="/var/tmp",
depth=1)
@patch('jnpr.junos.Device.execute')
def test_directory_usage_no_directory(self, mock_execute):
mock_execute.side_effect = self._mock_manager_error1
self.assertRaises(
RpcError,
self.fs.directory_usage,
path="/var/tmp",
depth="1")
@patch('jnpr.junos.Device.execute')
def test_directory_usage_no_dir_name(self, mock_execute):
mock_execute.side_effect = self._mock_manager_error2
self.assertRaises(
RpcError,
self.fs.directory_usage,
path="/var/tmp",
depth="1")
@patch('jnpr.junos.Device.execute')
def test_storage_cleanup(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.fs.storage_cleanup(),
{'/var/abc.txt':
{'ts_date': 'Apr 25 10:38', 'size': 11}})
@patch('jnpr.junos.Device.execute')
def test_storage_cleanup_check(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.assertEqual(self.fs.storage_cleanup_check(),
{'/var/abc.txt':
{'ts_date': 'Apr 25 10:38', 'size': 11}})
def _read_file(self, fname):
from ncclient.xml_ import NCElement
fpath = os.path.join(os.path.dirname(__file__),
'rpc-reply', fname)
foo = open(fpath).read()
if (fname == 'get-rpc-error.xml' or
fname == 'get-index-error.xml' or
fname == 'get-system-core-dumps.xml'):
rpc_reply = NCElement(foo, self.dev._conn._device_handler
.transform_reply())
elif (fname == 'show-configuration.xml' or
fname == 'show-system-alarms.xml' or
fname == 'set-cli-working-directory.xml'):
rpc_reply = NCElement(foo, self.dev._conn._device_handler
.transform_reply())._NCElement__doc
else:
rpc_reply = NCElement(foo, self.dev._conn._device_handler
.transform_reply())._NCElement__doc[0]
return rpc_reply
def _mock_manager(self, *args, **kwargs):
if kwargs:
# if 'path' in kwargs and 'detail' in kwargs:
# return self._read_file('dir_list_detail.xml')
if 'path' in kwargs:
if kwargs['path'] == 'test/stat/decode_dir':
return self._read_file('file-list_dir.xml')
elif kwargs['path'] == 'test/stat/decode_file':
return self._read_file('file-list_file.xml')
elif kwargs['path'] == 'test/checksum':
return self._read_file('checksum.xml')
elif kwargs['path'] == 'test/stat/decode_symbolic_link':
return self._read_file('file-list_symlink.xml')
if 'directory' in kwargs:
if kwargs['directory'] == 'change/directory':
return self._read_file('set-cli-working-directory.xml')
if 'filename' in kwargs:
if kwargs['filename'] == 'test/cat.txt':
return self._read_file('file-show.xml')
device_params = kwargs['device_params']
device_handler = make_device_handler(device_params)
session = SSHSession(device_handler)
return Manager(session, device_handler)
elif args:
if args[0].tag == 'command':
if args[0].text == 'show cli directory':
return self._read_file('show-cli-directory.xml')
elif args[0].tag == 'get-system-storage':
return self._read_file('get-system-storage.xml')
elif args[0].tag == 'get-directory-usage-information':
return self._read_file('get-directory-usage-information.xml')
elif args[0].tag == 'request-system-storage-cleanup':
return self._read_file('request-system-storage-cleanup.xml')
elif args[0].tag == 'file-archive':
return self._read_file('file-archive.xml')
def _mock_manager_error1(self, *args, **kwargs):
if args:
if args[0].tag == 'get-directory-usage-information':
return self._read_file(
'get-directory-usage-information_error1.xml')
def _mock_manager_error2(self, *args, **kwargs):
if args:
if args[0].tag == 'get-directory-usage-information':
return self._read_file(
'get-directory-usage-information_error2.xml')
|
the-stack_0_19108 | """Tests for Gosper's algorithm for hypergeometric summation. """
from sympy import binomial, factorial, gamma, Poly, S, simplify, sqrt, exp, \
log, Symbol, pi, Rational
from sympy.abc import a, b, j, k, m, n, r, x
from sympy.concrete.gosper import gosper_normal, gosper_sum, gosper_term
def test_gosper_normal():
eq = 4*n + 5, 2*(4*n + 1)*(2*n + 3), n
assert gosper_normal(*eq) == \
(Poly(Rational(1, 4), n), Poly(n + Rational(3, 2)), Poly(n + Rational(1, 4)))
assert gosper_normal(*eq, polys=False) == \
(Rational(1, 4), n + Rational(3, 2), n + Rational(1, 4))
def test_gosper_term():
assert gosper_term((4*k + 1)*factorial(
k)/factorial(2*k + 1), k) == (-k - S.Half)/(k + Rational(1, 4))
def test_gosper_sum():
assert gosper_sum(1, (k, 0, n)) == 1 + n
assert gosper_sum(k, (k, 0, n)) == n*(1 + n)/2
assert gosper_sum(k**2, (k, 0, n)) == n*(1 + n)*(1 + 2*n)/6
assert gosper_sum(k**3, (k, 0, n)) == n**2*(1 + n)**2/4
assert gosper_sum(2**k, (k, 0, n)) == 2*2**n - 1
assert gosper_sum(factorial(k), (k, 0, n)) is None
assert gosper_sum(binomial(n, k), (k, 0, n)) is None
assert gosper_sum(factorial(k)/k**2, (k, 0, n)) is None
assert gosper_sum((k - 3)*factorial(k), (k, 0, n)) is None
assert gosper_sum(k*factorial(k), k) == factorial(k)
assert gosper_sum(
k*factorial(k), (k, 0, n)) == n*factorial(n) + factorial(n) - 1
assert gosper_sum((-1)**k*binomial(n, k), (k, 0, n)) == 0
assert gosper_sum((
-1)**k*binomial(n, k), (k, 0, m)) == -(-1)**m*(m - n)*binomial(n, m)/n
assert gosper_sum((4*k + 1)*factorial(k)/factorial(2*k + 1), (k, 0, n)) == \
(2*factorial(2*n + 1) - factorial(n))/factorial(2*n + 1)
# issue 6033:
assert gosper_sum(
n*(n + a + b)*a**n*b**n/(factorial(n + a)*factorial(n + b)), \
(n, 0, m)).simplify() == -exp(m*log(a) + m*log(b))*gamma(a + 1) \
*gamma(b + 1)/(gamma(a)*gamma(b)*gamma(a + m + 1)*gamma(b + m + 1)) \
+ 1/(gamma(a)*gamma(b))
def test_gosper_sum_indefinite():
assert gosper_sum(k, k) == k*(k - 1)/2
assert gosper_sum(k**2, k) == k*(k - 1)*(2*k - 1)/6
assert gosper_sum(1/(k*(k + 1)), k) == -1/k
assert gosper_sum(-(27*k**4 + 158*k**3 + 430*k**2 + 678*k + 445)*gamma(2*k
+ 4)/(3*(3*k + 7)*gamma(3*k + 6)), k) == \
(3*k + 5)*(k**2 + 2*k + 5)*gamma(2*k + 4)/gamma(3*k + 6)
def test_gosper_sum_parametric():
assert gosper_sum(binomial(S.Half, m - j + 1)*binomial(S.Half, m + j), (j, 1, n)) == \
n*(1 + m - n)*(-1 + 2*m + 2*n)*binomial(S.Half, 1 + m - n)* \
binomial(S.Half, m + n)/(m*(1 + 2*m))
def test_gosper_sum_algebraic():
assert gosper_sum(
n**2 + sqrt(2), (n, 0, m)) == (m + 1)*(2*m**2 + m + 6*sqrt(2))/6
def test_gosper_sum_iterated():
f1 = binomial(2*k, k)/4**k
f2 = (1 + 2*n)*binomial(2*n, n)/4**n
f3 = (1 + 2*n)*(3 + 2*n)*binomial(2*n, n)/(3*4**n)
f4 = (1 + 2*n)*(3 + 2*n)*(5 + 2*n)*binomial(2*n, n)/(15*4**n)
f5 = (1 + 2*n)*(3 + 2*n)*(5 + 2*n)*(7 + 2*n)*binomial(2*n, n)/(105*4**n)
assert gosper_sum(f1, (k, 0, n)) == f2
assert gosper_sum(f2, (n, 0, n)) == f3
assert gosper_sum(f3, (n, 0, n)) == f4
assert gosper_sum(f4, (n, 0, n)) == f5
# the AeqB tests test expressions given in
# www.math.upenn.edu/~wilf/AeqB.pdf
def test_gosper_sum_AeqB_part1():
f1a = n**4
f1b = n**3*2**n
f1c = 1/(n**2 + sqrt(5)*n - 1)
f1d = n**4*4**n/binomial(2*n, n)
f1e = factorial(3*n)/(factorial(n)*factorial(n + 1)*factorial(n + 2)*27**n)
f1f = binomial(2*n, n)**2/((n + 1)*4**(2*n))
f1g = (4*n - 1)*binomial(2*n, n)**2/((2*n - 1)**2*4**(2*n))
f1h = n*factorial(n - S.Half)**2/factorial(n + 1)**2
g1a = m*(m + 1)*(2*m + 1)*(3*m**2 + 3*m - 1)/30
g1b = 26 + 2**(m + 1)*(m**3 - 3*m**2 + 9*m - 13)
g1c = (m + 1)*(m*(m**2 - 7*m + 3)*sqrt(5) - (
3*m**3 - 7*m**2 + 19*m - 6))/(2*m**3*sqrt(5) + m**4 + 5*m**2 - 1)/6
g1d = Rational(-2, 231) + 2*4**m*(m + 1)*(63*m**4 + 112*m**3 + 18*m**2 -
22*m + 3)/(693*binomial(2*m, m))
g1e = Rational(-9, 2) + (81*m**2 + 261*m + 200)*factorial(
3*m + 2)/(40*27**m*factorial(m)*factorial(m + 1)*factorial(m + 2))
g1f = (2*m + 1)**2*binomial(2*m, m)**2/(4**(2*m)*(m + 1))
g1g = -binomial(2*m, m)**2/4**(2*m)
g1h = 4*pi -(2*m + 1)**2*(3*m + 4)*factorial(m - S.Half)**2/factorial(m + 1)**2
g = gosper_sum(f1a, (n, 0, m))
assert g is not None and simplify(g - g1a) == 0
g = gosper_sum(f1b, (n, 0, m))
assert g is not None and simplify(g - g1b) == 0
g = gosper_sum(f1c, (n, 0, m))
assert g is not None and simplify(g - g1c) == 0
g = gosper_sum(f1d, (n, 0, m))
assert g is not None and simplify(g - g1d) == 0
g = gosper_sum(f1e, (n, 0, m))
assert g is not None and simplify(g - g1e) == 0
g = gosper_sum(f1f, (n, 0, m))
assert g is not None and simplify(g - g1f) == 0
g = gosper_sum(f1g, (n, 0, m))
assert g is not None and simplify(g - g1g) == 0
g = gosper_sum(f1h, (n, 0, m))
# need to call rewrite(gamma) here because we have terms involving
# factorial(1/2)
assert g is not None and simplify(g - g1h).rewrite(gamma) == 0
def test_gosper_sum_AeqB_part2():
f2a = n**2*a**n
f2b = (n - r/2)*binomial(r, n)
f2c = factorial(n - 1)**2/(factorial(n - x)*factorial(n + x))
g2a = -a*(a + 1)/(a - 1)**3 + a**(
m + 1)*(a**2*m**2 - 2*a*m**2 + m**2 - 2*a*m + 2*m + a + 1)/(a - 1)**3
g2b = (m - r)*binomial(r, m)/2
ff = factorial(1 - x)*factorial(1 + x)
g2c = 1/ff*(
1 - 1/x**2) + factorial(m)**2/(x**2*factorial(m - x)*factorial(m + x))
g = gosper_sum(f2a, (n, 0, m))
assert g is not None and simplify(g - g2a) == 0
g = gosper_sum(f2b, (n, 0, m))
assert g is not None and simplify(g - g2b) == 0
g = gosper_sum(f2c, (n, 1, m))
assert g is not None and simplify(g - g2c) == 0
def test_gosper_nan():
a = Symbol('a', positive=True)
b = Symbol('b', positive=True)
n = Symbol('n', integer=True)
m = Symbol('m', integer=True)
f2d = n*(n + a + b)*a**n*b**n/(factorial(n + a)*factorial(n + b))
g2d = 1/(factorial(a - 1)*factorial(
b - 1)) - a**(m + 1)*b**(m + 1)/(factorial(a + m)*factorial(b + m))
g = gosper_sum(f2d, (n, 0, m))
assert simplify(g - g2d) == 0
def test_gosper_sum_AeqB_part3():
f3a = 1/n**4
f3b = (6*n + 3)/(4*n**4 + 8*n**3 + 8*n**2 + 4*n + 3)
f3c = 2**n*(n**2 - 2*n - 1)/(n**2*(n + 1)**2)
f3d = n**2*4**n/((n + 1)*(n + 2))
f3e = 2**n/(n + 1)
f3f = 4*(n - 1)*(n**2 - 2*n - 1)/(n**2*(n + 1)**2*(n - 2)**2*(n - 3)**2)
f3g = (n**4 - 14*n**2 - 24*n - 9)*2**n/(n**2*(n + 1)**2*(n + 2)**2*
(n + 3)**2)
# g3a -> no closed form
g3b = m*(m + 2)/(2*m**2 + 4*m + 3)
g3c = 2**m/m**2 - 2
g3d = Rational(2, 3) + 4**(m + 1)*(m - 1)/(m + 2)/3
# g3e -> no closed form
g3f = -(Rational(-1, 16) + 1/((m - 2)**2*(m + 1)**2)) # the AeqB key is wrong
g3g = Rational(-2, 9) + 2**(m + 1)/((m + 1)**2*(m + 3)**2)
g = gosper_sum(f3a, (n, 1, m))
assert g is None
g = gosper_sum(f3b, (n, 1, m))
assert g is not None and simplify(g - g3b) == 0
g = gosper_sum(f3c, (n, 1, m - 1))
assert g is not None and simplify(g - g3c) == 0
g = gosper_sum(f3d, (n, 1, m))
assert g is not None and simplify(g - g3d) == 0
g = gosper_sum(f3e, (n, 0, m - 1))
assert g is None
g = gosper_sum(f3f, (n, 4, m))
assert g is not None and simplify(g - g3f) == 0
g = gosper_sum(f3g, (n, 1, m))
assert g is not None and simplify(g - g3g) == 0
|
the-stack_0_19109 | import unittest
from edg import *
class DomeButtonConnector(FootprintBlock):
@init_in_parent
def __init__(self) -> None:
super().__init__()
self.led_a = self.Port(DigitalSink(
voltage_limits=(0, 15) * Volt, # arbitrary +3v tolerance
current_draw=(0, 10)*mAmp # TODO characterize current draw
))
self.led_k = self.Port(Ground(), [Common]) # TODO should be agnostic to high / low sided drive
self.sw2 = self.Port(Ground(), [Common])
self.sw1 = self.Port(DigitalSingleSource.low_from_supply(self.sw2))
def contents(self) -> None:
super().contents()
self.footprint(
'J', 'Connector_PinHeader_2.54mm:PinHeader_1x04_P2.54mm_Vertical',
{
'1': self.led_a,
'2': self.led_k,
'3': self.sw1,
'4': self.sw2,
},
mfr='Sparkfun', part='COM-09181' # TODO different colors
)
class TestSimon(BoardTop):
def contents(self) -> None:
super().contents()
self.mcu = self.Block(Nucleo_F303k8())
with self.implicit_connect(
ImplicitConnect(self.mcu.pwr_5v, [Power]),
ImplicitConnect(self.mcu.gnd, [Common]),
) as imp:
(self.spk_drv, self.spk), self.spk_chain = self.chain(
self.mcu.new_io(AnalogSource),
imp.Block(Lm4871()),
self.Block(Speaker()))
with self.implicit_connect(
ImplicitConnect(self.mcu.pwr_3v3, [Power]),
ImplicitConnect(self.mcu.gnd, [Common]),
) as imp:
self.rgb = imp.Block(IndicatorSinkRgbLed()) # status RGB
self.rgb_red_net = self.connect(self.mcu.new_io(DigitalBidir), self.rgb.red)
self.rgb_grn_net = self.connect(self.mcu.new_io(DigitalBidir), self.rgb.green)
self.rgb_blue_net = self.connect(self.mcu.new_io(DigitalBidir), self.rgb.blue)
(self.sw, self.sw_pull), self.sw_chain = self.chain(
imp.Block(DigitalSwitch()), imp.Block(PullupResistor(10 * kOhm(tol=0.05))),
self.mcu.new_io(DigitalBidir))
self.btn = ElementDict[DomeButtonConnector]()
self.btn_pull = ElementDict[PullupResistor]()
self.btn_drv = ElementDict[HighSideSwitch]() # TODO move to 12v
self.btn_zeroed_current = ElementDict[HighSideSwitch]() # TODO move to 12v
for i in range(4):
conn = self.btn[i] = imp.Block(DomeButtonConnector())
pull = self.btn_pull[i] = imp.Block(PullupResistor(10 * kOhm(tol=0.05)))
self.connect(pull.io, conn.sw1)
self.pwr = self.Block(Ap3012(output_voltage=12*Volt(tol=0.1)))
self.v3v3 = self.connect(self.mcu.pwr_3v3)
self.v5 = self.connect(self.pwr.pwr_in, self.mcu.pwr_5v)
self.gnd = self.connect(self.pwr.gnd, self.mcu.gnd)
self.v12 = self.connect(self.pwr.pwr_out)
with self.implicit_connect(
ImplicitConnect(self.pwr.pwr_out, [Power]),
ImplicitConnect(self.mcu.gnd, [Common]),
) as imp:
for i in range(4):
driver = self.btn_drv[i] = imp.Block(HighSideSwitch(frequency=(0.1, 10) * kHertz))
if i == 0: # only one draws current, since we assume only one will be lit at any point in time
self.connect(driver.output, self.btn[i].led_a)
else:
(self.btn_zeroed_current[i],), _ = self.chain(
driver.output,
self.Block(ForcedDigitalSinkCurrentDraw((0, 0))),
self.btn[i].led_a)
self.btn_drv0_net = self.connect(self.mcu.new_io(DigitalBidir), self.btn_drv[0].control)
self.btn_sw0_net = self.connect(self.mcu.new_io(DigitalBidir), self.btn[0].sw1)
self.btn_drv1_net = self.connect(self.mcu.new_io(DigitalBidir), self.btn_drv[1].control)
self.btn_sw1_net = self.connect(self.mcu.new_io(DigitalBidir), self.btn[1].sw1)
self.btn_drv2_net = self.connect(self.mcu.new_io(DigitalBidir), self.btn_drv[2].control)
self.btn_sw2_net = self.connect(self.mcu.new_io(DigitalBidir), self.btn[2].sw1)
self.btn_drv3_net = self.connect(self.mcu.new_io(DigitalBidir), self.btn_drv[3].control)
self.btn_sw3_net = self.connect(self.mcu.new_io(DigitalBidir), self.btn[3].sw1)
self.duck = self.Block(DuckLogo())
self.leadfree = self.Block(LeadFreeIndicator())
self.id = self.Block(IdDots4())
def refinements(self) -> Refinements:
return super().refinements() + Refinements(
instance_values=[
(['mcu', 'pin_assigns'], ';'.join([
'spk_chain_0=24',
'rgb_red_net=15',
'rgb_grn_net=14',
'rgb_blue_net=13',
'sw_chain_0=27',
'btn_drv0_net=5',
'btn_sw0_net=6',
'btn_drv1_net=7',
'btn_sw1_net=8',
'btn_drv2_net=9',
'btn_sw2_net=10',
'btn_drv3_net=11',
'btn_sw3_net=12',
]))
]
)
class SimonTestCase(unittest.TestCase):
def test_design(self) -> None:
compile_board_inplace(TestSimon)
|
the-stack_0_19110 | #! /usr/bin/env python
######################
# quality_check.py
######################
# A program to loop through and inspect SWS results, plot the QA result from
# Splitwavepy and print the measured SKS and SKKS splitting from Sheba
# A measure of data quality and discernible discrepancy between results is then assigned
# to each event. This will allow bad results to be removed
#####################################################################################################
# Some Imports
import pandas as pd
import splitwavepy as sw
import obspy
import matplotlib.pyplot as plt
from summary_plot import plotall
import os.path
import SKS_SKKS_qa
import sys
class Inspecter:
def __init__(self, pairs,dir,mode):
self.pairs = pd.read_csv('{}/{}'.format(dir,pairs),delim_whitespace=True,converters={'TIME': lambda x: str(x)})
self.result_path = dir
self.qa_dir = '/Users/ja17375/Shear_Wave_Splitting/Sheba/Runs/Jacks_Split/SplitWavePy'
self.accepted_i = []
self.fs = []
self.qual = []
self.disc = []
self.QA_mode = mode
def write_qa(self,filestem, qual, disc,mode='a+'):
''' Writes the event filestem and the quality/discrepancy rating to a textfile'''
with open('{}/Inspection_Results.txt'.format(self.result_path),mode) as outfile:
outfile.write('{} {} {}\n'.format(filestem,qual,disc) )
def run_inspection(self):
if os.path.isfile('{}/Inspection_Results.txt'.format(self.result_path)):
chk = input('Inspection_Results.txt already exists, [o]verwrite or [c]ontinue? \n >')
            if chk == 'c':
with open('{}/Inspection_Results.txt'.format(self.result_path)) as reader:
head= reader.readline()
for line in reader.readlines():
self.fs.append(line.split(' ')[0])
self.qual.append(line.split(' ')[1])
self.disc.append(' '.join(line.split(' ')[2:]))
            elif chk == 'o':
                # Overwrite existing results
self.write_qa('Filestem', 'Data Quality', 'Discrepancy',mode='w+')
self.loop_thru_pairs()
else:
print('Initialise Inspection_Results.txt')
self.write_qa('Filestem', 'Data Quality', 'Discrepancy',mode='w+')
self.loop_thru_pairs()
def loop_thru_pairs(self):
'''Loops through the pairs and checks if there is a set of QA results'''
# Write HEaderline to outfile
for i,row in self.pairs.iterrows():
filestem = '{}_{}_{}'.format(row.STAT,row.DATE,row.TIME)
if i < len(self.fs):
if filestem == self.fs[i]:
print("Event {} already inspected. Qual {} Disc {}".format(self.fs[i],self.qual[i],self.disc[i]))
qual = self.qual[i]
                    disc = self.disc[i]
else:
print('Event {} Date: {} Time: {}, Stat: {} SNR_SKS: {} SNR_SKKS: {}'.format(i,row.DATE,row.TIME,row.STAT,row.SNR_SKS,row.SNR_SKKS))
print(self.QA_mode)
if row.SNR_SKS <= 2 or row.SNR_SKKS <= 2:
                    #Test to see if Signal-to-Noise is too low
print('SNR for SKS or SKKS less than 2, auto-reject')
qual = 'p'
disc = 'SNR <=2'
else:
print('Pass SNR')
if self.QA_mode == 'man':
# Option for more rigerous (and time consuming manual inspection of all remaining pairs)
sr = (row.FAST_SKS,row.DFAST_SKS,row.TLAG_SKS,row.DTLAG_SKS,row.FAST_SKKS,row.DFAST_SKKS,row.TLAG_SKKS,row.DTLAG_SKKS)
print('SHEBA RESULTS: SKS: phi = {} +/- {} dt = {} +/- {}. SKKS: phi = {} +/- {}, dt = {} +/- {}'.format(sr[0],sr[1],sr[2],sr[3],sr[4],sr[5],sr[6],sr[7]))
#Test if QA results (from SWP) exist
if (os.path.isfile('{}/{}_sks.eigm'.format(self.qa_dir,filestem)) and os.path.isfile('{}/{}_skks.xcrm'.format(self.qa_dir,filestem))) is False:
print('QA results do not exist, generating')
SKS_SKKS_qa.measure_sks_skks(filestem,self.qa_dir,[row.WBEG_SKS,row.WEND_SKS,row.WBEG_SKKS,row.WEND_SKKS])
# Now plot the TransM,Xcross and Eigm results
# fig = plt.figure(figsize=(15,10))
qual,disc = plotall('{}/{}'.format(self.qa_dir,filestem))
else:
print('Hello')
qual = 'i' # i for initial test pass
                        disc = '-' # Cannot search for discrepancy just using SNR test. Build in stack test here maybe ??
self.write_qa(filestem,qual,disc)
if 'p' in qual:
                # i.e. if the data quality is poor, the event is rejected
print('Event rejected')
else:
self.accepted_i.append(i)
print('Event accepted')
#Slice the rows which correspond to good data and wirte them to a new pairs file
print('{} accepted'.format(len(self.accepted_i)))
accepted_pairs = self.pairs.iloc[self.accepted_i,:]
#print(accepted_pairs)
accepted_pairs.to_csv('{}/accepted_SKS_SKKS.pairs'.format(self.result_path),sep=' ',index=False,mode='a+')
if __name__ == '__main__' :
print('Hello World, this is quality_check.py. You are running me from the command line')
    if len(sys.argv) == 1:
        # fall back to interactive prompts when no command-line arguments are given
        print('No inputs detected. Please input them now')
        pair_file = input('Input pair filename \n >')
        results_dir = input('Input results directory \n >')
        md = input("Input inspection mode ('snr' or 'man') \n >")
    else:
        pair_file = sys.argv[1] # The pairs file we want to run the inspection for
        results_dir = sys.argv[2] # Results directory the pairs file sits in
        md = sys.argv[3] # Mode of inspection: either 'snr' for just the signal-to-noise test or 'man' for manual inspection
    Review = Inspecter(pair_file, results_dir, md)
Review.run_inspection()
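# Example invocation (hypothetical paths):
#   python quality_check.py accepted_SKS_SKKS.pairs /path/to/Results man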
|
the-stack_0_19115 | # Electrum - lightweight Bitcoin client
# Copyright (C) 2012 [email protected]
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import threading
from typing import Optional, Dict, Mapping, Sequence
from . import util
from .bitcoin import hash_encode, int_to_hex, rev_hex
from .crypto import sha256d
from . import constants
from .constants import CHUNK_LEN
from .util import bfh, bh2u
from .simple_config import SimpleConfig
from .logging import get_logger, Logger
try:
import scrypt
getPoWHash = lambda x: scrypt.hash(x, x, N=1024, r=1, p=1, buflen=32)
except ImportError:
util.print_msg("Warning: package scrypt not available; synchronization could be very slow")
from .scrypt import scrypt_1024_1_1_80 as getPoWHash
_logger = get_logger(__name__)
HEADER_SIZE = 80 # bytes
MAX_TARGET = 0x00000FFFFF000000000000000000000000000000000000000000000000000000
class MissingHeader(Exception):
pass
class InvalidHeader(Exception):
pass
def serialize_header(header_dict: dict) -> str:
s = int_to_hex(header_dict['version'], 4) \
+ rev_hex(header_dict['prev_block_hash']) \
+ rev_hex(header_dict['merkle_root']) \
+ int_to_hex(int(header_dict['timestamp']), 4) \
+ int_to_hex(int(header_dict['bits']), 4) \
+ int_to_hex(int(header_dict['nonce']), 4)
return s
def deserialize_header(s: bytes, height: int) -> dict:
if not s:
raise InvalidHeader('Invalid header: {}'.format(s))
if len(s) != HEADER_SIZE:
raise InvalidHeader('Invalid header length: {}'.format(len(s)))
hex_to_int = lambda s: int.from_bytes(s, byteorder='little')
h = {}
h['version'] = hex_to_int(s[0:4])
h['prev_block_hash'] = hash_encode(s[4:36])
h['merkle_root'] = hash_encode(s[36:68])
h['timestamp'] = hex_to_int(s[68:72])
h['bits'] = hex_to_int(s[72:76])
h['nonce'] = hex_to_int(s[76:80])
h['block_height'] = height
return h
def hash_header(header: dict) -> str:
if header is None:
return '0' * 64
if header.get('prev_block_hash') is None:
header['prev_block_hash'] = '00'*32
return hash_raw_header(serialize_header(header))
def hash_raw_header(header: str) -> str:
return hash_encode(sha256d(bfh(header)))
def pow_hash_header(header):
return hash_encode(getPoWHash(bfh(serialize_header(header))))
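# Illustrative round-trip sketch (not from the original source): serialize_header and
# deserialize_header are inverses up to the 'block_height' field added on deserialization, e.g.
#   hdr = {'version': 1, 'prev_block_hash': '00' * 32, 'merkle_root': '00' * 32,
#          'timestamp': 0, 'bits': 0x1e0ffff0, 'nonce': 0}
#   deserialize_header(bfh(serialize_header(hdr)), 0)['bits'] == hdr['bits']   # True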
# key: blockhash hex at forkpoint
# the chain at some key is the best chain that includes the given hash
blockchains = {} # type: Dict[str, Blockchain]
blockchains_lock = threading.RLock()
def read_blockchains(config: 'SimpleConfig'):
best_chain = Blockchain(config=config,
forkpoint=0,
parent=None,
forkpoint_hash=constants.net.GENESIS,
prev_hash=None)
blockchains[constants.net.GENESIS] = best_chain
# consistency checks
if best_chain.height() > constants.net.max_checkpoint():
header_after_cp = best_chain.read_header(constants.net.max_checkpoint()+1)
if not header_after_cp or not best_chain.can_connect(header_after_cp, check_height=False):
_logger.info("[blockchain] deleting best chain. cannot connect header after last cp to last cp.")
os.unlink(best_chain.path())
best_chain.update_size()
# forks
fdir = os.path.join(util.get_headers_dir(config), 'forks')
util.make_dir(fdir)
# files are named as: fork2_{forkpoint}_{prev_hash}_{first_hash}
l = filter(lambda x: x.startswith('fork2_') and '.' not in x, os.listdir(fdir))
l = sorted(l, key=lambda x: int(x.split('_')[1])) # sort by forkpoint
def delete_chain(filename, reason):
_logger.info(f"[blockchain] deleting chain {filename}: {reason}")
os.unlink(os.path.join(fdir, filename))
def instantiate_chain(filename):
__, forkpoint, prev_hash, first_hash = filename.split('_')
forkpoint = int(forkpoint)
prev_hash = (64-len(prev_hash)) * "0" + prev_hash # left-pad with zeroes
first_hash = (64-len(first_hash)) * "0" + first_hash
# forks below the max checkpoint are not allowed
if forkpoint <= constants.net.max_checkpoint():
delete_chain(filename, "deleting fork below max checkpoint")
return
# find parent (sorting by forkpoint guarantees it's already instantiated)
for parent in blockchains.values():
if parent.check_hash(forkpoint - 1, prev_hash):
break
else:
delete_chain(filename, "cannot find parent for chain")
return
b = Blockchain(config=config,
forkpoint=forkpoint,
parent=parent,
forkpoint_hash=first_hash,
prev_hash=prev_hash)
# consistency checks
h = b.read_header(b.forkpoint)
if first_hash != hash_header(h):
delete_chain(filename, "incorrect first hash for chain")
return
if not b.parent.can_connect(h, check_height=False):
delete_chain(filename, "cannot connect chain to parent")
return
chain_id = b.get_id()
assert first_hash == chain_id, (first_hash, chain_id)
blockchains[chain_id] = b
for filename in l:
instantiate_chain(filename)
def get_best_chain() -> 'Blockchain':
return blockchains[constants.net.GENESIS]
# block hash -> chain work; up to and including that block
_CHAINWORK_CACHE = {
"0000000000000000000000000000000000000000000000000000000000000000": 0, # virtual block at height -1
} # type: Dict[str, int]
class Blockchain(Logger):
"""
Manages blockchain headers and their verification
"""
def __init__(self, config: SimpleConfig, forkpoint: int, parent: Optional['Blockchain'],
forkpoint_hash: str, prev_hash: Optional[str]):
assert isinstance(forkpoint_hash, str) and len(forkpoint_hash) == 64, forkpoint_hash
assert (prev_hash is None) or (isinstance(prev_hash, str) and len(prev_hash) == 64), prev_hash
# assert (parent is None) == (forkpoint == 0)
if 0 < forkpoint <= constants.net.max_checkpoint():
raise Exception(f"cannot fork below max checkpoint. forkpoint: {forkpoint}")
Logger.__init__(self)
self.config = config
self.forkpoint = forkpoint # height of first header
self.parent = parent
self._forkpoint_hash = forkpoint_hash # blockhash at forkpoint. "first hash"
self._prev_hash = prev_hash # blockhash immediately before forkpoint
self.lock = threading.RLock()
self.update_size()
def with_lock(func):
def func_wrapper(self, *args, **kwargs):
with self.lock:
return func(self, *args, **kwargs)
return func_wrapper
@property
def checkpoints(self):
return constants.net.CHECKPOINTS
def get_max_child(self) -> Optional[int]:
children = self.get_direct_children()
return max([x.forkpoint for x in children]) if children else None
def get_max_forkpoint(self) -> int:
"""Returns the max height where there is a fork
related to this chain.
"""
mc = self.get_max_child()
return mc if mc is not None else self.forkpoint
def get_direct_children(self) -> Sequence['Blockchain']:
with blockchains_lock:
return list(filter(lambda y: y.parent==self, blockchains.values()))
def get_parent_heights(self) -> Mapping['Blockchain', int]:
"""Returns map: (parent chain -> height of last common block)"""
with blockchains_lock:
result = {self: self.height()}
chain = self
while True:
parent = chain.parent
if parent is None: break
result[parent] = chain.forkpoint - 1
chain = parent
return result
def get_height_of_last_common_block_with_chain(self, other_chain: 'Blockchain') -> int:
last_common_block_height = 0
our_parents = self.get_parent_heights()
their_parents = other_chain.get_parent_heights()
for chain in our_parents:
if chain in their_parents:
h = min(our_parents[chain], their_parents[chain])
last_common_block_height = max(last_common_block_height, h)
return last_common_block_height
@with_lock
def get_branch_size(self) -> int:
return self.height() - self.get_max_forkpoint() + 1
def get_name(self) -> str:
return self.get_hash(self.get_max_forkpoint()).lstrip('0')[0:10]
def check_header(self, header: dict) -> bool:
header_hash = hash_header(header)
height = header.get('block_height')
return self.check_hash(height, header_hash)
def check_hash(self, height: int, header_hash: str) -> bool:
"""Returns whether the hash of the block at given height
is the given hash.
"""
assert isinstance(header_hash, str) and len(header_hash) == 64, header_hash # hex
try:
return header_hash == self.get_hash(height)
except Exception:
return False
def fork(parent, header: dict) -> 'Blockchain':
if not parent.can_connect(header, check_height=False):
raise Exception("forking header does not connect to parent chain")
forkpoint = header.get('block_height')
self = Blockchain(config=parent.config,
forkpoint=forkpoint,
parent=parent,
forkpoint_hash=hash_header(header),
prev_hash=parent.get_hash(forkpoint-1))
open(self.path(), 'w+').close()
self.save_header(header)
# put into global dict. note that in some cases
# save_header might have already put it there but that's OK
chain_id = self.get_id()
with blockchains_lock:
blockchains[chain_id] = self
return self
@with_lock
def height(self) -> int:
return self.forkpoint + self.size() - 1
@with_lock
def size(self) -> int:
return self._size
@with_lock
def update_size(self) -> None:
p = self.path()
self._size = os.path.getsize(p)//HEADER_SIZE if os.path.exists(p) else 0
@classmethod
def verify_header(cls, header: dict, prev_hash: str, target: int, expected_header_hash: str=None) -> None:
_hash = hash_header(header)
_powhash = pow_hash_header(header)
if expected_header_hash and expected_header_hash != _hash:
raise Exception("hash mismatches with expected: {} vs {}".format(expected_header_hash, _hash))
if prev_hash != header.get('prev_block_hash'):
raise Exception("prev hash mismatch: %s vs %s" % (prev_hash, header.get('prev_block_hash')))
if constants.net.TESTNET:
return
#bits = cls.target_to_bits(target)
#if bits != header.get('bits'):
# raise Exception("bits mismatch: %s vs %s" % (bits, header.get('bits')))
#block_hash_as_num = int.from_bytes(bfh(_powhash), byteorder='big')
#if block_hash_as_num > target:
# raise Exception(f"insufficient proof of work: {block_hash_as_num} vs target {target}")
def verify_chunk(self, index: int, data: bytes) -> None:
num = len(data) // HEADER_SIZE
start_height = index * CHUNK_LEN
prev_hash = self.get_hash(start_height - 1)
target = self.get_target(index-1)
for i in range(num):
height = start_height + i
try:
expected_header_hash = self.get_hash(height)
except MissingHeader:
expected_header_hash = None
raw_header = data[i*HEADER_SIZE : (i+1)*HEADER_SIZE]
header = deserialize_header(raw_header, index*CHUNK_LEN + i)
self.verify_header(header, prev_hash, target, expected_header_hash)
prev_hash = hash_header(header)
@with_lock
def path(self):
d = util.get_headers_dir(self.config)
if self.parent is None:
filename = 'blockchain_headers'
else:
assert self.forkpoint > 0, self.forkpoint
prev_hash = self._prev_hash.lstrip('0')
first_hash = self._forkpoint_hash.lstrip('0')
basename = f'fork2_{self.forkpoint}_{prev_hash}_{first_hash}'
filename = os.path.join('forks', basename)
return os.path.join(d, filename)
@with_lock
def save_chunk(self, index: int, chunk: bytes):
assert index >= 0, index
chunk_within_checkpoint_region = index < len(self.checkpoints)
# chunks in checkpoint region are the responsibility of the 'main chain'
if chunk_within_checkpoint_region and self.parent is not None:
main_chain = get_best_chain()
main_chain.save_chunk(index, chunk)
return
delta_height = (index * CHUNK_LEN - self.forkpoint)
delta_bytes = delta_height * HEADER_SIZE
# if this chunk contains our forkpoint, only save the part after forkpoint
# (the part before is the responsibility of the parent)
if delta_bytes < 0:
chunk = chunk[-delta_bytes:]
delta_bytes = 0
truncate = not chunk_within_checkpoint_region
self.write(chunk, delta_bytes, truncate)
self.swap_with_parent()
def swap_with_parent(self) -> None:
with self.lock, blockchains_lock:
# do the swap; possibly multiple ones
cnt = 0
while True:
old_parent = self.parent
if not self._swap_with_parent():
break
# make sure we are making progress
cnt += 1
if cnt > len(blockchains):
raise Exception(f'swapping fork with parent too many times: {cnt}')
# we might have become the parent of some of our former siblings
for old_sibling in old_parent.get_direct_children():
if self.check_hash(old_sibling.forkpoint - 1, old_sibling._prev_hash):
old_sibling.parent = self
def _swap_with_parent(self) -> bool:
"""Check if this chain became stronger than its parent, and swap
the underlying files if so. The Blockchain instances will keep
'containing' the same headers, but their ids change and so
they will be stored in different files."""
if self.parent is None:
return False
        if self.parent.height() >= self.height():
return False
self.logger.info(f"swapping {self.forkpoint} {self.parent.forkpoint}")
parent_branch_size = self.parent.height() - self.forkpoint + 1
forkpoint = self.forkpoint # type: Optional[int]
parent = self.parent # type: Optional[Blockchain]
child_old_id = self.get_id()
parent_old_id = parent.get_id()
# swap files
# child takes parent's name
# parent's new name will be something new (not child's old name)
self.assert_headers_file_available(self.path())
child_old_name = self.path()
with open(self.path(), 'rb') as f:
my_data = f.read()
self.assert_headers_file_available(parent.path())
assert forkpoint > parent.forkpoint, (f"forkpoint of parent chain ({parent.forkpoint}) "
f"should be at lower height than children's ({forkpoint})")
with open(parent.path(), 'rb') as f:
f.seek((forkpoint - parent.forkpoint)*HEADER_SIZE)
parent_data = f.read(parent_branch_size*HEADER_SIZE)
self.write(parent_data, 0)
parent.write(my_data, (forkpoint - parent.forkpoint)*HEADER_SIZE)
# swap parameters
self.parent, parent.parent = parent.parent, self # type: Optional[Blockchain], Optional[Blockchain]
self.forkpoint, parent.forkpoint = parent.forkpoint, self.forkpoint
self._forkpoint_hash, parent._forkpoint_hash = parent._forkpoint_hash, hash_raw_header(bh2u(parent_data[:HEADER_SIZE]))
self._prev_hash, parent._prev_hash = parent._prev_hash, self._prev_hash
# parent's new name
os.replace(child_old_name, parent.path())
self.update_size()
parent.update_size()
# update pointers
blockchains.pop(child_old_id, None)
blockchains.pop(parent_old_id, None)
blockchains[self.get_id()] = self
blockchains[parent.get_id()] = parent
return True
def get_id(self) -> str:
return self._forkpoint_hash
def assert_headers_file_available(self, path):
if os.path.exists(path):
return
elif not os.path.exists(util.get_headers_dir(self.config)):
raise FileNotFoundError('Electrum headers_dir does not exist. Was it deleted while running?')
else:
raise FileNotFoundError('Cannot find headers file but headers_dir is there. Should be at {}'.format(path))
@with_lock
def write(self, data: bytes, offset: int, truncate: bool=True) -> None:
filename = self.path()
self.assert_headers_file_available(filename)
with open(filename, 'rb+') as f:
if truncate and offset != self._size * HEADER_SIZE:
f.seek(offset)
f.truncate()
f.seek(offset)
f.write(data)
f.flush()
os.fsync(f.fileno())
self.update_size()
@with_lock
def save_header(self, header: dict) -> None:
delta = header.get('block_height') - self.forkpoint
data = bfh(serialize_header(header))
# headers are only _appended_ to the end:
assert delta == self.size(), (delta, self.size())
assert len(data) == HEADER_SIZE
self.write(data, delta*HEADER_SIZE)
self.swap_with_parent()
@with_lock
def read_header(self, height: int) -> Optional[dict]:
if height < 0:
return
if height < self.forkpoint:
return self.parent.read_header(height)
if height > self.height():
return
delta = height - self.forkpoint
name = self.path()
self.assert_headers_file_available(name)
with open(name, 'rb') as f:
f.seek(delta * HEADER_SIZE)
h = f.read(HEADER_SIZE)
if len(h) < HEADER_SIZE:
raise Exception('Expected to read a full header. This was only {} bytes'.format(len(h)))
if h == bytes([0])*HEADER_SIZE:
return None
return deserialize_header(h, height)
def header_at_tip(self) -> Optional[dict]:
"""Return latest header."""
height = self.height()
return self.read_header(height)
def get_hash(self, height: int) -> str:
def is_height_checkpoint():
within_cp_range = height <= constants.net.max_checkpoint()
at_chunk_boundary = (height+1) % CHUNK_LEN == 0
return within_cp_range and at_chunk_boundary
if height == -1:
return '0000000000000000000000000000000000000000000000000000000000000000'
elif height == 0:
return constants.net.GENESIS
elif is_height_checkpoint():
index = height // CHUNK_LEN
h, t, _ = self.checkpoints[index]
return h
else:
header = self.read_header(height)
if header is None:
raise MissingHeader(height)
return hash_header(header)
def get_timestamp(self, height):
if height < len(self.checkpoints) * CHUNK_LEN and (height+1) % CHUNK_LEN == 0:
index = height // CHUNK_LEN
_, _, ts = self.checkpoints[index]
return ts
return self.read_header(height).get('timestamp')
def get_target(self, index: int) -> int:
# compute target from chunk x, used in chunk x+1
if constants.net.TESTNET:
return 0
if index == -1:
return 0x00000FFFF0000000000000000000000000000000000000000000000000000000
if index < len(self.checkpoints):
h, t, _ = self.checkpoints[index]
return t
# new target
# Sumcoin: go back the full period unless it's the first retarget
first_timestamp = self.get_timestamp(index * CHUNK_LEN - 1 if index > 0 else 0)
last = self.read_header(index * CHUNK_LEN + CHUNK_LEN - 1)
if not first_timestamp or not last:
raise MissingHeader()
bits = last.get('bits')
target = self.bits_to_target(bits)
nActualTimespan = last.get('timestamp') - first_timestamp
nTargetTimespan = 84 * 60 * 60
nActualTimespan = max(nActualTimespan, nTargetTimespan // 4)
nActualTimespan = min(nActualTimespan, nTargetTimespan * 4)
new_target = min(MAX_TARGET, (target * nActualTimespan) // nTargetTimespan)
        # not every target can be represented in 32 bits:
new_target = self.bits_to_target(self.target_to_bits(new_target))
return new_target
@classmethod
def bits_to_target(cls, bits: int) -> int:
bitsN = (bits >> 24) & 0xff
if not (0x03 <= bitsN <= 0x1e):
raise Exception("First part of bits should be in [0x03, 0x1e]")
bitsBase = bits & 0xffffff
if not (0x8000 <= bitsBase <= 0x7fffff):
raise Exception("Second part of bits should be in [0x8000, 0x7fffff]")
return bitsBase << (8 * (bitsN-3))
@classmethod
def target_to_bits(cls, target: int) -> int:
c = ("%064x" % target)[2:]
while c[:2] == '00' and len(c) > 6:
c = c[2:]
bitsN, bitsBase = len(c) // 2, int.from_bytes(bfh(c[:6]), byteorder='big')
if bitsBase >= 0x800000:
bitsN += 1
bitsBase >>= 8
return bitsN << 24 | bitsBase
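    # Illustrative round-trip (example values, not from the original source): bits = 0x1e0ffff0
    # splits into bitsN = 0x1e and bitsBase = 0x0ffff0, so bits_to_target returns
    # 0x0ffff0 << (8 * (0x1e - 3)), which is exactly the target get_target() returns for
    # index == -1, and target_to_bits maps that target back to 0x1e0ffff0.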
def chainwork_of_header_at_height(self, height: int) -> int:
"""work done by single header at given height"""
chunk_idx = height // CHUNK_LEN - 1
target = self.get_target(chunk_idx)
work = ((2 ** 256 - target - 1) // (target + 1)) + 1
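        # note: this equals (2 ** 256) // (target + 1), the expected number of hashes per block at this target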
return work
@with_lock
def get_chainwork(self, height=None) -> int:
if height is None:
height = max(0, self.height())
if constants.net.TESTNET:
# On testnet/regtest, difficulty works somewhat different.
# It's out of scope to properly implement that.
return height
last_retarget = height // CHUNK_LEN * CHUNK_LEN - 1
cached_height = last_retarget
while _CHAINWORK_CACHE.get(self.get_hash(cached_height)) is None:
if cached_height <= -1:
break
cached_height -= CHUNK_LEN
assert cached_height >= -1, cached_height
running_total = _CHAINWORK_CACHE[self.get_hash(cached_height)]
while cached_height < last_retarget:
cached_height += CHUNK_LEN
work_in_single_header = self.chainwork_of_header_at_height(cached_height)
work_in_chunk = CHUNK_LEN * work_in_single_header
running_total += work_in_chunk
_CHAINWORK_CACHE[self.get_hash(cached_height)] = running_total
cached_height += CHUNK_LEN
work_in_single_header = self.chainwork_of_header_at_height(cached_height)
work_in_last_partial_chunk = (height % CHUNK_LEN + 1) * work_in_single_header
return running_total + work_in_last_partial_chunk
def can_connect(self, header: dict, check_height: bool=True) -> bool:
if header is None:
return False
height = header['block_height']
if check_height and self.height() != height - 1:
return False
if height == 0:
return hash_header(header) == constants.net.GENESIS
try:
prev_hash = self.get_hash(height - 1)
except:
return False
if prev_hash != header.get('prev_block_hash'):
return False
try:
target = self.get_target(height // CHUNK_LEN - 1)
except MissingHeader:
return False
try:
self.verify_header(header, prev_hash, target)
except BaseException as e:
return False
return True
def connect_chunk(self, idx: int, hexdata: str) -> bool:
assert idx >= 0, idx
try:
data = bfh(hexdata)
self.verify_chunk(idx, data)
self.save_chunk(idx, data)
return True
except BaseException as e:
self.logger.info(f'verify_chunk idx {idx} failed: {repr(e)}')
return False
def get_checkpoints(self):
# for each chunk, store the hash of the last block and the target after the chunk
cp = []
n = self.height() // CHUNK_LEN
for index in range(n):
h = self.get_hash((index+1) * CHUNK_LEN -1)
target = self.get_target(index)
# Sumcoin: also store the timestamp of the last block
tstamp = self.get_timestamp((index+1) * CHUNK_LEN - 1)
cp.append((h, target, tstamp))
return cp
def check_header(header: dict) -> Optional[Blockchain]:
if type(header) is not dict:
return None
with blockchains_lock: chains = list(blockchains.values())
for b in chains:
if b.check_header(header):
return b
return None
def can_connect(header: dict) -> Optional[Blockchain]:
with blockchains_lock: chains = list(blockchains.values())
for b in chains:
if b.can_connect(header):
return b
return None
|
the-stack_0_19117 | import numpy as np
import matplotlib.pyplot as mtp
def Runge4(x,y,func,h):
w1=h*func(x,y)
    w2=h*func(x+0.5*h,y+0.5*w1)
    w3=h*func(x+0.5*h,y+0.5*w2)
    w4=h*func(x+h,y+w3)
y=y+(w1+2*w2+2*w3+w4)/6
return y
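# Quick sanity check (illustrative, not part of the original script): for dy/dx = y with
# y(0) = 1, a single Runge4 step with h = 0.1 gives ~1.1051708, agreeing with exp(0.1) to within about 1e-7.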
def sperical(x,y,a=1e-4,rhos=1.1,rhof=1.0,g=981.0,u=3.5e-2):
    y1=y[1]  # dz/dt is the velocity component y[1]
y2=(rhos-rhof)*g/rhos-(9/2)*u*y[1]/(a**2)/rhos
return np.array([y1,y2])
####
TimeBegin=0.0;TimeFinal=0.001
y01=0.0;y02=0.0
y=[y01,y02]
h=float(input('Enter the value of h = '))
ti=[];y1=[];y2=[]
##
while TimeBegin<TimeFinal:
y=Runge4(TimeBegin,y,sperical,h)
TimeBegin+=h
ti.append(TimeBegin)
y1.append(y[0])
y2.append(y[1])
panjang=len(ti)
##Plotting
mtp.subplot(1,2,1)
mtp.plot(ti,y1,'k')
mtp.ylabel('z (um)')
mtp.xlabel('time(s)')
mtp.subplot(1,2,2)
mtp.plot(ti,y2,'k')
mtp.ylabel('v (um/s)')
mtp.xlabel('time(s)')
mtp.show() |
the-stack_0_19118 | # -*- coding: utf-8 -*-
from random import randint
from functools import partial
import json as jsonOut
from gluon import current
from database import Dal, RawDal, num_queries
def plaintext():
current.response.headers['Content-Type'] = 'text/plain'
return 'Hello, World!'
def json():
current.response.headers['Content-Type'] = 'application/json'
return jsonOut.dumps({'message': 'Hello, World!'})
def db():
current.response.headers['Content-Type']='application/json'
return jsonOut.dumps(Dal('World').get_world(randint(1, 10000)))
def queries():
current.response.headers['Content-Type']='application/json'
db = RawDal() if current.optimized else Dal('World')
get_world = db.get_world
r10k = partial(randint, 1, 10000)
worlds = [get_world(r10k()) for _ in
xrange(num_queries(current.request.vars.queries))]
return jsonOut.dumps(worlds)
def updates():
current.response.headers['Content-Type']='application/json'
db = RawDal() if current.optimized else Dal('World')
get_world = db.get_world
update_world = db.update_world
r10k = partial(randint, 1, 10000)
worlds = []
for wid in (r10k() for _ in xrange(num_queries(current.request.vars.queries))):
world = get_world(wid)
newNumber = r10k()
world['randomNumber'] = newNumber
worlds.append(world)
update_world(wid, newNumber)
return jsonOut.dumps(worlds)
def fortune():
new_message = {'id': 0, 'message': 'Additional fortune added at request time.'}
db = RawDal() if current.optimized else Dal('Fortune')
fortunes = db.get_fortunes(new_message=new_message)
return current.response.render('fortune.html', fortunes=fortunes)
|
the-stack_0_19119 | # qubit number=3
# total number=68
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=33
prog.y(input_qubit[2]) # number=56
prog.cz(input_qubit[2],input_qubit[1]) # number=34
prog.h(input_qubit[1]) # number=35
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_Class380.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
the-stack_0_19120 | from collections import OrderedDict
import pytest
from django.conf.urls import include, url
from django.db import models
from django.test import TestCase, override_settings
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.routers import SimpleRouter
from rest_framework.test import APIRequestFactory
from rest_framework.viewsets import GenericViewSet
factory = APIRequestFactory()
class BasicViewSet(GenericViewSet):
def list(self, request, *args, **kwargs):
return Response({'ACTION': 'LIST'})
class InstanceViewSet(GenericViewSet):
def dispatch(self, request, *args, **kwargs):
return self.dummy(request, *args, **kwargs)
def dummy(self, request, *args, **kwargs):
return Response({'view': self})
class Action(models.Model):
pass
class ActionViewSet(GenericViewSet):
queryset = Action.objects.all()
def list(self, request, *args, **kwargs):
response = Response()
response.view = self
return response
def retrieve(self, request, *args, **kwargs):
return Response()
@action(detail=False)
def list_action(self, request, *args, **kwargs):
response = Response()
response.view = self
return response
@action(detail=False, url_name='list-custom')
def custom_list_action(self, request, *args, **kwargs):
raise NotImplementedError
@action(detail=True)
def detail_action(self, request, *args, **kwargs):
raise NotImplementedError
@action(detail=True, url_name='detail-custom')
def custom_detail_action(self, request, *args, **kwargs):
raise NotImplementedError
@action(detail=True, url_path=r'unresolvable/(?P<arg>\w+)', url_name='unresolvable')
def unresolvable_detail_action(self, request, *args, **kwargs):
raise NotImplementedError
class ActionNamesViewSet(GenericViewSet):
def retrieve(self, request, *args, **kwargs):
return Response()
@action(detail=True)
def unnamed_action(self, request, *args, **kwargs):
raise NotImplementedError
@action(detail=True, name='Custom Name')
def named_action(self, request, *args, **kwargs):
raise NotImplementedError
@action(detail=True, suffix='Custom Suffix')
def suffixed_action(self, request, *args, **kwargs):
raise NotImplementedError
class ThingWithMapping:
def __init__(self):
self.mapping = {}
class ActionViewSetWithMapping(ActionViewSet):
mapper = ThingWithMapping()
router = SimpleRouter()
router.register(r'actions', ActionViewSet)
router.register(r'actions-alt', ActionViewSet, basename='actions-alt')
router.register(r'names', ActionNamesViewSet, basename='names')
router.register(r'mapping', ActionViewSetWithMapping, basename='mapping')
urlpatterns = [
url(r'^api/', include(router.urls)),
]
class InitializeViewSetsTestCase(TestCase):
def test_initialize_view_set_with_actions(self):
request = factory.get('/', '', content_type='application/json')
my_view = BasicViewSet.as_view(actions={
'get': 'list',
})
response = my_view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {'ACTION': 'LIST'}
def test_head_request_against_viewset(self):
request = factory.head('/', '', content_type='application/json')
my_view = BasicViewSet.as_view(actions={
'get': 'list',
})
response = my_view(request)
assert response.status_code == status.HTTP_200_OK
def test_initialize_view_set_with_empty_actions(self):
with pytest.raises(TypeError) as excinfo:
BasicViewSet.as_view()
assert str(excinfo.value) == (
"The `actions` argument must be provided "
"when calling `.as_view()` on a ViewSet. "
"For example `.as_view({'get': 'list'})`")
def test_initialize_view_set_with_both_name_and_suffix(self):
with pytest.raises(TypeError) as excinfo:
BasicViewSet.as_view(name='', suffix='', actions={
'get': 'list',
})
assert str(excinfo.value) == (
"BasicViewSet() received both `name` and `suffix`, "
"which are mutually exclusive arguments.")
def test_args_kwargs_request_action_map_on_self(self):
"""
Test a view only has args, kwargs, request, action_map
once `as_view` has been called.
"""
bare_view = InstanceViewSet()
view = InstanceViewSet.as_view(actions={
'get': 'dummy',
})(factory.get('/')).data['view']
for attribute in ('args', 'kwargs', 'request', 'action_map'):
self.assertNotIn(attribute, dir(bare_view))
self.assertIn(attribute, dir(view))
def test_viewset_action_attr(self):
view = ActionViewSet.as_view(actions={'get': 'list'})
get = view(factory.get('/'))
head = view(factory.head('/'))
assert get.view.action == 'list'
assert head.view.action == 'list'
def test_viewset_action_attr_for_extra_action(self):
view = ActionViewSet.as_view(actions=dict(ActionViewSet.list_action.mapping))
get = view(factory.get('/'))
head = view(factory.head('/'))
assert get.view.action == 'list_action'
assert head.view.action == 'list_action'
class GetExtraActionsTests(TestCase):
def test_extra_actions(self):
view = ActionViewSet()
actual = [action.__name__ for action in view.get_extra_actions()]
expected = [
'custom_detail_action',
'custom_list_action',
'detail_action',
'list_action',
'unresolvable_detail_action',
]
self.assertEqual(actual, expected)
def test_should_only_return_decorated_methods(self):
view = ActionViewSetWithMapping()
actual = [action.__name__ for action in view.get_extra_actions()]
expected = [
'custom_detail_action',
'custom_list_action',
'detail_action',
'list_action',
'unresolvable_detail_action',
]
self.assertEqual(actual, expected)
@override_settings(ROOT_URLCONF='tests.test_viewsets')
class GetExtraActionUrlMapTests(TestCase):
def test_list_view(self):
response = self.client.get('/api/actions/')
view = response.renderer_context['view']
expected = OrderedDict([
('Custom list action', 'http://testserver/api/actions/custom_list_action/'),
('List action', 'http://testserver/api/actions/list_action/'),
])
self.assertEqual(view.get_extra_action_url_map(), expected)
def test_detail_view(self):
response = self.client.get('/api/actions/1/')
view = response.renderer_context['view']
expected = OrderedDict([
('Custom detail action', 'http://testserver/api/actions/1/custom_detail_action/'),
('Detail action', 'http://testserver/api/actions/1/detail_action/'),
# "Unresolvable detail action" excluded, since it's not resolvable
])
self.assertEqual(view.get_extra_action_url_map(), expected)
def test_uninitialized_view(self):
self.assertEqual(ActionViewSet().get_extra_action_url_map(), OrderedDict())
def test_action_names(self):
# Action 'name' and 'suffix' kwargs should be respected
response = self.client.get('/api/names/1/')
view = response.renderer_context['view']
expected = OrderedDict([
('Custom Name', 'http://testserver/api/names/1/named_action/'),
('Action Names Custom Suffix', 'http://testserver/api/names/1/suffixed_action/'),
('Unnamed action', 'http://testserver/api/names/1/unnamed_action/'),
])
self.assertEqual(view.get_extra_action_url_map(), expected)
@override_settings(ROOT_URLCONF='tests.test_viewsets')
class ReverseActionTests(TestCase):
def test_default_basename(self):
view = ActionViewSet()
view.basename = router.get_default_basename(ActionViewSet)
view.request = None
assert view.reverse_action('list') == '/api/actions/'
assert view.reverse_action('list-action') == '/api/actions/list_action/'
assert view.reverse_action('list-custom') == '/api/actions/custom_list_action/'
assert view.reverse_action('detail', args=['1']) == '/api/actions/1/'
assert view.reverse_action('detail-action', args=['1']) == '/api/actions/1/detail_action/'
assert view.reverse_action('detail-custom', args=['1']) == '/api/actions/1/custom_detail_action/'
def test_custom_basename(self):
view = ActionViewSet()
view.basename = 'actions-alt'
view.request = None
assert view.reverse_action('list') == '/api/actions-alt/'
assert view.reverse_action('list-action') == '/api/actions-alt/list_action/'
assert view.reverse_action('list-custom') == '/api/actions-alt/custom_list_action/'
assert view.reverse_action('detail', args=['1']) == '/api/actions-alt/1/'
assert view.reverse_action('detail-action', args=['1']) == '/api/actions-alt/1/detail_action/'
assert view.reverse_action('detail-custom', args=['1']) == '/api/actions-alt/1/custom_detail_action/'
def test_request_passing(self):
view = ActionViewSet()
view.basename = router.get_default_basename(ActionViewSet)
view.request = factory.get('/')
# Passing the view's request object should result in an absolute URL.
assert view.reverse_action('list') == 'http://testserver/api/actions/'
# Users should be able to explicitly not pass the view's request.
assert view.reverse_action('list', request=None) == '/api/actions/'
|
the-stack_0_19122 | # coding = utf-8
# modified from https://github.com/pytorch/vision/blob/c558be6b3b6ed5270ed2db0c5edc872c0d089c52/torchvision/models/densenet.py
from typing import List
from collections import OrderedDict
import torch
from torch import nn, Tensor
from util.conf import Configuration
from model.commons import Squeeze, Reshape
class _DenseLayer(nn.Module):
def __init__(self, conf: Configuration, in_channels: int, dilation: int):
super(_DenseLayer, self).__init__()
dim_series = conf.getHP('dim_series')
kernel_size = conf.getHP('size_kernel')
padding = int(kernel_size / 2) * dilation
activation_name = conf.getHP('activation_conv')
bias = conf.getHP('layernorm_type') == 'none' or not conf.getHP('layernorm_elementwise_affine')
growth_rate = conf.getHP('dense_growth_rate')
bottleneck_multiplier = conf.getHP('dense_bottleneck_multiplier')
bottleneck_channels = int(growth_rate * bottleneck_multiplier)
self.__bottleneck = nn.Sequential(conf.getLayerNorm(dim_series),
conf.getActivation(activation_name),
conf.getWeightNorm(nn.Conv1d(in_channels, bottleneck_channels, 1, bias=bias)))
self.__convolution = nn.Sequential(conf.getLayerNorm(dim_series),
conf.getActivation(activation_name),
conf.getWeightNorm(nn.Conv1d(bottleneck_channels, growth_rate, kernel_size, padding=padding, dilation=dilation, bias=bias)))
def forward(self, input) -> Tensor:
if isinstance(input, List):
input = torch.cat(input, 1)
bottleneck = self.__bottleneck(input)
return self.__convolution(bottleneck)
class _DenseBlock(nn.ModuleDict):
def __init__(self, conf: Configuration, in_channels: int, dilation: int, num_layers: int):
super(_DenseBlock, self).__init__()
growth_rate = conf.getHP('dense_growth_rate')
for i in range(num_layers):
self.add_module('denselayer{:d}'.format(i + 1), _DenseLayer(conf, in_channels + i * growth_rate, dilation))
def forward(self, input: Tensor) -> Tensor:
latent = [input]
for _, layer in self.items():
latent.append(layer(latent))
return torch.cat(latent, 1)
class _Transition(nn.Sequential):
def __init__(self, conf: Configuration, in_channels: int):
super(_Transition, self).__init__()
bias = conf.getHP('layernorm_type') == 'none' or not conf.getHP('layernorm_elementwise_affine')
self.add_module('normalize', conf.getLayerNorm(conf.getHP('dim_series')))
self.add_module('activate', conf.getActivation(conf.getHP('activation_conv')))
self.add_module('convolute', conf.getWeightNorm(nn.Conv1d(in_channels, conf.getHP('dense_transition_channels'), 1, bias=bias)))
class _DenseNet(nn.Module):
def __init__(self, conf: Configuration, to_encode: bool):
super(_DenseNet, self).__init__()
kernel_size = conf.getHP('size_kernel')
bias = conf.getHP('layernorm_type') == 'none' or not conf.getHP('layernorm_elementwise_affine')
num_init_channels = conf.getHP('dense_init_channels')
# DenseNet is by default pre-activation
self.__model = nn.Sequential(OrderedDict([
('conv0', conf.getWeightNorm(nn.Conv1d(1, num_init_channels, kernel_size, padding=int(kernel_size / 2), dilation=1, bias=bias)))
]))
num_blocks = conf.getHP('num_en_denseblocks') if to_encode else conf.getHP('num_de_denseblocks')
num_layers = conf.getHP('num_en_denselayers') if to_encode else conf.getHP('num_de_denselayers')
growth_rate = conf.getHP('dense_growth_rate')
num_transition_channels = conf.getHP('dense_transition_channels')
if conf.getHP('dilation_type') == 'exponential':
assert num_blocks > 1 and 2 ** (num_blocks + 1) <= conf.getHP('dim_series') + 1
in_channels = num_init_channels
for depth in range(1, num_blocks + 1):
denseblock = _DenseBlock(conf, in_channels, conf.getDilatoin(depth, to_encode), num_layers)
self.__model.add_module('denseblock{:d}'.format(depth), denseblock)
in_channels = in_channels + num_layers * growth_rate
            # different from the original DenseNet, output_channels is controlled as in the corresponding ResNet
transblock = _Transition(conf, in_channels)
self.__model.add_module('transition{:d}'.format(depth), transblock)
in_channels = num_transition_channels
# finalization of pre-activation mode
self.__model.add_module('normalize', conf.getLayerNorm(conf.getHP('dim_series')))
self.__model.add_module('activate', conf.getActivation(conf.getHP('activation_conv')))
def forward(self, input: Tensor) -> Tensor:
return self.__model(input)
class DenseEncoder(nn.Module):
def __init__(self, conf: Configuration):
super(DenseEncoder, self).__init__()
dim_embedding = conf.getHP('dim_embedding')
num_channels = conf.getHP('num_en_channels')
dim_latent = conf.getHP('dim_en_latent')
self.__model = nn.Sequential(_DenseNet(conf, to_encode=True),
nn.AdaptiveMaxPool1d(1),
Squeeze(),
nn.Linear(num_channels, dim_latent),
conf.getActivation(conf.getHP('activation_linear')),
nn.Linear(dim_latent, dim_embedding, bias=False),
nn.LayerNorm(dim_embedding, elementwise_affine=False) if conf.getHP('encoder_normalize_embedding') else nn.Identity())
self.__model.to(conf.getHP('device'))
def forward(self, input: Tensor) -> Tensor:
return self.__model(input)
class DenseDecoder(nn.Module):
def __init__(self, conf: Configuration):
super(DenseDecoder, self).__init__()
dim_series = conf.getHP('dim_series')
dim_embedding = conf.getHP('dim_embedding')
num_channels = conf.getHP('num_de_channels')
dim_latent = conf.getHP('dim_de_latent')
self.__model = nn.Sequential(Reshape([-1, 1, dim_embedding]),
nn.Linear(dim_embedding, dim_series),
conf.getActivation(conf.getHP('activation_linear')),
_DenseNet(conf, to_encode=False),
nn.AdaptiveMaxPool1d(1),
Reshape([-1, 1, num_channels]),
nn.Linear(num_channels, dim_latent),
conf.getActivation(conf.getHP('activation_linear')),
nn.Linear(dim_latent, dim_series, bias=False),
nn.LayerNorm(dim_series, elementwise_affine=False) if conf.getHP('decoder_normalize_reconstruction') else nn.Identity())
self.__model.to(conf.getHP('device'))
def forward(self, input: Tensor) -> Tensor:
return self.__model(input)
|
the-stack_0_19123 | DEBUG = True
USE_TZ = True
SECRET_KEY = "pQO26MjmglIoVGG40wrOmYOHYKr2R6EFOhxZHaFwlz6LpLvQ49"
DATABASES = {
"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "database.sqlite3"}
}
ROOT_URLCONF = "tests.urls"
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"teryt_tree",
"tests",
]
SITE_ID = 1
MIDDLEWARE_CLASSES = ()
|
the-stack_0_19124 | #!/usr/bin/env python
# coding: utf-8
# by [email protected]
__version__ = "0.0.1"
from typing import Dict
from torch import no_grad, transpose
from SeqEN2.autoencoder.autoencoder_classifier import AutoencoderClassifier
from SeqEN2.autoencoder.autoencoder_ss_decoder import AutoencoderSSDecoder
from SeqEN2.autoencoder.utils import print_shapes
from SeqEN2.utils.custom_dataclasses import AECSSTrainingSettings
from SeqEN2.utils.seq_tools import output_to_ndx
# class for AAE
class AutoencoderClassifierSSDecoder(AutoencoderClassifier, AutoencoderSSDecoder):
def __init__(self, d1, dn, w, arch):
super(AutoencoderClassifierSSDecoder, self).__init__(d1, dn, w, arch)
# training components
self._training_settings = AECSSTrainingSettings()
@property
def training_settings(self) -> AECSSTrainingSettings:
return self._training_settings
@training_settings.setter
def training_settings(self, value=None) -> None:
if isinstance(value, Dict) or value is None or isinstance(value, AECSSTrainingSettings):
if isinstance(value, Dict):
try:
self._training_settings = AECSSTrainingSettings(**value)
except TypeError as e:
raise KeyError(f"missing/extra keys for AECSSTrainingSettings, {e}")
elif isinstance(value, AECSSTrainingSettings):
self._training_settings = value
else:
raise TypeError(
f"Training settings must be a dict or None or type AECSSTrainingSettings, {type(value)} is passed."
)
def forward_test(self, one_hot_input):
vectorized = self.vectorizer(one_hot_input.reshape((-1, self.d0)))
encoded = self.encoder(transpose(vectorized.reshape(-1, self.w, self.d1), 1, 2))
decoded = transpose(self.decoder(encoded), 1, 2).reshape(-1, self.d1)
devectorized = self.devectorizer(decoded)
classifier_output = self.classifier(encoded)
ss_decoder_output = transpose(self.ss_decoder(encoded), 1, 2).reshape(-1, self.ds)
return devectorized, classifier_output, ss_decoder_output, encoded
def unit_test_forward(self, one_hot_input):
vectorized = print_shapes(
one_hot_input.reshape((-1, self.d0)), self.vectorizer, "vectorizer"
)
encoded = print_shapes(
transpose(vectorized.reshape((-1, self.w, self.d1)), 1, 2), self.encoder, "encoder"
)
decoded = transpose(print_shapes(encoded, self.decoder, "decoder"), 1, 2).reshape(
-1, self.d1
)
devectorized = print_shapes(decoded, self.devectorizer, "devectorizer")
classifier_output = print_shapes(encoded, self.classifier, "classifier")
ss_decoder_output = transpose(
print_shapes(encoded, self.ss_decoder, "ss_decoder"), 1, 2
).reshape(-1, self.ds)
return devectorized, classifier_output, ss_decoder_output, encoded
def train_focused(self, **kwargs):
self.focused_optimizer.zero_grad()
loss = None
if self.focus in ["vectorizer", "encoder", "decoder", "devectorizer"]:
loss = self.autoencoder_focused(**kwargs)
elif self.focus == "classifier":
loss = self.classifier_focused(**kwargs)
elif self.focus == "ss_decoder":
loss = self.ss_decoder_focused(**kwargs)
if loss is not None:
self.focused_optimizer.step()
self._modular_training_settings.focused.lr = self.focused_lr_scheduler.get_last_lr()
self.focused_lr_scheduler.step(loss.item())
self.log(f"focused_{self.focus}_LR", self.focused_lr_scheduler.get_last_lr())
def train_one_batch(self, input_vals, input_noise=0.0, device=None, input_keys="ASC"):
if input_vals is not None:
input_ndx, target_vals_ss, target_vals_cl, one_hot_input = self.transform_input(
input_vals, device, input_noise=input_noise, input_keys=input_keys
)
self.train_reconstructor(one_hot_input, input_ndx)
if "C" in input_keys:
self.train_classifier(one_hot_input, target_vals_cl)
if "S" in input_keys:
self.train_ss_decoder(one_hot_input, target_vals_ss)
if self.focus is not None:
self.train_focused(
one_hot_input=one_hot_input,
input_ndx=input_ndx,
target_vals_cl=target_vals_cl,
target_vals_ss=target_vals_ss,
input_keys=input_keys,
)
@staticmethod
def assert_input_type(input_vals):
assert isinstance(input_vals, Dict), "AECSS requires a dict as input_vals"
def train_batch(self, input_vals, device, input_noise=0.0):
"""
Training for one batch of data, this will move into autoencoder module
:param input_vals:
:param device:
:param input_noise:
:return:
"""
self.assert_input_type(input_vals)
self.train()
if "cl" in input_vals.keys():
self.train_one_batch(
input_vals["cl"], input_noise=input_noise, device=device, input_keys="AC"
)
if "ss" in input_vals.keys():
self.train_one_batch(
input_vals["ss"], input_noise=input_noise, device=device, input_keys="AS"
)
if "clss" in input_vals.keys():
self.train_one_batch(
input_vals["clss"], input_noise=input_noise, device=device, input_keys="ASC"
)
def test_one_batch(self, input_vals, device, input_keys="ASC"):
if input_vals is not None:
input_ndx, target_vals_ss, target_vals_cl, one_hot_input = self.transform_input(
input_vals, device, input_keys=input_keys
)
(
reconstructor_output,
classifier_output,
ss_decoder_output,
encoded_output,
) = self.forward_test(one_hot_input)
self.test_reconstructor(reconstructor_output, input_ndx, device)
# test for continuity
self.test_continuity(encoded_output)
if "C" in input_keys:
self.test_classifier(classifier_output, target_vals_cl)
if "S" in input_keys:
self.test_ss_decoder(ss_decoder_output, target_vals_ss, device)
def test_batch(self, input_vals, device):
"""
Test a single batch of data, this will move into autoencoder
:param input_vals:
:return:
"""
self.assert_input_type(input_vals)
self.eval()
with no_grad():
if "cl" in input_vals.keys():
self.test_one_batch(input_vals["cl"], device, input_keys="AC")
if "ss" in input_vals.keys():
self.test_one_batch(input_vals["ss"], device, input_keys="AS")
if "clss" in input_vals.keys():
self.test_one_batch(input_vals["clss"], device, input_keys="ASC")
def eval_one_batch(self, input_vals, device, input_keys="A--", embed_only=False):
if input_vals is not None:
_, _, _, one_hot_input = self.transform_input(input_vals, device, input_keys=input_keys)
if embed_only:
encoded_output = self.forward_embed(one_hot_input)
return {"embedding": encoded_output}
else:
(
reconstructor_output,
classifier_output,
ss_decoder_output,
encoded_output,
) = self.forward_test(one_hot_input)
return {
"reconstructor_output": output_to_ndx(reconstructor_output, self.w),
"classifier_output": classifier_output,
"ss_decoder_output": output_to_ndx(ss_decoder_output, self.w),
"embedding": encoded_output,
}
|
the-stack_0_19125 | import tensorflow as tf
from train_config import config as cfg
def batch_non_max_suppression(
boxes, scores,labels,
score_threshold, iou_threshold,
max_boxes):
"""
Arguments:
boxes: a float tensor with shape [batch_size, N, 4].
scores: a float tensor with shape [batch_size, N].
score_threshold: a float number.
iou_threshold: a float number, threshold for IoU.
max_boxes: an integer, maximum number of retained boxes.
Returns:
boxes: a float tensor with shape [batch_size, max_boxes, 4].
scores: a float tensor with shape [batch_size, max_boxes].
num_detections: an int tensor with shape [batch_size].
"""
def fn(x):
boxes, scores,labels = x
# low scoring boxes are removed
ids = tf.where(tf.greater_equal(scores, score_threshold))
ids = tf.squeeze(ids, axis=1)
boxes = tf.gather(boxes, ids)
scores = tf.gather(scores, ids)
labels = tf.gather(labels, ids)
selected_indices = tf.image.non_max_suppression(
boxes, scores, max_boxes, iou_threshold
)
boxes = tf.gather(boxes, selected_indices)
scores = tf.gather(scores, selected_indices)
labels = tf.gather(labels, selected_indices)
num_boxes = tf.to_int32(tf.shape(boxes)[0])
zero_padding = max_boxes - num_boxes
boxes = tf.pad(boxes, [[0, zero_padding], [0, 0]])
scores = tf.pad(scores, [[0, zero_padding]])
labels = tf.pad(labels, [[0, zero_padding]],constant_values=-1)
boxes.set_shape([max_boxes, 4])
scores.set_shape([max_boxes])
labels.set_shape([max_boxes])
return boxes, scores,labels, num_boxes
boxes, scores, labels, num_detections = tf.map_fn(
fn, [boxes, scores,labels],
dtype=(tf.float32, tf.float32,tf.int64, tf.int32),
parallel_iterations=cfg.TEST.parallel_iterations,
back_prop=False, swap_memory=False, infer_shape=True
)
return boxes, scores,labels, num_detections
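# Minimal usage sketch (illustrative only; tensor names and thresholds below are placeholders,
# not part of this project, and assume the TF1-style graph mode used above):
# boxes_ph = tf.placeholder(tf.float32, [None, None, 4])
# scores_ph = tf.placeholder(tf.float32, [None, None])
# labels_ph = tf.placeholder(tf.int64, [None, None])
# kept_boxes, kept_scores, kept_labels, num_det = batch_non_max_suppression(
#     boxes_ph, scores_ph, labels_ph, score_threshold=0.05, iou_threshold=0.5, max_boxes=100)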
|
the-stack_0_19127 | # Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from numpy import nan
from numpy.random import randn
from legate import pandas as lp
a = randn(10)
b = randn(10)
c = randn(10)
for i in range(10):
if i % 3 == 0:
a[i] = nan
for i in range(10):
if i % 4 == 0:
b[i] = nan
df = pd.DataFrame({"a": a, "b": b, "c": c})
ldf = lp.DataFrame(df)
delta = 0.001
query = "(a + b) / 2.0 > c + @delta"
print("Query: " + query)
out_pd = df.query(query)
out_lp = ldf.query(query)
assert out_lp.equals(out_pd)
|
the-stack_0_19128 | import multiprocessing as mp
import sys
import time
import os
import cv2
import numpy as np
from PyQt5 import QtWidgets, QtGui, QtCore
from PyQt5 import uic
idleTime = 10 # second
threshold = 1.6
label_w = 800
label_h = 600
def resource_path(relative_path):
base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
return os.path.join(base_path, relative_path)
try :
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
alert = 24
GPIO.setup(alert, GPIO.OUT)
mainUI = resource_path(r'/home/pi/rasp/main.ui')
setOptionDialogUI = resource_path(r'/home/pi/rasp/setOptionDialog.ui')
infoDialogUI = resource_path(r'/home/pi/rasp/infoDialog.ui')
except :
mainUI = resource_path(r'./main.ui')
setOptionDialogUI = resource_path(r'./setOptionDialog.ui')
infoDialogUI = resource_path(r'./infoDialog.ui')
mainUi = uic.loadUiType(mainUI)[0]
setOptionDialogUi = uic.loadUiType(setOptionDialogUI)[0]
infoDialogUi = uic.loadUiType(os.path.abspath(infoDialogUI))[0]
class IPCamera:
def __init__(self, rtsp_url: str):
        # pipe for exchanging data with the camera process
self.rtsp_url = rtsp_url
self.parent_conn, child_conn = mp.Pipe()
# load process
self.p = mp.Process(target=self.update, args=(child_conn, rtsp_url))
# start process
self.p.daemon = True
self.p.start()
def get_first_frame(self):
_, frame = cv2.VideoCapture(self.rtsp_url).read()
return frame
def end(self):
        # ask the camera process to terminate
self.parent_conn.send(2)
def update(self, conn, rtsp_url: str):
# load cam into separate process
cap = cv2.VideoCapture(rtsp_url)
run = True
while run:
            # grab camera data to keep the buffer current
cap.grab()
            # receive a command from the parent process
rec_dat = conn.recv()
if rec_dat == 1:
                # a frame was requested
ret, frame = cap.read()
conn.send(frame)
elif rec_dat == 3:
#GPIO.output(alert, GPIO.HIGH)
time.sleep(1)
elif rec_dat == 2:
                # stop requested
cap.release()
run = False
time.sleep(1)
conn.close()
def get_frame(self, mode):
        # used to fetch a frame from the camera connection process
        # resize: e.g. 1.5 for a 50% increase
if mode == "capture":
# send request
self.parent_conn.send(1)
frame = self.parent_conn.recv()
# reset request
self.parent_conn.send(0)
return frame
# resize if needed
elif mode == "signal":
self.parent_conn.send(3)
# reset request
self.parent_conn.send(0)
def setUrl(cameraProtocol, cameraID, cameraPassword, cameraIP, cameraPort, cameraProfileName):
return cameraProtocol + '://' + cameraID + ':' \
+ cameraPassword + '@' + cameraIP + ':' + cameraPort + '/' + cameraProfileName + '/media.smp'
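# Illustrative example (using the default values set in MotionDetector below, not a live device):
# setUrl('rtsp', 'admin', '1q2w3e4r5t', '192.168.0.4', '554', 'test')
# -> 'rtsp://admin:[email protected]:554/test/media.smp'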
class MotionDetector(QtCore.QObject):
idleTime = 5
def __init__(self, label, textBrowser):
super(MotionDetector, self).__init__()
self.cameraProtocol = 'rtsp'
self.cameraID = 'admin'
self.cameraPassword = '1q2w3e4r5t'
self.cameraIP = '192.168.0.4'
self.cameraPort = '554'
self.cameraProfileName = 'test'
self.label = label
self.textBrowser = textBrowser
self.label.resize(label_w, label_h)
        self.logic = True  # controls the main detection loop
self.default_x, self.default_y, self.w, self.h = -1, -1, -1, -1
self.avgLoss = 0
self.lossCycle = 5
self.loopFlag = False
self.buffer_frame = None
# self.total_frame = 0
        self.buffError = None  # error relative to the previous frame
        self.idleMode = False  # flag: enter idle state after an anomaly is detected
global idleTime
self.idleTime = idleTime
# self.threshold = threshold
self.fps = 1
def onMouse(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.default_x = x
self.default_y = y
elif event == cv2.EVENT_LBUTTONUP:
self.w = x - self.default_x
self.h = y - self.default_y
if self.w > 0 and self.h > 0:
img_draw = param.copy()
cv2.rectangle(img_draw, (self.default_x, self.default_y), (x, y), (0, 255, 255), 2)
cv2.imshow('Set RoI', img_draw)
def setRoI(self, frame):
cv2.startWindowThread()
cv2.imshow('Set RoI', frame)
cv2.setMouseCallback("Set RoI", self.onMouse, param=frame)
cv2.waitKey(0)
cv2.destroyAllWindows()
def write_log(self, message: str):
now = time.localtime()
        self.textBrowser.append(message + ': ' + time.strftime('%Y-%m-%d %H:%M:%S', now))
def loop(self):
if self.loopFlag: return
self.loopFlag = True
self.ip_camera = IPCamera(setUrl(self.cameraProtocol, self.cameraID, self.cameraPassword,
self.cameraIP, self.cameraPort, self.cameraProfileName))
self.frame = cv2.cvtColor(self.ip_camera.get_first_frame(), cv2.COLOR_BGR2GRAY)
        # log the detection start to the text browser
        self.write_log('Detection started')
        # RoI selection
if self.default_x == -1:
self.setRoI(self.frame)
        # processing after the first frame has been loaded
        if self.buffer_frame is None:  # first frame
self.frame = self.ip_camera.get_frame("capture")
self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
self.buffer_frame = self.frame[self.default_y:self.default_y + self.h,
self.default_x:self.default_x + self.w]
subtract_frame = np.round(
np.sqrt(np.sum(np.abs(self.buffer_frame - self.buffer_frame) ** 2))) # L2 DISTANCE
self.buffError = subtract_frame
        # display the first frame with the RoI box drawn
bounding_box_frame = self.frame.copy()
bounding_box_frame = cv2.resize(bounding_box_frame, dsize=(800, 600), interpolation=cv2.INTER_AREA)
output_frame = cv2.rectangle(bounding_box_frame, (self.default_x, self.default_y),
(self.default_x + self.w, self.default_y + self.h), (0, 255, 0),
thickness=5)
qimg = QtGui.QImage(output_frame.data, label_w, label_h,
output_frame.strides[0], QtGui.QImage.Format_Grayscale8)
pixmap = QtGui.QPixmap.fromImage(qimg)
self.label.setPixmap(pixmap)
        # process the second frame
self.frame = self.ip_camera.get_frame("capture")
self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
self.roi_frame = self.frame[self.default_y:self.default_y + self.h, self.default_x:self.default_x + self.w]
subtract_frame = np.round(np.sqrt(np.sum(np.abs(self.buffer_frame - self.roi_frame) ** 2)))
self.buffer_frame = self.roi_frame
self.buffError = subtract_frame
bounding_box_frame = self.frame.copy()
bounding_box_frame = cv2.rectangle(bounding_box_frame, (self.default_x, self.default_y),
(self.default_x + self.w, self.default_y + self.h), (0, 255, 0),
thickness=5)
bounding_box_frame = cv2.resize(bounding_box_frame, dsize=(800, 600), interpolation=cv2.INTER_AREA)
qimg = QtGui.QImage(bounding_box_frame.data, label_w, label_h,
bounding_box_frame.strides[0], QtGui.QImage.Format_Grayscale8)
pixmap = QtGui.QPixmap.fromImage(qimg)
self.label.setPixmap(pixmap)
for i in range(self.lossCycle):
self.frame = self.ip_camera.get_frame("capture")
self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
self.roi_frame = self.frame[self.default_y:self.default_y + self.h, self.default_x:self.default_x + self.w]
subtract_frame = np.round(np.sqrt(np.sum(np.abs(self.buffer_frame - self.roi_frame) ** 2)))
            self.avgLoss += subtract_frame  # accumulate loss for the average
self.buffer_frame = self.roi_frame
self.buffError = subtract_frame
bounding_box_frame = self.frame.copy()
bounding_box_frame = cv2.rectangle(bounding_box_frame, (self.default_x, self.default_y),
(self.default_x + self.w, self.default_y + self.h), (0, 255, 0),
thickness=5)
bounding_box_frame = cv2.resize(bounding_box_frame, dsize=(800, 600), interpolation=cv2.INTER_AREA)
qimg = QtGui.QImage(bounding_box_frame.data, label_w, label_h,
bounding_box_frame.strides[0], QtGui.QImage.Format_Grayscale8)
pixmap = QtGui.QPixmap.fromImage(qimg)
self.label.setPixmap(pixmap)
        # derive the detection threshold from the average loss
self.threshold = 1 + round((((int(self.avgLoss) / self.lossCycle) / self.roi_frame.size) * 100), 2)
win.statusLabel.setText("일반 감지 상태")
previous_time = time.time()
while self.logic:
self.frame = self.ip_camera.get_frame("capture")
self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
current_time = time.time() - previous_time
if current_time > 1. / self.fps:
previous_time = time.time()
# if self.buffer_frame is None: # 첫 프레임인 경우에
# self.buffer_frame = self.frame[self.default_y:self.default_y + self.h,
# self.default_x:self.default_x + self.w]
self.roi_frame = self.frame[self.default_y:self.default_y + self.h,
self.default_x:self.default_x + self.w]
subtract_frame = np.round(
np.sqrt(np.sum(np.abs(self.buffer_frame - self.roi_frame) ** 2)))
# if self.buffError is None:
# self.buffError = subtract_frame
print(subtract_frame)
print(self.buffError * self.threshold)
                # normal detection state
if self.idleMode == False:
if subtract_frame > self.buffError * self.threshold:
                        self.write_log('Anomaly detected')
self.idleMode = True
self.idleInitTime = time.time()
self.ip_camera.get_frame("signal")
if self.idleMode == True:
win.statusLabel.setText("유휴 상태")
win.idleTimeLcd.display(
(self.idleInitTime + self.idleTime) - time.time())
if self.idleInitTime + self.idleTime <= time.time():
                        self.idleMode = False  # leave idle state
                        win.statusLabel.setText("Normal detection state")
win.idleTimeLcd.display(0)
self.buffer_frame = self.roi_frame
self.buffError = subtract_frame
bounding_box_frame = self.frame.copy()
bounding_box_frame = cv2.rectangle(bounding_box_frame, (self.default_x, self.default_y),
(self.default_x + self.w, self.default_y + self.h), (0, 255, 0),
thickness=5)
bounding_box_frame = cv2.resize(bounding_box_frame, dsize=(800, 600), interpolation=cv2.INTER_AREA)
qimg = QtGui.QImage(bounding_box_frame.data, label_w, label_h,
bounding_box_frame.strides[0], QtGui.QImage.Format_Grayscale8)
pixmap = QtGui.QPixmap.fromImage(qimg)
self.label.setPixmap(pixmap)
class InfoDialog(QtWidgets.QDialog, infoDialogUi):
def __init__(self):
super(InfoDialog, self).__init__()
self.setupUi(self)
class SetOptionDialog(QtWidgets.QDialog, setOptionDialogUi):
def __init__(self):
super(SetOptionDialog, self).__init__()
self.setupUi(self)
self.cameraID.setText(str('admin'))
self.cameraPW.setText(str('1q2w3e4r5t'))
self.cameraIP.setText(str('192.168.0.4'))
self.cameraProfile.setText(str('test'))
self.cameraID.textChanged.connect(self.cameraidValueChanged)
self.cameraPW.textChanged.connect(self.camerapwValueChanged)
self.cameraIP.textChanged.connect(self.cameraipValueChanged)
self.cameraProfile.textChanged.connect(self.cameraprofileValueChanged)
self.idleTimeSpinBox.setValue(idleTime)
self.thresholdSlider.setSliderPosition(int((threshold - 1) / 0.05) + 1)
self.thresholdLCD.display((threshold - 1) / 0.05)
self.idleTimeSpinBox.valueChanged.connect(self.idleTimeValueChanged)
self.thresholdSlider.valueChanged.connect(self.thresholdValueChanged)
self.fpsSlider.valueChanged.connect(self.fpsValueChanged)
def cameraidValueChanged(self):
win.motionDetector.cameraID = self.cameraID.text()
def camerapwValueChanged(self):
win.motionDetector.cameraPassword = self.cameraPW.text()
def cameraipValueChanged(self):
win.motionDetector.cameraIP = self.cameraIP.text()
def cameraprofileValueChanged(self):
win.motionDetector.cameraProfileName = self.cameraProfile.text()
def idleTimeValueChanged(self):
win.motionDetector.idleTime = self.idleTimeSpinBox.value()
def thresholdValueChanged(self):
win.motionDetector.threshold = 1 + (0.05 * self.thresholdSlider.value())
self.thresholdLCD.display(self.thresholdSlider.value())
def fpsValueChanged(self):
win.motionDetector.fps = self.fpsSlider.value()
self.fpsLCD.display(self.fpsSlider.value())
class MainWindow(QtWidgets.QMainWindow, mainUi):
def __init__(self):
super(MainWindow, self).__init__()
self.setupUi(self)
self.thread = QtCore.QThread()
self.thread2 = QtCore.QThread()
self.thread.start()
self.thread2.start()
self.motionDetector = MotionDetector(self.label, self.textBrowser)
self.motionDetector.moveToThread(self.thread)
self.setOptionDialog = SetOptionDialog()
self.setOptionDialog.moveToThread(self.thread2)
self.infoDialog = InfoDialog()
self.startButton.clicked.connect(self.motionDetector.loop)
self.setOptionButton.clicked.connect(self.setOptionDialog.show)
self.exitButton.clicked.connect(self.quit)
self.actionQuit.triggered.connect(self.quit)
self.actionSetOption.triggered.connect(self.setOptionDialog.show)
self.actionInfo.triggered.connect(self.infoDialog.show)
def quit(self):
        self.motionDetector.logic = False  # stop the main detection loop
        try:
            self.motionDetector.ip_camera.end()
        except Exception:
            pass  # the camera connection may already be closed
GPIO.cleanup()
app.instance().quit()
app.quit()
win.thread.exit()
win.thread.quit()
win.thread.wait(5000)
if __name__ == "__main__":
mp.freeze_support() # for windows
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
app = QtWidgets.QApplication(sys.argv)
win = MainWindow()
win.show()
app.exec_()
|
the-stack_0_19129 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import shutil
import nox
BLACK_VERSION = "black==19.10b0"
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"]
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", BLACK_VERSION)
session.run(
"black", "--check", *BLACK_PATHS,
)
session.run("flake8", "google", "tests")
@nox.session(python="3.6")
def blacken(session):
"""Run black.
Format code to uniform standard.
This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
That run uses an image that doesn't have 3.6 installed. Before updating this
check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
"""
session.install(BLACK_VERSION)
session.run(
"black", *BLACK_PATHS,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
session.install("asyncmock", "pytest-asyncio")
session.install("mock", "pytest", "pytest-cov")
session.install("-e", ".")
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud.spanner",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
session.install("-e", ".[tracing]")
# Run py.test against the unit tests with OpenTelemetry.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud.spanner",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
"""Run the system test suite."""
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", "") and not os.environ.get(
"SPANNER_EMULATOR_HOST", ""
):
session.skip(
"Credentials or emulator host must be set via environment variable"
)
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install(
"mock", "pytest", "google-cloud-testutils",
)
session.install("-e", ".[tracing]")
# Run py.test against the system tests.
if system_test_exists:
session.run("py.test", "--quiet", system_test_path, *session.posargs)
if system_test_folder_exists:
session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=99")
session.run("coverage", "erase")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".[tracing]")
session.install("sphinx", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docfx(session):
"""Build the docfx yaml files for this library."""
session.install("-e", ".[tracing]")
# sphinx-docfx-yaml supports up to sphinx version 1.5.5.
# https://github.com/docascode/sphinx-docfx-yaml/issues/97
session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-T", # show full traceback on exception
"-N", # no colors
"-D",
(
"extensions=sphinx.ext.autodoc,"
"sphinx.ext.autosummary,"
"docfx_yaml.extension,"
"sphinx.ext.intersphinx,"
"sphinx.ext.coverage,"
"sphinx.ext.napoleon,"
"sphinx.ext.todo,"
"sphinx.ext.viewcode,"
"recommonmark"
),
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
|
the-stack_0_19130 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import json
import unittest
import augly.video as vidaugs
from augly.tests.video_tests.base_unit_test import BaseVideoUnitTest
from augly.utils import VIDEO_METADATA_PATH
from augly.utils.ffmpeg import get_conditional_for_skipping_video_tests
@unittest.skipUnless(*get_conditional_for_skipping_video_tests())
class TransformsVideoUnitTest(BaseVideoUnitTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
with open(VIDEO_METADATA_PATH, "r") as f:
cls.metadata = json.load(f)
def test_OverlayDots(self):
self.evaluate_class(vidaugs.OverlayDots(), fname="overlay_dots", seed=1)
def test_OverlayShapes(self):
self.evaluate_class(vidaugs.OverlayShapes(), fname="overlay_shapes", seed=1)
def test_OverlayText(self):
self.evaluate_class(
vidaugs.OverlayText(
topleft=(0, 0),
bottomright=(0.5, 0.25),
),
fname="overlay_text",
seed=1,
)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_19131 | # -*- coding:utf-8 -*-
'''
This script generates the icon.py file.
The icon.ico image is base64-encoded and stored as a variable inside icon.py,
so that after the application is packaged into an exe with pyinstaller, a
temporary .ico file can be rebuilt from that variable at runtime and handed to
tkinter as the window icon. The exe therefore runs standalone and no longer
depends on an .ico file being present in its directory.
'''
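# A minimal sketch of how the generated icon.py is typically consumed at runtime
# (the tkinter window variable `root` is assumed to exist in the consuming app):
#
#     import base64, os, tempfile
#     from icon import encoded_img
#
#     with tempfile.NamedTemporaryFile(suffix='.ico', delete=False) as tmp:
#         tmp.write(base64.b64decode(encoded_img))
#     root.iconbitmap(tmp.name)  # hand the temporary .ico to tkinter
#     os.remove(tmp.name)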
import base64
with open('icon.py', 'wb') as py_file_obj:
text = "# -*- coding:utf-8 -*-\n'''\n"
text += '此文件由gen_icon_py.py生成。\n我们使用base64编码,把icon.ico图标作为变量保存在icon.py中,\n'
text += '使用pyinstaller打包exe后,可在执行中由该变量生成临时ico文件给tkinter调用,实现窗口图标。\n'
text += '从而exe文件可以独立运行,不再依赖目录中的ico文件。\n'
text += "'''\n\nencoded_img = '"
py_file_obj.write(text.encode('utf-8'))
with open('icon.ico', 'rb') as ico_file_obj:
b64str = base64.b64encode(ico_file_obj.read())
with open('icon.py','ab') as py_file_obj:
py_file_obj.write(b64str)
with open('icon.py','a') as py_file_obj:
py_file_obj.write("'") |
the-stack_0_19132 | import shutil
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', autosize = False):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
autosize - Optional : automatically resize the length of the progress bar to the terminal window (Bool)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
styling = '%s |%s| %s%% %s' % (prefix, fill, percent, suffix)
if autosize:
cols, _ = shutil.get_terminal_size(fallback = (length, 1))
length = cols - len(styling)
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s' % styling.replace(fill, bar), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
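# Example usage (illustrative only; the short sleep just simulates work):
if __name__ == '__main__':
    import time
    items = list(range(50))
    printProgressBar(0, len(items), prefix='Progress:', suffix='Complete', autosize=True)
    for i, _ in enumerate(items):
        time.sleep(0.02)  # simulated work item
        printProgressBar(i + 1, len(items), prefix='Progress:', suffix='Complete', autosize=True)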
|
the-stack_0_19134 | # Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, cStringIO, traceback, warnings, weakref
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning']
try:
import codecs
except ImportError:
codecs = None
try:
import thread
import threading
except ImportError:
thread = None
__author__ = "Vinay Sajip <[email protected]>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
try:
unicode
_unicode = True
except NameError:
_unicode = False
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
_srcfile = __file__[:-4] + '.py'
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
# next bit filched from 1.5.2's inspect.py
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
# done filching
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = 1
#
# If you don't want threading information in the log, set this to zero
#
logThreads = 1
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = 1
#
# If you don't want process information in the log, set this to zero
#
logProcesses = 1
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
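# Example (illustrative): registering and logging at a custom level:
#
#   VERBOSE = 15
#   addLevelName(VERBOSE, "VERBOSE")
#   getLogger("app").log(VERBOSE, "shown with levelname VERBOSE")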
def _checkLevel(level):
if isinstance(level, (int, long)):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if thread:
_lock = threading.RLock()
else:
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warn('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - long(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and thread:
self.thread = thread.get_ident()
self.threadName = threading.current_thread().name
else:
self.thread = None
self.threadName = None
if not logMultiprocessing:
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except StandardError:
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
if not _unicode: #if no unicode support...
msg = str(self.msg)
else:
msg = self.msg
if not isinstance(msg, basestring):
try:
msg = str(self.msg)
except UnicodeError:
msg = self.msg #Defer encoding till later
if self.args:
msg = msg % self.args
return msg
def makeLogRecord(dict):
"""
Make a LogRecord whose attributes are defined by the specified dictionary,
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = LogRecord(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
    default value of "%(message)s" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
"""
if fmt:
self._fmt = fmt
else:
self._fmt = "%(message)s"
self.datefmt = datefmt
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime("%Y-%m-%d %H:%M:%S", ct)
s = "%s,%03d" % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = cStringIO.StringIO()
traceback.print_exception(ei[0], ei[1], ei[2], None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._fmt.find("%(asctime)") >= 0
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime(), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self._fmt % record.__dict__
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
try:
s = s + record.exc_text
except UnicodeError:
# Sometimes filenames have non-ASCII chars, which can lead
# to errors when s is Unicode and record.exc_text is str
# See issue 8924.
# We also use replace for when there are multiple
# encodings, e.g. UTF-8 for the filesystem and latin-1
# for a script. See issue 13232.
s = s + record.exc_text.decode(sys.getfilesystemencoding(),
'replace')
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return 1
elif self.name == record.name:
return 1
elif record.name.find(self.name, 0, self.nlen) != 0:
return 0
return (record.name[self.nlen] == ".")
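# Example (illustrative): only events from the "A.B" subtree pass this filter:
#
#   handler.addFilter(Filter("A.B"))   # allows "A.B", "A.B.C", ... but not "A.BB"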
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
"""
rv = 1
for f in self.filters:
if not f.filter(record):
rv = 0
break
return rv
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if thread:
self.lock = threading.RLock()
else:
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError:
pass # see issue 5971
finally:
del ei
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
fs = "%s\n"
if not _unicode: #if no unicode support...
stream.write(fs % msg)
else:
try:
if (isinstance(msg, unicode) and
getattr(stream, 'encoding', None)):
ufs = fs.decode(stream.encoding)
try:
stream.write(ufs % msg)
except UnicodeEncodeError:
#Printing to terminals sometimes fails. For example,
#with an encoding of 'cp1251', the above write will
#work if written to a stream opened or wrapped by
#the codecs module, but fail when writing to a
#terminal even when the codepage is set to cp1251.
#An extra encoding step seems to be needed.
stream.write((ufs % msg).encode(stream.encoding))
else:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode("UTF-8"))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=0):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
if codecs is None:
encoding = None
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
if self.encoding is None:
stream = open(self.baseFilename, self.mode)
else:
stream = codecs.open(self.baseFilename, self.mode, self.encoding)
return stream
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
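# Example (illustrative): wiring a FileHandler and Formatter onto a logger:
#
#   fh = FileHandler("app.log", mode="a")
#   fh.setLevel(INFO)
#   fh.setFormatter(Formatter(BASIC_FORMAT))
#   getLogger("app").addHandler(fh)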
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
#self.loggers = [alogger]
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
#if alogger not in self.loggers:
if alogger not in self.loggerMap:
#self.loggers.append(alogger)
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = 0
self.loggerDict = {}
self.loggerClass = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, basestring):
raise TypeError('A logger name must be string or Unicode')
if isinstance(name, unicode):
name = name.encode('utf-8')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
#The if means ... if not c.parent.name.startswith(nm)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = 1
self.handlers = []
self.disabled = 0
def setLevel(self, level):
"""
Set the logging level of this logger.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
warn = warning
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = 1
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
rv = (co.co_filename, f.f_lineno, co.co_name)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func = self.findCaller()
except ValueError:
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else:
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = 1
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return 0
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
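    # Sketch (illustrative) of a subclass that prefixes every message with a
    # connection id taken from the adapter's contextual dict:
    #
    #   class ConnAdapter(LoggerAdapter):
    #       def process(self, msg, kwargs):
    #           kwargs["extra"] = self.extra
    #           return "[conn %s] %s" % (self.extra["conn_id"], msg), kwargs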
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.debug(msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.info(msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.error(msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
kwargs["exc_info"] = 1
self.logger.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.critical(msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)
def isEnabledFor(self, level):
"""
See if the underlying logger is enabled for the specified level.
"""
return self.logger.isEnabledFor(level)
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
hdlr = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
hdlr = StreamHandler(stream)
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
fmt = Formatter(fs, dfs)
hdlr.setFormatter(fmt)
root.addHandler(hdlr)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
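# Example (illustrative): one-shot configuration writing DEBUG and above to a file:
#
#   basicConfig(filename="app.log", filemode="w", level=DEBUG,
#               format="%(asctime)s %(levelname)s %(message)s")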
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
#def getRootLogger():
# """
# Return the root logger.
#
# Note that getLogger('') now does the same thing, so this function is
# deprecated and may disappear in the future.
# """
# return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger,
with exception information.
"""
kwargs['exc_info'] = 1
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
warn = warning
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
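# Sketch of the library-side usage described in the NullHandler docstring above
# ("mylib" is a placeholder package name):
#
#     import logging
#     logging.getLogger("mylib").addHandler(logging.NullHandler())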
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
    Implementation of showwarning() which redirects to logging. It will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
    If capture is True, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
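# Minimal sketch of routing warnings through logging (in user code; assumes a
# handler has been configured, e.g. via basicConfig()):
#
#     import logging, warnings
#     logging.captureWarnings(True)
#     warnings.warn("deprecated call")     # handled by the "py.warnings" logger
#     logging.captureWarnings(False)       # restore the original showwarning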
|
the-stack_0_19137 | # from ....tools.normalize import log_cpm
from .....tools.decorators import method
from .....tools.utils import check_version
@method(
method_name="BBKNN",
paper_name="BBKNN: fast batch alignment of single cell transcriptomes",
paper_url="https://academic.oup.com/bioinformatics/article/36/3/964/5545955",
paper_year=2020,
code_url="https://github.com/Teichlab/bbknn",
code_version=check_version("bbknn"),
image="openproblems-python-batch-integration", # only if required
)
def bbknn_full_unscaled(adata, test=False):
from scib.integration import combat
from scib.preprocessing import reduce_data
adata = combat(adata, "batch")
reduce_data(adata, umap=False)
# Complete the result in-place
return adata
|
the-stack_0_19139 | from typing import List, Optional, Tuple, Union
from hwt.code import And, Or
from hwt.hdl.statements.assignmentContainer import HdlAssignmentContainer
from hwt.hdl.types.defs import BIT
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
def _get_ready_signal(intf: Union[Interface, Tuple[RtlSignal, RtlSignal]]) -> RtlSignal:
try:
return intf.rd
except AttributeError:
pass
if isinstance(intf, Tuple):
_, rd = intf
return rd
return intf.ready
def _get_valid_signal(intf: Union[Interface, Tuple[RtlSignal, RtlSignal]]) -> RtlSignal:
try:
return intf.vld
except AttributeError:
pass
if isinstance(intf, Tuple):
vld, _ = intf
return vld
return intf.valid
def _exStreamMemberAck(m) -> RtlSignal:
c, n = m
return c & n.ack()
class ExclusiveStreamGroups(list):
"""
list of tuples (cond, StreamNode instance)
Only one stream from this group can be activated at the time
"""
def __hash__(self):
return id(self)
def sync(self, enSig=None) -> List[HdlAssignmentContainer]:
"""
Create synchronization logic between streams
(generate valid/ready synchronization logic for interfaces)
:param enSig: optional signal to enable this group of nodes
:return: list of assignments which are responsible for synchronization
of streams
"""
expression = []
for cond, node in self:
if enSig is not None:
cond = cond & enSig
expression.extend(node.sync(cond))
return expression
def ack(self) -> RtlSignal:
"""
        :return: expression whose value is high when a transaction can be made
            over at least one child streaming node
"""
return Or(*map(_exStreamMemberAck, self))
class StreamNode():
"""
    Group of stream master and slave interfaces to synchronize with each other
:ivar ~.masters: interfaces which are inputs into this node
:ivar ~.slaves: interfaces which are outputs of this node
:ivar ~.extraConds: {dict interface : extraConditionSignal}
where extra conditions will be added to expression for channel enable.
For master it means it will obtain ready=1 only if extraConditionSignal
is 1.
For slave it means it will obtain valid=1 only
if extraConditionSignal is 1.
All interfaces have to wait on each other so if an extraCond!=1 it causes
blocking on all interfaces if not overridden by skipWhen.
    :note: instead of an interface it is possible to use a tuple of (valid, ready) signals
:ivar ~.skipWhen: dict interface : skipSignal
where if skipSignal is high interface is disconnected from stream
sync node and others does not have to wait on it
(master does not need to have valid and slave ready)
:attention: skipWhen has higher priority
"""
def __init__(self, masters=None, slaves=None,
extraConds=None, skipWhen=None):
if masters is None:
masters = []
if slaves is None:
slaves = []
if extraConds is None:
extraConds = {}
if skipWhen is None:
skipWhen = {}
self.masters = masters
self.slaves = slaves
self.extraConds = extraConds
self.skipWhen = skipWhen
def sync(self, enSig=None) -> List[HdlAssignmentContainer]:
"""
Create synchronization logic between streams
(generate valid/ready synchronization logic for interfaces)
:param enSig: optional signal to enable this node
        :return: list of assignments which are responsible for synchronization of streams
"""
masters = self.masters
slaves = self.slaves
if not masters and not slaves:
# node is empty
assert not self.extraConds
assert not self.skipWhen
return []
# check if there is not not any mess in extraConds/skipWhen
for i in self.extraConds.keys():
assert i in masters or i in slaves, i
for i in self.skipWhen.keys():
assert i in masters or i in slaves, i
# this expression container is there to allow usage of this function
# in usual hdl containers like If, Switch etc...
expression = []
for m in masters:
r = self.ackForMaster(m)
if enSig is not None:
r = r & enSig
if isinstance(m, ExclusiveStreamGroups):
a = m.sync(r)
else:
a = [_get_ready_signal(m)(r), ]
expression.extend(a)
for s in slaves:
v = self.ackForSlave(s)
if enSig is not None:
v = v & enSig
if isinstance(s, ExclusiveStreamGroups):
a = s.sync(v)
else:
a = [_get_valid_signal(s)(v), ]
expression.extend(a)
return expression
def ack(self) -> RtlSignal:
"""
        :return: expression whose value is high when a transaction can be made over the interfaces
"""
# every interface has to have skip flag or it has to be ready/valid
# and extraCond has to be True if present
acks = []
for m in self.masters:
extra, skip = self.getExtraAndSkip(m)
if isinstance(m, ExclusiveStreamGroups):
a = m.ack()
else:
a = _get_valid_signal(m)
if extra:
a = And(a, *extra)
if skip is not None:
a = Or(a, skip)
acks.append(a)
for s in self.slaves:
extra, skip = self.getExtraAndSkip(s)
if isinstance(s, ExclusiveStreamGroups):
a = s.ack()
else:
a = _get_ready_signal(s)
if extra:
a = And(a, *extra)
if skip is not None:
a = Or(a, skip)
acks.append(a)
if not acks:
return True
return And(*acks)
def getExtraAndSkip(self, intf) -> Tuple[Optional[RtlSignal], Optional[RtlSignal]]:
"""
:return: optional extraCond and skip flags for interface
"""
try:
extra = [self.extraConds[intf], ]
except KeyError:
extra = []
try:
skip = self.skipWhen[intf]
except KeyError:
skip = None
return extra, skip
def vld(self, intf: Union[Interface, Tuple[RtlSignal, RtlSignal]]) -> RtlSignal:
"""
        :return: valid signal of master interface for synchronization of others
"""
try:
s = self.skipWhen[intf]
assert s is not None
except KeyError:
s = None
if isinstance(intf, ExclusiveStreamGroups):
v = intf.ack()
else:
v = _get_valid_signal(intf)
if s is None:
return v
else:
return v | s
def rd(self, intf: Union[Interface, Tuple[RtlSignal, RtlSignal]]) -> RtlSignal:
"""
        :return: ready signal of slave interface for synchronization of others
"""
try:
s = self.skipWhen[intf]
assert s is not None
except KeyError:
s = None
if isinstance(intf, ExclusiveStreamGroups):
r = intf.ack()
else:
r = _get_ready_signal(intf)
if s is None:
return r
else:
return r | s
def ackForMaster(self, master: Interface) -> RtlSignal:
"""
:return: driver of ready signal for master
"""
extra, skip = self.getExtraAndSkip(master)
rd = self.rd
vld = self.vld
conds = [*(vld(m) for m in self.masters if m is not master),
*(rd(s) for s in self.slaves),
*extra]
if conds:
r = And(*conds)
else:
r = BIT.from_py(1)
if skip is not None:
r = r & ~skip
return r
def ackForSlave(self, slave: Interface) -> RtlSignal:
"""
:return: driver of valid signal for slave
"""
extra, skip = self.getExtraAndSkip(slave)
rd = self.rd
vld = self.vld
conds = [*(vld(m) for m in self.masters),
*(rd(s) for s in self.slaves if s is not slave),
*extra]
if conds:
v = And(*conds)
else:
v = BIT.from_py(1)
if skip is not None:
v = v & ~skip
return v
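# Usage sketch (inside a hypothetical Unit._impl(); dataIn/dataOut are assumed
# handshaked interfaces carrying vld/rd signals):
#
#     sync = StreamNode(masters=[self.dataIn], slaves=[self.dataOut])
#     sync.sync()          # drives dataIn.rd and dataOut.vld
#     # sync.ack() is high exactly when a transaction happens on both sides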
|
the-stack_0_19142 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import enum
from functools import partial
import itertools
import typing
from typing import Any, Optional, Tuple
import unittest
import warnings
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import jax
from jax import dtypes
from jax import numpy as jnp
from jax import ops
from jax._src import test_util as jtu
from jax._src import util
from jax.config import config
config.parse_flags_with_absl()
# We disable the whitespace continuation check in this file because otherwise it
# makes the test name formatting unwieldy.
# pylint: disable=bad-continuation
ARRAY_MSG = r"Using a non-tuple sequence for multidimensional indexing is not allowed.*arr\[array\(seq\)\]"
TUPLE_MSG = r"Using a non-tuple sequence for multidimensional indexing is not allowed.*arr\[tuple\(seq\)\]"
float_dtypes = jtu.dtypes.floating
default_dtypes = float_dtypes + jtu.dtypes.integer
all_dtypes = default_dtypes + jtu.dtypes.boolean
class IndexSpec(typing.NamedTuple):
shape: Tuple[int, ...]
indexer: Any
out_shape: Optional[Tuple[int, ...]] = None
def check_grads(f, args, order, atol=None, rtol=None, eps=None):
# TODO(mattjj,dougalm): add higher-order check
default_tol = 1e-6 if config.x64_enabled else 1e-2
atol = atol or default_tol
rtol = rtol or default_tol
eps = eps or default_tol
jtu.check_jvp(f, partial(jax.jvp, f), args, atol, rtol, eps)
jtu.check_vjp(f, partial(jax.vjp, f), args, atol, rtol, eps)
STATIC_INDEXING_TESTS = [
("OneIntIndex", [
IndexSpec(shape=(3,), indexer=1, out_shape=()),
IndexSpec(shape=(3, 3), indexer=0, out_shape=(3,)),
IndexSpec(shape=(3, 4, 5), indexer=2, out_shape=(4, 5)),
IndexSpec(shape=(3,), indexer=-1, out_shape=()),
IndexSpec(shape=(3,), indexer=-2, out_shape=()),
]),
("TwoIntIndices", [
IndexSpec(shape=(3, 3), indexer=(2, 1), out_shape=()),
IndexSpec(shape=(3, 4, 5), indexer=(1, 2), out_shape=(5,)),
IndexSpec(shape=(3, 4, 5), indexer=(-1, 2), out_shape=(5,)),
]),
("ThreeIntIndices", [
IndexSpec(shape=(3, 4, 5), indexer=(1, 2, 3), out_shape=()),
]),
("OneSliceIndex", [
IndexSpec(shape=(10,), indexer=slice(1, 3), out_shape=(2,)),
IndexSpec(shape=(10,), indexer=slice(1, -1), out_shape=(8,)),
IndexSpec(shape=(10,), indexer=slice(None, -1), out_shape=(9,)),
IndexSpec(shape=(10,), indexer=slice(None, None, None), out_shape=(10,)),
IndexSpec(shape=(10, 8), indexer=slice(1, 3), out_shape=(2, 8)),
IndexSpec(shape=(10, 8), indexer=slice(1, None), out_shape=(9, 8)),
IndexSpec(shape=(10, 8), indexer=slice(None, 3), out_shape=(3, 8)),
IndexSpec(shape=(10, 8), indexer=slice(-3, None), out_shape=(3, 8)),
]),
("OneSliceIndexNegativeStride", [
IndexSpec(shape=(10,), indexer=slice(3, 1, -1), out_shape=(2,)),
IndexSpec(shape=(10,), indexer=slice(1, 8, -1), out_shape=(0,)),
IndexSpec(shape=(10,), indexer=slice(None, 1, -2), out_shape=(4,)),
IndexSpec(shape=(10,), indexer=slice(None, None, -1), out_shape=(10,)),
IndexSpec(shape=(10, 8), indexer=slice(3, 1, -1), out_shape=(2, 8)),
IndexSpec(shape=(10, 8), indexer=slice(0, 8, -1), out_shape=(0, 8)),
IndexSpec(shape=(10, 8), indexer=slice(None, None, -1), out_shape=(10, 8)),
]),
("OneSliceIndexNonUnitStride", [
IndexSpec(shape=(10,), indexer=slice(0, 8, 2), out_shape=(4,)),
IndexSpec(shape=(10,), indexer=slice(0, 8, 3), out_shape=(3,)),
IndexSpec(shape=(10,), indexer=slice(1, 3, 2), out_shape=(1,)),
IndexSpec(shape=(10,), indexer=slice(1, None, 2), out_shape=(5,)),
IndexSpec(shape=(10,), indexer=slice(None, 1, -2), out_shape=(4,)),
IndexSpec(shape=(10, 8), indexer=slice(1, 8, 3), out_shape=(3, 8)),
IndexSpec(shape=(10, 8), indexer=slice(None, None, 2), out_shape=(5, 8)),
IndexSpec(shape=(10, 8), indexer=slice(None, 1, -2), out_shape=(4, 8)),
IndexSpec(shape=(10, 8), indexer=slice(None, None, -2), out_shape=(5, 8)),
]),
("TwoSliceIndices", [
IndexSpec(shape=(10, 8), indexer=(slice(1, 3), slice(0, 2)),
out_shape=(2, 2)),
IndexSpec(shape=(10, 8), indexer=(slice(1, None), slice(None, 2)),
out_shape=(9, 2)),
IndexSpec(shape=(10, 8), indexer=(slice(None, None, -1), slice(None, 2)),
out_shape=(10, 2)),
IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, 2)),
out_shape=(2, 2, 3)),
IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, None)),
out_shape=(2, 8, 3)),
IndexSpec(shape=(10, 8, 3), indexer=(slice(1, None), slice(0, 2)),
out_shape=(9, 2, 3)),
]),
("OneColonIndex", [
IndexSpec(shape=(3,), indexer=slice(None), out_shape=(3,)),
IndexSpec(shape=(3, 4), indexer=slice(None), out_shape=(3, 4)),
]),
("MultipleColonIndices", [
IndexSpec(shape=(3, 4), indexer=(slice(None), slice(None)),
out_shape=(3, 4)),
IndexSpec(shape=(3, 4, 5), indexer=(slice(None), slice(None)),
out_shape=(3, 4, 5)),
]),
("MixedSliceIndices", [
IndexSpec(shape=(10, 4), indexer=(slice(None), slice(0, 2)),
out_shape=(10, 2)),
IndexSpec(shape=(10, 4), indexer=(1, slice(None)),
out_shape=(4,)),
]),
("EllipsisIndex", [
IndexSpec(shape=(3,), indexer=Ellipsis, out_shape=(3,)),
IndexSpec(shape=(3, 4), indexer=Ellipsis, out_shape=(3, 4)),
IndexSpec(shape=(3, 4, 5), indexer=(0, Ellipsis), out_shape=(4, 5)),
IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, 2, 3), out_shape=(3,)),
]),
("NoneIndex", [
IndexSpec(shape=(), indexer=None, out_shape=(1,)),
IndexSpec(shape=(), indexer=(None, None), out_shape=(1, 1)),
IndexSpec(shape=(), indexer=(Ellipsis, None), out_shape=(1,)),
IndexSpec(shape=(3,), indexer=None, out_shape=(1, 3)),
IndexSpec(shape=(3, 4), indexer=None, out_shape=(1, 3, 4)),
IndexSpec(shape=(3, 4), indexer=(Ellipsis, None), out_shape=(3, 4, 1)),
IndexSpec(shape=(3, 4), indexer=(0, None, Ellipsis), out_shape=(1, 4)),
IndexSpec(shape=(3, 4, 5), indexer=(1, None, Ellipsis), out_shape=(1, 4, 5)),
]),
("EmptyIndex", [
IndexSpec(shape=(), indexer=(), out_shape=()),
IndexSpec(shape=(3,), indexer=(), out_shape=(3,)),
IndexSpec(shape=(3, 4), indexer=(), out_shape=(3, 4)),
]),
("TupleOfIntAndSliceAndIntArray", [
IndexSpec(shape=(3, 2, 3), indexer=(0, slice(None), np.arange(3)),
out_shape=(3, 2)),
IndexSpec(shape=(3, 2, 3), indexer=(np.int32(1), slice(None), np.arange(3)),
out_shape=(3, 2)),
IndexSpec(shape=(3, 2, 3), indexer=(np.array(2), slice(None), np.arange(3)),
out_shape=(3, 2)),
]),
]
STATIC_INDEXING_OUT_OF_BOUNDS_TESTS = [
("OneIntIndex", [
IndexSpec(shape=(3,), indexer=-4, out_shape=()),
IndexSpec(shape=(3, 3), indexer=3, out_shape=(3,)),
IndexSpec(shape=(3, 4, 5), indexer=4, out_shape=(4, 5)),
]),
("TwoIntIndices", [
IndexSpec(shape=(3, 3), indexer=(2, -4), out_shape=()),
IndexSpec(shape=(3, 4, 5), indexer=(3, 2), out_shape=()),
IndexSpec(shape=(3, 4, 5), indexer=(-4, 4), out_shape=(5,)),
]),
]
ADVANCED_INDEXING_TESTS = [
("One1DIntArrayIndex", [
IndexSpec(shape=(3,), indexer=np.array([0, 1]), out_shape=(2,)),
IndexSpec(shape=(3, 3), indexer=np.array([1, 2, 1]), out_shape=(3, 3)),
IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 2, 0, 1]),
out_shape=(4, 4, 5)),
IndexSpec(shape=(3,), indexer=np.array([-1, 1]), out_shape=(2,)),
IndexSpec(shape=(3,), indexer=np.array([-2, -1]), out_shape=(2,)),
IndexSpec(shape=(0,), indexer=np.array([], dtype=np.int32),
out_shape=(0,)),
]),
("One2DIntArrayIndex", [
IndexSpec(shape=(3,), indexer=np.array([[0, 0]]),out_shape=(1, 2)),
IndexSpec(shape=(3, 3), indexer=np.array([[1, 2, 1], [0, 1, -1]]),
out_shape=(2, 3, 3)),
IndexSpec(shape=(3, 4, 5), indexer=np.array([[0, 2, 0, 1], [-1, -2, 1, 0]]),
out_shape=(2, 4, 4, 5)),
]),
("Two1DIntArrayIndicesNoBroadcasting", [
IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]), np.array([1, 2])),
out_shape=(2,)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2, 0, 1]), np.array([-1, 0, -1, 2])),
out_shape=(4, 5)),
]),
("Two1DIntArrayIndicesWithBroadcasting", [
IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]), np.array([1, 2])),
out_shape=(1, 2)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([[0, 2, 0, 1]]), np.array([-1, 0, -1, 2])),
out_shape=(1, 4, 5)),
]),
("ArrayOfInts", [
IndexSpec(shape=(3,), indexer=np.array([0, 1, 0]), out_shape=(3,)),
IndexSpec(shape=(3, 4, 5), indexer=np.array([ 0, -1]), out_shape=(2, 4, 5)),
]),
("TupleOfListsOfPythonInts", [
IndexSpec(shape=(3, 4, 5), indexer=([0, 1],), out_shape=(2, 4, 5)),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0, 3]]),
out_shape=(2, 4, 5)),
]),
("TupleOfPythonIntsAndIntArrays", [
IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1])), out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5), indexer=(0, 1, np.array([[2, 3, 0, 3]])),
out_shape=(1, 4)),
]),
("TupleOfListsOfPythonIntsAndIntArrays", [
IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0])),
out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], np.array([[2, 3, 0, 3]])),
out_shape=(2, 4, 5)),
]),
]
ADVANCED_INDEXING_TESTS_NO_REPEATS = [
("One1DIntArrayIndex", [
IndexSpec(shape=(3,), indexer=np.array([0, 1]), out_shape=(2,)),
IndexSpec(shape=(3, 3), indexer=np.array([1, 2, 0]), out_shape=(3, 3)),
IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 2, 1]),
out_shape=(3, 4, 5)),
IndexSpec(shape=(3,), indexer=np.array([-1, 1]), out_shape=(2,)),
IndexSpec(shape=(3,), indexer=np.array([-2, -1]), out_shape=(2,)),
IndexSpec(shape=(0,), indexer=np.array([], dtype=np.int32), out_shape=(0,)),
]),
("One2DIntArrayIndex", [
IndexSpec(shape=(3,), indexer=np.array([[0, 1]]), out_shape=(1, 2)),
IndexSpec(shape=(6, 6), indexer=np.array([[1, 2, 0], [3, 4, -1]]),
out_shape=(2, 3, 6)),
]),
("Two1DIntArrayIndicesNoBroadcasting", [
IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]), np.array([1, 2])),
out_shape=(2,)),
IndexSpec(shape=(4, 5, 6),
indexer=(np.array([0, 2, 1, 3]), np.array([-1, 0, -2, 1])),
out_shape=(4, 6)),
]),
("Two1DIntArrayIndicesWithBroadcasting", [
IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]), np.array([1, 2])),
out_shape=(1, 2)),
IndexSpec(shape=(4, 5, 6),
indexer=(np.array([[0, 2, -1, 1]]), np.array([-1, 0, -2, 2])),
out_shape=(1, 4, 6)),
]),
("ArrayOfInts", [
IndexSpec(shape=(3,), indexer=np.array([0, 2, 1]), out_shape=(3,)),
IndexSpec(shape=(3, 4, 5), indexer=np.array([ 0, -1]), out_shape=(2, 4, 5)),
]),
("TupleOfListsOfPythonInts", [
IndexSpec(shape=(3, 4, 5), indexer=([0, 1],), out_shape=(2, 4, 5)),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0]]),
out_shape=(2, 3, 5)),
]),
("TupleOfPythonIntsAndIntArrays", [
IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1])), out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5), indexer=(0, 1, np.array([[2, 3, 0]])),
out_shape=(1, 3)),
]),
("TupleOfListsOfPythonIntsAndIntArrays", [
IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0])),
out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], np.array([[2, 3, 0]])),
out_shape=(2, 3, 5)),
]),
]
ADVANCED_INDEXING_TESTS_NO_REPEATS_SORTED = [
("One1DIntArrayIndex", [
IndexSpec(shape=(3,), indexer=np.array([0, 1]), out_shape=(2,)),
IndexSpec(shape=(3, 3), indexer=np.array([0, 1, 2]), out_shape=(3, 3)),
IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 1, 2]),
out_shape=(3, 4, 5)),
IndexSpec(shape=(3,), indexer=np.array([-1, 1]), out_shape=(2,)),
IndexSpec(shape=(3,), indexer=np.array([-2, -1]), out_shape=(2,)),
IndexSpec(shape=(0,), indexer=np.array([], dtype=np.int32), out_shape=(0,)),
]),
("One2DIntArrayIndex", [
IndexSpec(shape=(3,), indexer=np.array([[0, 1]]), out_shape=(1, 2)),
IndexSpec(shape=(6, 6), indexer=np.array([[-1, 0, 1],
[ 2, 3, 4]]), out_shape=(2, 3, 6)),
]),
("Two1DIntArrayIndicesNoBroadcasting", [
IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]), np.array([1, 2])),
out_shape=(2,)),
IndexSpec(shape=(4, 5, 6),
indexer=(np.array([0, 1, 2, 3]), np.array([-2, -1, 0, 1])),
out_shape=(4, 6)),
]),
("Two1DIntArrayIndicesWithBroadcasting", [
IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]), np.array([1, 2])),
out_shape=(1, 2)),
IndexSpec(shape=(4, 5, 6),
indexer=(np.array([[-1, 0, 1, 2]]), np.array([-2, -1, 0, 2])),
out_shape=(1, 4, 6)),
]),
("TupleOfListsOfPythonInts", [
IndexSpec(shape=(3, 4, 5), indexer=([0, 1],), out_shape=(2, 4, 5)),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[0, 2, 3]]),
out_shape=(2, 3, 5)),
]),
("TupleOfPythonIntsAndIntArrays", [
IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1])), out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5), indexer=(0, 1, np.array([[0, 2, 3]])),
out_shape=(1, 3)),
]),
("TupleOfListsOfPythonIntsAndIntArrays", [
IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0])),
out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], np.array([[0, 2, 3]])),
out_shape=(2, 3, 5)),
]),
]
MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS = [
("SlicesAndOneIntArrayIndex", [
IndexSpec(shape=(2, 3), indexer=(np.array([0, 1]), slice(1, 2)),
out_shape=(2, 1)),
IndexSpec(shape=(2, 3), indexer=(slice(0, 2), np.array([0, 2])),
out_shape=(2, 2)),
IndexSpec(shape=(3, 4, 5),
indexer=(Ellipsis, np.array([0, 2]), slice(None)),
out_shape=(3, 2, 5)),
IndexSpec(shape=(3, 4, 5),
indexer=(Ellipsis, np.array([[0, 2], [1, 3]]), slice(None)),
out_shape=(3, 2, 2, 5)),
]),
("SlicesAndTwoIntArrayIndices", [
IndexSpec(shape=(3, 4, 5),
indexer=(Ellipsis, np.array([0, 2]), np.array([-1, 2])),
out_shape=(3, 2)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2]), Ellipsis, np.array([-1, 2])),
out_shape=(2, 4)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2]), np.array([-1, 2]), Ellipsis),
out_shape=(2, 5)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2]), np.array([-1, 2]), slice(1, 3)),
out_shape=(2, 2)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2]), slice(1, 3), np.array([-1, 2])),
out_shape=(2, 2)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([ 0, 2, -2]), slice(None, None, 2),
np.array([-1, 2, 1])),
out_shape=(3, 2)),
]),
("NonesAndIntArrayIndices", [
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2]), None, np.array([-1, 2])),
out_shape=(2, 1, 5)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([0, 2]), None, None, np.array([-1, 2])),
out_shape=(2, 1, 1, 5)),
IndexSpec(shape=(3, 4, 5),
indexer=(Ellipsis, np.array([0, 2]), None, None,
np.array([-1, 2])),
out_shape=(2, 3, 1, 1)),
]),
("IntArrayWithInt32Type", [
IndexSpec(shape=(3, 4), indexer=(Ellipsis, np.array(1, dtype=np.int32)),
out_shape=(3,)),
]),
]
MIXED_ADVANCED_INDEXING_TESTS = MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS + [
("SlicesAndOneIntArrayIndex", [
IndexSpec(shape=(3, 4, 5),
indexer=(Ellipsis, np.array([[0, 2], [1, 1]]), slice(None)),
out_shape=(3, 2, 2, 5)),
]),
("SlicesAndTwoIntArrayIndices", [
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([ 0, 2, -2]), slice(None, None, 2),
np.array([-1, 2, -1])),
out_shape=(3, 2)),
IndexSpec(shape=(3, 4, 5),
indexer=(np.array([[0, 2], [2, 0]]), Ellipsis,
np.array([[1, 0], [1, 0]])),
out_shape=(2, 2, 4)),
]),
]
MODES = ["clip", "drop", "promise_in_bounds"]
@jtu.with_config(jax_numpy_rank_promotion="raise")
class IndexingTest(jtu.JaxTestCase):
"""Tests for Numpy indexing translation rules."""
@parameterized.named_parameters(jtu.cases_from_list({
"testcase_name": "{}_inshape={}_indexer={}".format(
          name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "indexer": indexer
} for name, index_specs in STATIC_INDEXING_TESTS
for shape, indexer, _ in index_specs
for dtype in all_dtypes))
def testStaticIndexing(self, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = lambda x: np.asarray(x)[indexer]
jnp_fun = lambda x: jnp.asarray(x)[indexer]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
# Tests x.at[...].get(...) as well.
jnp_fun = lambda x: jnp.asarray(x).at[indexer].get()
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters({
"testcase_name":
        f"{name}_inshape={jtu.format_shape_dtype_string(shape, dtype)}"
f"_indexer={indexer}_mode={mode}",
"shape": shape, "dtype": dtype, "indexer": indexer, "mode": mode
}
for mode in MODES
for name, index_specs in (
STATIC_INDEXING_TESTS if mode == "promise_in_bounds" else
STATIC_INDEXING_TESTS + STATIC_INDEXING_OUT_OF_BOUNDS_TESTS)
for shape, indexer, _ in index_specs
for dtype in float_dtypes)
def testStaticIndexingGrads(self, shape, dtype, indexer, mode):
rng = jtu.rand_default(self.rng())
tol = 1e-2 if jnp.finfo(dtype).bits == 32 else None
arg = rng(shape, dtype)
# Use an arbitrary finite fill_value, since NaNs won't work in a numerical
# gradient test.
fun = lambda x: jnp.asarray(x).at[indexer].get(mode=mode, fill_value=7)**2
check_grads(fun, (arg,), 2, tol, tol, tol)
def _ReplaceSlicesWithTuples(self, idx):
"""Helper method to replace slices with tuples for dynamic indexing args."""
if isinstance(idx, slice):
triple = idx.start, idx.stop, idx.step
isnone = [i for i, elt in enumerate(triple) if elt is None]
zeros = itertools.repeat(0)
nones = itertools.repeat(None)
out = util.subvals(triple, zip(isnone, zeros))
return out, lambda out: slice(*util.subvals(out, zip(isnone, nones)))
elif isinstance(idx, (tuple, list)) and idx:
t = type(idx)
elts, packs = zip(*map(self._ReplaceSlicesWithTuples, idx))
return elts, lambda elts: t((pack(i) for pack, i in zip(packs, elts)))
else:
return idx, lambda x: x
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "indexer": indexer}
for name, index_specs in [
("OneSliceIndex",
[IndexSpec(shape=(5,), indexer=slice(1, 3)),
IndexSpec(shape=(5, 4), indexer=slice(1, 3))]),
("TwoSliceIndices",
[IndexSpec(shape=(5, 4), indexer=(slice(1, 3), slice(0, 2))),
IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, 2)))]),
("NonUnitStrides", [
IndexSpec(shape=(3,), indexer=slice(None, None, -1)),
IndexSpec(shape=(3, 3), indexer=slice(0, 3, -2)),
IndexSpec(shape=(3, 4, 5), indexer=slice(0, 4, 2))
]),
("OnlyStartOrStopDynamic", [
IndexSpec(shape=(5, 4), indexer=(slice(None, 3), slice(0, 2))),
IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, None)))
]),
]
for shape, indexer, _ in index_specs
for dtype in all_dtypes)
def testDynamicIndexingWithSlicesErrors(self, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)
@jax.jit
def fun(x, unpacked_indexer):
indexer = pack_indexer(unpacked_indexer)
return x[indexer]
args_maker = lambda: [rng(shape, dtype), unpacked_indexer]
self.assertRaises(IndexError, lambda: fun(*args_maker()))
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "indexer": indexer}
for name, index_specs in [
("OneIntIndex",
[IndexSpec(shape=(3,), indexer=1),
IndexSpec(shape=(3, 3), indexer=0),
IndexSpec(shape=(3, 4, 5), indexer=2),
IndexSpec(shape=(3,), indexer=-1),
IndexSpec(shape=(3,), indexer=-2)]),
("TwoIntIndices",
[IndexSpec(shape=(3, 3), indexer=(2, 1)),
IndexSpec(shape=(3, 4, 5), indexer=(1, 2)),
IndexSpec(shape=(3, 4, 5), indexer=(-1, 2))]),
("ThreeIntIndices",
[IndexSpec((3, 4, 5), indexer=(1, 2, 3))]),
]
for shape, indexer, _ in index_specs
for dtype in all_dtypes)
def testDynamicIndexingWithIntegers(self, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)
def np_fun(x, unpacked_indexer):
indexer = pack_indexer(unpacked_indexer)
return np.asarray(x)[indexer]
def jnp_fun(x, unpacked_indexer):
indexer = pack_indexer(unpacked_indexer)
return jnp.array(x)[indexer]
args_maker = lambda: [rng(shape, dtype), unpacked_indexer]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "indexer": indexer}
for name, index_specs in [
("OneIntIndex",
[IndexSpec(shape=(3,), indexer=1),
IndexSpec(shape=(3, 3), indexer=0),
IndexSpec(shape=(3, 4, 5), indexer=2),
IndexSpec(shape=(3,), indexer=-1),
IndexSpec(shape=(3,), indexer=-2),
]),
("TwoIntIndices",
[IndexSpec(shape=(3, 3), indexer=(2, 1)),
IndexSpec(shape=(3, 4, 5), indexer=(1, 2)),
IndexSpec(shape=(3, 4, 5), indexer=(-1, 2)),
]),
("ThreeIntIndices",
[IndexSpec((3, 4, 5), indexer=(1, 2, 3))]),
]
for shape, indexer, _ in index_specs
for dtype in float_dtypes)
def testDynamicIndexingWithIntegersGrads(self, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
tol = 1e-2 if jnp.finfo(dtype).bits == 32 else None
unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)
@jax.jit
def fun(unpacked_indexer, x):
indexer = pack_indexer(unpacked_indexer)
return x[indexer]
arr = rng(shape, dtype)
check_grads(partial(fun, unpacked_indexer), (arr,), 2, tol, tol, tol)
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "indexer": indexer}
for name, index_specs in ADVANCED_INDEXING_TESTS
for shape, indexer, _ in index_specs
for dtype in all_dtypes)
def testAdvancedIntegerIndexing(self, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype), indexer]
np_fun = lambda x, idx: np.asarray(x)[idx]
jnp_fun = lambda x, idx: jnp.asarray(x)[idx]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "indexer": indexer}
for name, index_specs in [
("One1DIntArrayIndex",
[IndexSpec(shape=(3,), indexer=np.array([0, 1])),
IndexSpec(shape=(3, 3), indexer=np.array([1, 2, 1])),
IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 2, 0, 1])),
IndexSpec(shape=(3,), indexer=np.array([-1, 1])),
IndexSpec(shape=(3,), indexer=np.array([-2, -1])),
]),
("One2DIntArrayIndex",
[IndexSpec(shape=(3,), indexer=np.array([[0, 0]])),
IndexSpec(shape=(3, 3), indexer=np.array([[1, 2, 1],
[0, 1, -1]])),
IndexSpec(shape=(3, 4, 5), indexer=np.array([[0, 2, 0, 1],
[-1, -2, 1, 0]])),
]),
("Two1DIntArrayIndicesNoBroadcasting",
[IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]),
np.array([1, 2]))),
IndexSpec(shape=(3, 4, 5), indexer=(np.array([0, 2, 0, 1]),
np.array([-1, 0, -1, 2]))),
]),
("Two1DIntArrayIndicesWithBroadcasting",
[IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]),
np.array([1, 2]))),
IndexSpec(shape=(3, 4, 5), indexer=(np.array([[0, 2, 0, 1]]),
np.array([-1, 0, -1, 2]))),
]),
("TupleOfPythonIntsAndIntArrays",
[IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1]))),
IndexSpec(shape=(3, 4, 5), indexer=(0, 1,
np.array([[2, 3, 0, 3]]))),
]),
("TupleOfListsOfPythonIntsAndIntArrays",
[IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0]))),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]],
np.array([[2, 3, 0, 3]]))),
]),
]
for shape, indexer, _ in index_specs
for dtype in float_dtypes)
def testAdvancedIntegerIndexingGrads(self, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
tol = 1e-2 if jnp.finfo(dtype).bits == 32 else None
arg = rng(shape, dtype)
fun = lambda x: jnp.asarray(x)[indexer]
check_grads(fun, (arg,), 2, tol, tol, eps=1.)
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "indexer": indexer}
for name, index_specs in MIXED_ADVANCED_INDEXING_TESTS
for shape, indexer, _ in index_specs
for dtype in all_dtypes)
def testMixedAdvancedIntegerIndexing(self, shape, dtype, indexer):
rng = jtu.rand_default(self.rng())
indexer_with_dummies = [e if isinstance(e, np.ndarray) else ()
for e in indexer]
substitutes = [(i, e) for i, e in enumerate(indexer)
if not isinstance(e, np.ndarray)]
args_maker = lambda: [rng(shape, dtype), indexer_with_dummies]
def jnp_fun(x, indexer_with_dummies):
idx = type(indexer)(util.subvals(indexer_with_dummies, substitutes))
return jnp.asarray(x)[idx]
def np_fun(x, indexer_with_dummies):
idx = type(indexer)(util.subvals(indexer_with_dummies, substitutes))
return np.asarray(x)[idx]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
def testAdvancedIndexingManually(self):
x = np.random.RandomState(0).randn(3, 4, 5)
index_array = np.array([0, 2, -1, 0])
op = lambda x, index_array: x[..., index_array, :]
cop = jax.jit(op)
a1 = op(x, index_array)
a2 = cop(x, index_array)
self.assertAllClose(a1, a2)
op = lambda x, index_array: x[..., index_array, :, index_array, None]
cop = jax.jit(op)
a1 = op(x, index_array)
a2 = cop(x, index_array)
self.assertAllClose(a1, a2)
op = lambda x, index_array: x[index_array, ..., index_array[:, None], None]
cop = jax.jit(op)
a1 = op(x, index_array)
a2 = cop(x, index_array)
self.assertAllClose(a1, a2)
def testUnpacking(self):
def foo(x):
a, b, c = x
return a + b + c
cfoo = jax.jit(foo)
a1 = foo(np.arange(3))
a2 = cfoo(np.arange(3))
self.assertAllClose(a1, a2)
def testBooleanIndexingArray1D(self):
idx = np.array([True, True, False])
x = jax.device_put(np.arange(3))
ans = x[idx]
expected = np.arange(3)[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingList1D(self):
idx = [True, True, False]
x = jax.device_put(np.arange(3))
with self.assertRaisesRegex(TypeError, ARRAY_MSG):
x[idx]
def testBooleanIndexingArray2DBroadcast(self):
idx = np.array([True, True, False, True])
x = np.arange(8).reshape(4, 2)
ans = jax.device_put(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingList2DBroadcast(self):
idx = [True, True, False, True]
x = np.arange(8).reshape(4, 2)
with self.assertRaisesRegex(TypeError, ARRAY_MSG):
jax.device_put(x)[idx]
def testBooleanIndexingArray2D(self):
idx = np.array([[True, False],
[False, True],
[False, False],
[True, True]])
x = np.arange(8).reshape(4, 2)
ans = jax.device_put(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingDynamicShapeError(self):
x = np.zeros(3)
i = np.array([True, True, False])
self.assertRaises(IndexError, lambda: jax.jit(lambda x, i: x[i])(x, i))
def testScalarBooleanIndexingNotImplemented(self):
msg = "JAX arrays do not support boolean scalar indices"
with self.assertRaisesRegex(TypeError, msg):
jnp.arange(4)[True]
with self.assertRaisesRegex(TypeError, msg):
jnp.arange(4)[False]
def testIssue187(self):
x = jnp.ones((5, 5))
x[[0, 2, 4], [0, 2, 4]] # doesn't crash
x = np.arange(25).reshape((5, 5))
ans = jax.jit(lambda x: x[[0, 2, 4], [0, 2, 4]])(x)
expected = x[[0, 2, 4], [0, 2, 4]]
self.assertAllClose(ans, expected, check_dtypes=False)
def testJVPOfGradOfIndexing(self):
# Should return a value, even though we didn't pass a symbolic zero as the
# index tangent.
x = jnp.ones((3, 4), jnp.float32)
i = jnp.ones((3,), jnp.int32)
f = lambda x, i: jnp.sum(x[i])
primals, tangents = jax.jvp(jax.grad(f), (x, i),
(x, np.zeros(i.shape, dtypes.float0)))
expected = np.broadcast_to(
np.array([0, 3, 0], dtype=np.float32)[:, None], (3, 4))
self.assertAllClose(expected, primals)
self.assertAllClose(np.zeros_like(x), tangents)
def testTrivialGatherIsntGenerated(self):
# https://github.com/google/jax/issues/1621
jaxpr = jax.make_jaxpr(lambda x: x[:, None])(np.arange(4))
self.assertEqual(len(jaxpr.jaxpr.eqns), 1)
self.assertNotIn('gather', str(jaxpr))
def testIndexingEmptyDimension(self):
# Issue 2671: XLA error when indexing into dimension of size 0
x = jnp.ones((2, 0))
# The following work, even on axis 1 of size 0
with jax.numpy_rank_promotion('allow'):
_ = x[0, :] + x[0, None] + x[0, 1:] + x[0, 1:3:2]
with self.assertRaisesRegex(IndexError,
"index .* is out of bounds for axis .* with size 0"):
_ = np.ones((2, 0))[0, 0] # The numpy error
with self.assertRaisesRegex(IndexError,
"index is out of bounds for axis .* with size 0"):
_ = x[0, 0] # JAX indexing
with self.assertRaisesRegex(IndexError,
"index is out of bounds for axis .* with size 0"):
jax.jit(lambda i: x[0, i])(0) # JAX indexing under jit
def testBooleanIndexingWithEmptyResult(self):
# based on a TensorFlow Probability test that started failing after #1622
x = jnp.array([-1])
mask = jnp.array([False])
ans = x[mask] # doesn't crash
expected = np.array([-1])[np.array([False])]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingShapeMismatch(self):
# Regression test for https://github.com/google/jax/issues/7329
x = jnp.arange(4)
idx = jnp.array([True, False])
with self.assertRaisesRegex(IndexError, "boolean index did not match shape.*"):
x[idx]
def testNontrivialBooleanIndexing(self):
# Test nontrivial corner case in boolean indexing shape validation
rng = jtu.rand_default(self.rng())
index = (rng((2, 3), np.bool_), rng((6,), np.bool_))
args_maker = lambda: [rng((2, 3, 6), np.int32)]
np_fun = lambda x: np.asarray(x)[index]
jnp_fun = lambda x: jnp.asarray(x)[index]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
def testFloatIndexingError(self):
BAD_INDEX_TYPE_ERROR = "Indexer must have integer or boolean type, got indexer with type"
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jnp.zeros(2)[0.]
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jnp.zeros((2, 2))[(0, 0.)]
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jnp.zeros((2, 2))[(0, 0.)]
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jax.jit(lambda idx: jnp.zeros((2, 2))[idx])((0, 0.))
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jnp.zeros(2).at[0.].add(1.)
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jnp.zeros(2).at[0.].set(1.)
def testIndexOutOfBounds(self): # https://github.com/google/jax/issues/2245
x = jnp.arange(5, dtype=jnp.int32) + 1
self.assertAllClose(x, x[:10])
idx = jnp.array([-10, -6, -5, -4, 0, 3, 4, 5, 6, 100])
self.assertArraysEqual(
x.at[idx].get(mode="clip"),
jnp.array([1, 1, 1, 2, 1, 4, 5, 5, 5, 5], jnp.int32))
nan = np.nan
self.assertArraysEqual(
x.astype(jnp.float32).at[idx].get(mode="fill"),
jnp.array([nan, nan, 1, 2, 1, 4, 5, nan, nan, nan], jnp.float32))
imin = np.iinfo(np.int32).min
self.assertArraysEqual(
x.at[idx].get(mode="fill"),
jnp.array([imin, imin, 1, 2, 1, 4, 5, imin, imin, imin], jnp.int32))
umax = np.iinfo(np.uint32).max
self.assertArraysEqual(
x.astype(np.uint32).at[idx].get(mode="fill"),
jnp.array([umax, umax, 1, 2, 1, 4, 5, umax, umax, umax], jnp.uint32))
self.assertArraysEqual(
x.at[idx].get(mode="fill", fill_value=7),
jnp.array([7, 7, 1, 2, 1, 4, 5, 7, 7, 7], jnp.int32))
def _broadcastable_shapes(shape):
"""Returns all shapes that broadcast to `shape`."""
def f(rshape):
yield []
if rshape:
for s in f(rshape[1:]):
yield rshape[0:1] + s
if rshape[0] != 1:
for s in f(rshape[1:]):
yield [1] + s
for x in f(list(reversed(shape))):
yield list(reversed(x))
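# For example, _broadcastable_shapes((2, 3)) yields (in generation order)
# [], [3], [2, 3], [1, 3], [1], [2, 1], [1, 1]: every shape that NumPy
# broadcasting accepts against (2, 3).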
class UpdateOps(enum.Enum):
UPDATE = 0
ADD = 1
MUL = 2
DIV = 3
POW = 4
MIN = 5
MAX = 6
def np_fn(op, indexer, x, y):
x = x.copy()
x[indexer] = {
UpdateOps.UPDATE: lambda: y,
UpdateOps.ADD: lambda: x[indexer] + y,
UpdateOps.MUL: lambda: x[indexer] * y,
UpdateOps.DIV: jtu.ignore_warning(category=RuntimeWarning)(
lambda: x[indexer] / y.astype(x.dtype)),
UpdateOps.POW: jtu.ignore_warning(category=RuntimeWarning)(
lambda: x[indexer] ** y.astype(x.dtype)),
UpdateOps.MIN: lambda: np.minimum(x[indexer], y),
UpdateOps.MAX: lambda: np.maximum(x[indexer], y),
}[op]()
return x
def jax_fn(op, indexer, x, y, indices_are_sorted=False,
unique_indices=False, mode=None):
x = jnp.array(x)
return {
UpdateOps.UPDATE: x.at[indexer].set,
UpdateOps.ADD: x.at[indexer].add,
UpdateOps.MUL: x.at[indexer].multiply,
UpdateOps.DIV: x.at[indexer].divide,
UpdateOps.POW: x.at[indexer].power,
UpdateOps.MIN: x.at[indexer].min,
UpdateOps.MAX: x.at[indexer].max,
}[op](y, indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices, mode=mode)
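  # Rough sketch of what the two wrappers above compare (scalar index, ADD op;
  # values chosen arbitrarily):
  #
  #     x, y = np.zeros(3), 5.0
  #     UpdateOps.np_fn(UpdateOps.ADD, 1, x, y)              # -> [0., 5., 0.]
  #     UpdateOps.jax_fn(UpdateOps.ADD, 1, jnp.zeros(3), y)  # -> [0., 5., 0.]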
def dtypes(op):
if op == UpdateOps.UPDATE:
return all_dtypes
elif op == UpdateOps.DIV or op == UpdateOps.POW:
return jtu.dtypes.inexact
else:
return default_dtypes
def _update_tol(op):
if op == UpdateOps.POW:
tol = {np.complex64: 1e-4 if jtu.device_under_test() == "tpu" else 1e-5,
np.complex128: 1e-14}
else:
tol = {np.complex128: 1e-14}
return tol
@jtu.with_config(jax_numpy_rank_promotion="raise")
class IndexedUpdateTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name":
f"{name}_inshape={jtu.format_shape_dtype_string(shape, dtype)}"
f"_indexer={indexer}"
f"_update={jtu.format_shape_dtype_string(update_shape, update_dtype)}"
f"_op={op.name}",
"shape": shape, "dtype": dtype, "indexer": indexer,
"update_shape": update_shape, "update_dtype": update_dtype,
"op": op, "mode": mode,
} for name, index_specs in s(STATIC_INDEXING_TESTS)
for shape, indexer, update_shape in s(index_specs)
for op in s(UpdateOps)
for dtype in s(UpdateOps.dtypes(op))
for update_shape in s(_broadcastable_shapes(update_shape))
for update_dtype in s([dtype] if op == UpdateOps.ADD else all_dtypes)
for mode in s(MODES))))
def testStaticIndexing(self, shape, dtype, update_shape, update_dtype,
indexer, op, mode):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]
np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)
jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y, mode=mode)
self._CheckAgainstNumpy(np_fn, jax_fn, args_maker, tol=_update_tol(op))
self._CompileAndCheck(jax_fn, args_maker)
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format(
name, jtu.format_shape_dtype_string(shape, dtype), indexer,
jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),
"shape": shape, "dtype": dtype, "indexer": indexer,
"update_shape": update_shape, "update_dtype": update_dtype,
"op": op
} for name, index_specs in s(ADVANCED_INDEXING_TESTS_NO_REPEATS)
for shape, indexer, update_shape in s(index_specs)
for op in s(UpdateOps)
for dtype in s(UpdateOps.dtypes(op))
for update_shape in s(_broadcastable_shapes(update_shape))
for update_dtype in s([dtype] if op == UpdateOps.ADD else all_dtypes))))
def testAdvancedIndexing(self, shape, dtype, update_shape, update_dtype,
indexer, op):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]
np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)
jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y,
unique_indices=True)
self._CheckAgainstNumpy(np_fn, jax_fn, args_maker, tol=_update_tol(op))
self._CompileAndCheck(jax_fn, args_maker)
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format(
name, jtu.format_shape_dtype_string(shape, dtype), indexer,
jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),
"shape": shape, "dtype": dtype, "indexer": indexer,
"update_shape": update_shape, "update_dtype": update_dtype,
"op": op
} for name, index_specs in s(ADVANCED_INDEXING_TESTS_NO_REPEATS_SORTED)
for shape, indexer, update_shape in s(index_specs)
for op in s(UpdateOps)
for dtype in s(UpdateOps.dtypes(op))
for update_shape in s(_broadcastable_shapes(update_shape))
for update_dtype in s([dtype] if op == UpdateOps.ADD else all_dtypes))))
def testAdvancedIndexingSorted(self, shape, dtype, update_shape, update_dtype,
indexer, op):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]
np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)
jax_fn = lambda x, y: UpdateOps.jax_fn(
op, indexer, x, y, indices_are_sorted=True, unique_indices=True)
self._CheckAgainstNumpy(np_fn, jax_fn, args_maker, check_dtypes=True,
tol=_update_tol(op))
self._CompileAndCheck(jax_fn, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format(
name, jtu.format_shape_dtype_string(shape, dtype), indexer,
jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),
"shape": shape, "dtype": dtype, "indexer": indexer,
"update_shape": update_shape, "update_dtype": update_dtype,
"op": op
} for name, index_specs in s(MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS)
for shape, indexer, update_shape in s(index_specs)
for op in s(UpdateOps)
for dtype in s(UpdateOps.dtypes(op))
for update_shape in s(_broadcastable_shapes(update_shape))
for update_dtype in s([dtype] if op == UpdateOps.ADD else all_dtypes))))
def testMixedAdvancedIndexing(self, shape, dtype, update_shape, update_dtype,
indexer, op):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]
np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)
jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y)
self._CheckAgainstNumpy(np_fn, jax_fn, args_maker, tol=_update_tol(op))
self._CompileAndCheck(jax_fn, args_maker)
@parameterized.named_parameters(jtu.cases_from_list({
"testcase_name":
f"{name}_inshape={jtu.format_shape_dtype_string(shape, dtype)}"
f"_indexer={indexer}"
f"_update={jtu.format_shape_dtype_string(update_shape, update_dtype)}"
f"_op={op.name}_mode={mode}",
"shape": shape, "dtype": dtype, "indexer": indexer,
"update_shape": update_shape, "update_dtype": update_dtype,
"op": op, "mode": mode,
} for mode in MODES
for name, index_specs in (
STATIC_INDEXING_TESTS if mode == "promise_in_bounds" else
STATIC_INDEXING_TESTS + STATIC_INDEXING_OUT_OF_BOUNDS_TESTS)
for shape, indexer, update_shape in index_specs
for op in [UpdateOps.ADD, UpdateOps.MUL, UpdateOps.UPDATE]
for dtype in float_dtypes
for update_shape in _broadcastable_shapes(update_shape)
for update_dtype in ([dtype] if op == UpdateOps.ADD else float_dtypes)))
def testStaticIndexingGrads(self, shape, dtype, update_shape, update_dtype,
indexer, op, mode):
rng = jtu.rand_default(self.rng())
jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y, mode=mode)
x = rng(shape, dtype)
y = rng(update_shape, update_dtype)
check_grads(jax_fn, (x, y), 2, rtol=1e-3, atol=1e-3, eps=1.)
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format(
name, jtu.format_shape_dtype_string(shape, dtype), indexer,
jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),
"shape": shape, "dtype": dtype, "indexer": indexer,
"update_shape": update_shape, "update_dtype": update_dtype,
"op": op
} for name, index_specs in s(ADVANCED_INDEXING_TESTS_NO_REPEATS)
for shape, indexer, update_shape in s(index_specs)
for op in s([UpdateOps.ADD, UpdateOps.MUL, UpdateOps.UPDATE])
for dtype in s(float_dtypes)
for update_shape in s(_broadcastable_shapes(update_shape))
for update_dtype in s([dtype] if op == UpdateOps.ADD else float_dtypes))))
def testAdvancedIndexingGrads(self, shape, dtype, update_shape, update_dtype,
indexer, op):
rng = jtu.rand_default(self.rng())
jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y,
unique_indices=True)
x = rng(shape, dtype)
y = rng(update_shape, update_dtype)
check_grads(jax_fn, (x, y), 2, rtol=1e-3, atol=1e-3, eps=1.)
def testSegmentSumBehavior(self):
# testAdvancedIndexing compares against NumPy, and as a result doesn't check
# repeated indices. This test is just a simple manual check, based on
# https://www.tensorflow.org/api_docs/python/tf/math/segment_sum
data = np.array([5, 1, 7, 2, 3, 4, 1, 3])
segment_ids = np.array([0, 0, 0, 1, 2, 2, 3, 3])
ans = jnp.zeros(np.max(segment_ids) + 1).at[segment_ids].add(data)
expected = np.array([13, 2, 7, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
def testSegmentSum(self):
data = jnp.array([5, 1, 7, 2, 3, 4, 1, 3])
segment_ids = jnp.array([0, 0, 0, 1, 2, 2, 3, 3])
# test with explicit num_segments
ans = ops.segment_sum(data, segment_ids, num_segments=4)
expected = jnp.array([13, 2, 7, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
    # test with explicit num_segments larger than the highest segment id.
ans = ops.segment_sum(data, segment_ids, num_segments=5)
expected = jnp.array([13, 2, 7, 4, 0])
self.assertAllClose(ans, expected, check_dtypes=False)
# test without explicit num_segments
ans = ops.segment_sum(data, segment_ids)
expected = jnp.array([13, 2, 7, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
# test with negative segment ids and segment ids larger than num_segments,
    # which are wrapped via `mod`.
segment_ids = jnp.array([0, 4, 8, 1, 2, -6, -1, 3])
ans = ops.segment_sum(data, segment_ids, num_segments=4)
expected = jnp.array([5, 2, 3, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
    # test with negative segment ids and without explicit num_segments,
    # so that num_segments is inferred from the largest segment id.
segment_ids = jnp.array([3, 3, 3, 4, 5, 5, -7, -6])
ans = ops.segment_sum(data, segment_ids)
expected = jnp.array([0, 0, 0, 13, 2, 7])
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list({
"testcase_name": "_{}_{}_num_segments={}_bucket_size={}".format(
jtu.format_shape_dtype_string(shape, dtype),
reducer.__name__, num_segments, bucket_size),
"dtype": dtype, "shape": shape,
"reducer": reducer, "op": op, "identity": identity,
"num_segments": num_segments, "bucket_size": bucket_size}
for dtype in default_dtypes
for shape in [(8,), (7, 4), (6, 4, 2)]
for bucket_size in [None, 2]
for num_segments in [None, 1, 3])
for reducer, op, identity in [
(ops.segment_sum, np.add, 0),
(ops.segment_prod, np.multiply, 1),
(ops.segment_min, np.minimum, float('inf')),
(ops.segment_max, np.maximum, -float('inf')),
]))
def testSegmentReduce(self, shape, dtype, reducer, op, identity, num_segments, bucket_size):
rng = jtu.rand_default(self.rng())
idx_rng = jtu.rand_int(self.rng(), low=-2, high=3)
args_maker = lambda: [rng(shape, dtype), idx_rng(shape[:1], jnp.int32)]
if np.issubdtype(dtype, np.integer):
if np.isposinf(identity):
identity = np.iinfo(dtype).max
elif np.isneginf(identity):
identity = np.iinfo(dtype).min
jnp_fun = lambda data, segment_ids: reducer(
data, segment_ids, num_segments=num_segments, bucket_size=bucket_size)
def np_fun(data, segment_ids):
size = num_segments if num_segments is not None else (segment_ids.max() + 1)
out = np.full((size,) + shape[1:], identity, dtype)
for i, val in zip(segment_ids, data):
if 0 <= i < size:
out[i] = op(out[i], val).astype(dtype)
return out
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
if num_segments is not None:
self._CompileAndCheck(jnp_fun, args_maker)
def testIndexDtypeError(self):
# https://github.com/google/jax/issues/2795
jnp.array(1) # get rid of startup warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
jnp.zeros(5).at[::2].set(1)
self.assertLen(w, 0)
@contextmanager
def assertNoWarnings(self):
with warnings.catch_warnings(record=True) as caught_warnings:
yield
self.assertEmpty(caught_warnings)
@parameterized.named_parameters(jtu.cases_from_list({
"testcase_name": "idx={}".format(idx), "idx": idx, "idx_type": idx_type}
for idx, idx_type in [
([0], "array"),
([0, 0], "array"),
([[0, 0]], "tuple"),
([0, [0, 1]], "tuple"),
([0, np.arange(2)], "tuple"),
([0, None], "tuple"),
([0, slice(None)], "tuple"),
]))
def testIndexSequenceDeprecation(self, idx, idx_type):
normalize = {"array": np.array, "tuple": tuple}[idx_type]
msg = {"array": ARRAY_MSG, "tuple": TUPLE_MSG}[idx_type]
x = jnp.arange(6).reshape(3, 2)
with self.assertRaisesRegex(TypeError, msg):
x[idx]
with self.assertNoWarnings():
x[normalize(idx)]
with self.assertRaisesRegex(TypeError, msg):
x.at[idx].set(0)
with self.assertNoWarnings():
x.at[normalize(idx)].set(0)
@unittest.skipIf(jax._src.lib.version < (0, 1, 72),
"Bug fixed in jaxlib 0.1.72")
def testIndexedUpdateAliasingBug(self):
# https://github.com/google/jax/issues/7461
fn = lambda x: x.at[1:].set(1 + x[:-1])
y = jnp.zeros(8)
self.assertArraysEqual(fn(y), jax.jit(fn)(y))
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
the-stack_0_19146 | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until database is available"""
def handle(self, *args, **options):
        self.stdout.write("Waiting for database...")
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write(
'Database unavailable, waiting for 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
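# Typical usage (assuming this module lives at
# <app>/management/commands/wait_for_db.py, so the command name is "wait_for_db"),
# e.g. in a Docker entrypoint before running migrations:
#
#     python manage.py wait_for_db && python manage.py migrate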
|
the-stack_0_19148 | """Represent a range of years, with ability to update based on a track"""
# Copyright 2016-2019 Florian Pigorsch & Contributors. All rights reserved.
#
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import re
import datetime
from typing import Optional
class YearRange:
"""Represent a range of years, with ability to update based on a track
Attributes:
from_year: First year in range (lower)
to_year: Last year in range (higher)
Methods:
parse: Parse a string into lower and upper bounds
add: Adjust bounds based on a track
contains: If track is contained in the range
count: Number of years in range
"""
def __init__(self):
"""Inits YearRange with empty bounds -- to be built after init"""
self.from_year = None
self.to_year = None
def parse(self, s: str) -> bool:
"""Parse a plaintext range of years into a pair of years
Attempt to turn the input string into a pair of year values, from_year and to_year. If one
year is passed, both from_year and to_year will be set to that year. If a range like
'2016-2018' is passed, from_year will be set to 2016, and to_year will be set to 2018.
Args:
s: A string representing a range of years or a single year
Returns:
True if the range was successfully parsed, False if not.
"""
if s == "all":
self.from_year = None
self.to_year = None
return True
m = re.match(r"^\d+$", s)
if m:
self.from_year = int(s)
self.to_year = self.from_year
return True
m = re.match(r"^(\d+)-(\d+)$", s)
if m:
y1, y2 = int(m.group(1)), int(m.group(2))
if y1 <= y2:
self.from_year = y1
self.to_year = y2
return True
return False
def add(self, t: datetime.datetime):
"""For the given t, update from_year and to_year to include that timestamp"""
if self.from_year is None:
self.from_year = t.year
self.to_year = t.year
elif t.year < self.from_year:
self.from_year = t.year
elif t.year > self.to_year:
self.to_year = t.year
def contains(self, t: datetime.datetime) -> bool:
"""Return True if current year range contains t, False if not"""
if self.from_year is None:
return True
return self.from_year <= t.year <= self.to_year
def count(self) -> Optional[int]:
"""Return number of years contained in the current range"""
if self.from_year is None:
return None
return 1 + self.to_year - self.from_year
def all(self):
return list(range(int(self.from_year), int(self.to_year)+1))
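# Illustrative usage sketch (added for clarity, not part of the original module):
#   yr = YearRange()
#   yr.parse("2016-2018")                       # True; from_year=2016, to_year=2018
#   yr.add(datetime.datetime(2019, 7, 1))       # widens the range to 2016-2019
#   yr.contains(datetime.datetime(2017, 3, 2))  # True
#   yr.count()                                  # 4
#   yr.all()                                    # [2016, 2017, 2018, 2019]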
|
the-stack_0_19149 | # coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import pytest
from scout_apm.core.socket import CoreAgentSocket
from tests.compat import mock
from tests.integration.core.test_core_agent_manager import ( # noqa: F401,F811
core_agent_manager,
is_running,
shutdown,
)
@pytest.fixture # noqa: F811
def running_agent(core_agent_manager): # noqa: F811
assert not is_running(core_agent_manager)
assert core_agent_manager.launch()
time.sleep(0.01) # wait for agent to start running
assert is_running(core_agent_manager)
try:
yield
finally:
shutdown(core_agent_manager)
assert not is_running(core_agent_manager)
@pytest.fixture
def socket(running_agent):
socket = CoreAgentSocket.instance()
try:
time.sleep(0.01) # wait for socket to connect and register
yield socket
finally:
socket.stop()
socket.join()
def test_socket_instance_is_a_singleton(running_agent):
socket1 = CoreAgentSocket.instance()
socket2 = CoreAgentSocket.instance()
try:
assert socket2 is socket1
finally:
socket1.stop()
socket1.join()
socket2.stop()
socket2.join()
def test_socket_instance_is_recreated_if_not_running(running_agent):
socket1 = CoreAgentSocket.instance()
socket1.stop()
socket1.join()
socket2 = CoreAgentSocket.instance()
try:
assert socket2 is not socket1
finally:
socket2.stop()
socket2.join()
class Command(object):
def message(self):
return {}
def test_send(socket):
socket.send(Command())
class NonSerializableCommand(object):
def message(self):
return object()
def test_send_serialization_error(socket):
socket.send(NonSerializableCommand())
@mock.patch("socket.socket.sendall")
def test_send_network_error(sendall, socket):
sendall.side_effect = OSError
socket.send(Command())
|
the-stack_0_19150 | import sys
sys.path.append("../../")
import matplotlib.pyplot as plt
from Utils.callback_tf import callbacklist
from tensorflow import keras
(train_img,train_lab),(test_img,test_lab) = keras.datasets.mnist.load_data()
model = keras.models.Sequential()
model.add(keras.layers.Dense(512,activation="relu",input_shape=(28*28,)))
model.add(keras.layers.Dense(10,activation="softmax"))
opt = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
model.compile(optimizer=opt,
loss=keras.losses.categorical_crossentropy,
metrics=['accuracy'])
train_img = train_img.reshape((60000, 28 * 28))
train_img = train_img.astype('float32') / 255
test_img = test_img.reshape((10000, 28 * 28))
test_img = test_img.astype('float32') / 255
train_lab = keras.utils.to_categorical(train_lab)
test_lab = keras.utils.to_categorical(test_lab)
history = model.fit(train_img,train_lab,validation_data=(test_img,test_lab),epochs=200,batch_size=128,callbacks=callbacklist)
# model = keras.models.load_model('e:/model.hdf5')
# test_loss, test_acc = model.evaluate(test_img, test_lab)
#
# print(test_loss)
# print(test_acc)
# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
|
the-stack_0_19155 | # coding=utf-8
"""
Manages config params for ATIS
"""
import elib_config
from esst.sentry.sentry_context import SentryConfigContext
class ATISConfig(SentryConfigContext):
"""
Configuration for the ATIS package
"""
ATIS_CREATE = elib_config.ConfigValueBool(
'atis', 'create',
description='create ATIS mp3 files when starting ESST',
default=True,
)
UR_PATH = elib_config.ConfigValueString(
'atis', 'ur_path',
description='Path to UR config folder (usually found in Saved Games)',
default=''
)
DEFAULT_ICAO = elib_config.ConfigValueString(
'atis', 'default_icao',
        description='When generating the weather from a MIZ file, there is no way to know what ICAO to use. '
                    'By default, ESST will use the "XXXX" ICAO to indicate that. However, that generates '
                    'weather reports for "unknown airport (XXXX)". To avoid that, you can define a fallback '
                    'value for the ICAO, using a "dummy" ICAO for MIZ-generated weather.',
default='XXXX'
)
|
the-stack_0_19157 | import os, sys
from ltk.locales import default_locales, locale_list
from ltk.logger import logger
import time
import logging
import traceback
from ltk import exceptions
try:
from blessings import Terminal
term = Terminal()
except ImportError:
term = False
# from constants import APP_ID
class Enum(set):
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
# def get_access_token(host, username, password):
# auth_uri = host + '/auth/authorize.html'
# auth_params = {'client_id': APP_ID, 'redirect_uri': '', 'response_type': 'token'}
# todo possibly put dictionary outside so not built with every function call
def detect_format(file_name, get_mapper=False):
format_mapper = {
'.csv': 'CSV',
'.dita': 'DITA',
'.ditamap': 'DITAMAP',
'.docx': 'DOCX_OKAPI',
'.doc' : 'WORD_OKAPI',
'.dtd': 'DTD',
'.xlsx': 'XLSX_OKAPI',
'.idml': 'IDML',
'.properties': 'JAVA_PROPERTIES_OKAPI',
'.json': 'JSON',
'.pdf': 'PDF',
'.txt': 'PLAINTEXT_OKAPI',
'.po': 'PO',
'.ppt': 'PPT_OKAPI',
'.pptx': 'PPTX_OKAPI',
'.resx': 'RESX',
'.regex': 'REGEX',
'.rtf': 'RTF_OKAPI',
'.srt': 'SUBTITLE_RIP',
'.strings': 'APPLE_STRINGS',
'.tsv': 'TABLE', # catkeys?
'.ts': 'TS',
'.xml': 'XML_OKAPI'
}
format_mapper.update(dict.fromkeys(['.dox', '.c', '.h', '.cpp'], 'DOXYGEN'))
format_mapper.update(dict.fromkeys(['.html', '.htm'], 'HTML_OKAPI'))
format_mapper.update(dict.fromkeys(['.odp', '.otp'], 'ODP'))
format_mapper.update(dict.fromkeys(['.ods', '.ots'], 'ODS'))
format_mapper.update(dict.fromkeys(['.odt', '.ott'], 'ODT'))
format_mapper.update(dict.fromkeys(['.yaml', '.yml'], 'RAILS_YAML'))
format_mapper.update(dict.fromkeys(['.xliff', '.xlf'], 'XLIFF_OKAPI'))
if get_mapper:
return format_mapper
name, extension = os.path.splitext(file_name)
return format_mapper.get(extension.lower(), 'PLAINTEXT_OKAPI')
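# Illustrative behavior sketch (hypothetical file names, added for clarity):
#   detect_format("guide.DOCX")            -> 'DOCX_OKAPI'       (extension match is case-insensitive)
#   detect_format("notes.unknown")         -> 'PLAINTEXT_OKAPI'  (fallback default)
#   detect_format("x.po", get_mapper=True) -> the full extension-to-format dict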
def map_locale(locale):
"""
maps incorrectly formatted locales to valid locales for use with Lingotek API
:param locale: incorrectly formatted locale
:return: valid locale
"""
# import json
# valid_locales = []
# unsupported_locales = []
# with open('data/language-default-locales.json') as json_file:
# default_locales = json.load(json_file)
try:
return default_locales[locale]
except KeyError:
return None
# for locale in locales:
# try:
# valid_locales.append(default_locales[locale])
# except KeyError:
# unsupported_locales.append(locale)
# return valid_locales, unsupported_locales
def restart(message="Restarting watch", interval=5):
"""Restarts the program. Used after exceptions. Otherwise, watch doesn't work anymore."""
time.sleep(interval)
print(message)
cmd = "ltk"
for arg in sys.argv[1:]:
cmd = cmd + " " + arg
os.system(cmd)
''' This way (below) works for Linux, but does not work on Windows '''
#python = sys.executable
#os.execl(python, python, * sys.argv)
def is_valid_locale(api, locale):
"""Returns true if the locale is found in Lingotek's remote list of locales or, if the api call fails, if the locale is found in the local list of locales."""
valid_locales = []
response = api.list_locales()
remote_check = False
if response.status_code == 200:
remote_check = True
locale_json = response.json()['entities']
for entry in locale_json:
valid_locales.append(entry['properties']['code'])
locales = []
check_locale = locale.replace("-","_")
if remote_check and check_locale not in valid_locales or not remote_check and not check_locale in locale_list:
return False
else:
return True
def get_valid_locales(api, entered_locales, operation_text):
"""Return the list of valid locales, checking locales either remotely or using a local list of locales."""
valid_locales = []
response = api.list_locales()
remote_check = False
if response.status_code == 200:
remote_check = True
locale_json = response.json()
for entry in locale_json['entities']:
valid_locales.append(entry['properties']['code'])
locales = []
if(len(entered_locales) == 0 or (len(entered_locales) == 1 and entered_locales[0] == "[]")):
logger.warning('No locales have been assigned to this document. Please add them using \'ltk request\'.')
else:
for locale in entered_locales:
check_locale = locale.replace("-","_")
if remote_check and check_locale not in valid_locales or not remote_check and not check_locale in locale_list:
logger.warning('The locale code "'+str(locale)+'" failed to be '+operation_text+' since it is invalid (see "ltk list -l" for the list of valid codes).')
else:
locales.append(locale)
return locales
def get_translation_files(file_name, path, download_option, doc_manager):
translation_files = []
if download_option == "same":
downloads = doc_manager.get_doc_downloads(file_name)
translation_files = find_translations(file_name, path, downloads)
elif download_option == "folder" :
downloads = doc_manager.get_doc_downloads(file_name)
entry = doc_manager.get_doc_by_prop("file_name", file_name)
if entry:
file_name = entry['name']
translation_files = find_translations(file_name, path, downloads)
elif download_option == "clone":
entry = doc_manager.get_doc_by_prop("file_name", file_name)
if entry:
file_name = entry['name']
if os.path.isfile(os.path.join(path, file_name)):
translation_files.append(os.path.join(path, file_name))
return translation_files
def find_translations(file_name, path, downloads):
translation_files = []
trans_file_name = ""
for d in downloads:
temp = file_name.split(".")
trans_file_name = ""
for idx, val in enumerate(temp):
if idx == len(temp)-2:
trans_file_name = trans_file_name +val+"."
trans_file_name = trans_file_name+d+"."
else:
trans_file_name += val
if idx != len(temp)-1:
trans_file_name += "."
if os.path.isfile(os.path.join(path, trans_file_name)):
translation_files.append(os.path.join(path, trans_file_name))
return translation_files
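# Illustrative sketch of the naming scheme handled above (hypothetical values):
#   file_name="manual.docx", downloads=["de-DE"] -> looks for "manual.de-DE.docx"
#   under `path` and includes it in the result if the file exists.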
def raise_error(json, error_message, is_warning=False, doc_id=None, file_name=None):
try:
error = ""
if json:
error = json['messages'][0]
if file_name:
file_name = file_name.replace("Status of ", "")
if file_name is not None and doc_id is not None:
error = error.replace(doc_id, file_name+" ("+doc_id+")")
# Sometimes api returns vague errors like 'Unknown error'
if error == 'Unknown error':
error = error_message
if not is_warning:
raise exceptions.RequestFailedError(error)
# warnings.warn(error)
if error:
error = error+"\n"
logger.error(error+error_message)
except (AttributeError, IndexError):
if not is_warning:
raise exceptions.RequestFailedError(error_message)
# warnings.warn(error_message)
logger.error(error_message)
def error(error_message):
logger.error(error_message+"\n")
def underline(text):
if term:
print(term.underline(text))
else:
# print("Recommended to install blessings module for better formatting")
print(text)
def format_commit_message():
pass
def check_response(response):
try:
if response and response.text:
if response.json():
return True
except ValueError:
logger.warning("Could not connect to Lingotek")
return
# Python 3
except json.decoder.JSONDecodeError:
logger.warning("Could not connect to Lingotek")
return
# End Python 3
def remove_begin_slashes(path):
index = 0
for letter in path:
if letter != os.sep:
break
index += 1
if len(path) > index + 1:
return path[index:]
else:
return ''
return path
def remove_end_slashes(path):
index = len(path) - 1
for letter in reversed(path):
if letter != os.sep:
break
index -= 1
if index > 0:
return path[:index - 1]
else:
return ''
return path
def remove_last_folder_in_path(path):
if len(path):
split_path = path.split(os.sep)
split_path = split_path[:len(split_path) - 1]
return os.path.join(*split_path)
else:
return path
# Takes a path normalized relative to the project root (path) and returns the path relative to the current directory.
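# Illustrative trace (hypothetical paths, added for clarity): with the project root at
# /home/u/proj, the current working directory at /home/u/proj/docs and path "docs/a.txt",
# this returns "a.txt".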
def get_relative_path(path_to_project_root, path):
abs_path = os.path.dirname(os.path.join(path_to_project_root,path))
# print("abs_path: "+abs_path)
relative_path = os.path.relpath(abs_path,os.getcwd())
# print("relative path: "+relative_path)
if relative_path == '..' and os.path.join(path_to_project_root,path) == os.getcwd():
return '.'
relative_file_path = os.path.join(relative_path,os.path.basename(path))
split_path = relative_file_path.split(os.sep)
# print("cwd: "+os.getcwd())
# print("joined path: "+os.path.join(abs_path,os.path.basename(path)))
if len(split_path) and split_path[0] == '.' or os.path.join(abs_path,os.path.basename(path)) in os.getcwd():
relative_file_path = os.path.join(*split_path[1:])
return relative_file_path
def log_traceback(ex, ex_traceback=None):
# Python 2
# try:
# exc_type, exc_value, exc_traceback = sys.exc_info()
# tb_str = ""
# tb_lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
# for line in tb_lines:
# tb_str += line+"\n"
# finally:
# """Assigning the traceback return value to a local variable in a function that is handling an exception will cause a circular reference,
# preventing anything referenced by a local variable in the same function or by the traceback from being garbage collected.
# Must explicitly delete. """
# del exc_traceback
# return tb_str
# End Python 2
# Python 3
if ex_traceback is None:
ex_traceback = ex.__traceback__
tb_str = ""
tb_lines = traceback.format_exception(ex.__class__, ex, ex_traceback)
for line in tb_lines:
tb_str += line+"\n"
return tb_str
# End Python 3
def log_error(error_file_name, e):
try:
with open(error_file_name, 'a') as error_file:
error_file.write(str(time.strftime("%Y-%m-%d %H:%M:%S") + ": "+str(log_traceback(e))))
except IOError as e:
print(e.errno)
print(e)
return
def remove_powershell_formatting(args):
if args != None:
if isinstance(args, tuple):
myTuple = ()
if len(args) > 1:
if isinstance(args, tuple):
for k in args:
k = remove_formatting(k)
myTuple = myTuple+(k,)
return myTuple
else:
for k,v in args:
k = (remove_formatting(k),)
v = remove_formatting(v)
tup1 = k+(v,)
return myTuple+(tup1,)
return myTuple+(tup1,)
else:
for tup in args:
if isinstance(tup, tuple):
for k in tup:
k = remove_formatting(k)
myTuple = myTuple+(k,)
myTuple = (myTuple),
return myTuple
else:
for k in args:
k = remove_formatting(k)
myTuple = (k,)
return myTuple
return args
elif isinstance(args, list):
temp = []
for k in args:
k = remove_formatting(k)
temp.append(k)
return tuple(temp)
elif isinstance(args, str):
temp = remove_formatting(args)
return temp
# Python 2
# elif isinstance(args, bool):
# return args
# End Python 2
else:
# Python 2
# temp = remove_formatting(args)
# return temp
# End Python 2
# Python 3
return args
# End Python 3
def remove_formatting(f):
if f.startswith(".\\"):
f = f[2:]
if f.endswith("\\"):
f = f[:-1]
if f.endswith("\""):
f = f[:-1]
return f
else:
return f
def yes_no_prompt(prompt, default_yes=False):
try:
confirm = 'none'
prompt_message = prompt + (" [Y/n]: " if default_yes else " [y/N]: ")
while confirm not in ['y', 'Y', 'n', 'N', '']:
# Python 2
# confirm = raw_input(prompt_message)
# End Python 2
# Python 3
confirm = input(prompt_message)
# End Python 3
if confirm in ['y', 'Y']:
return True
elif confirm in ['n', 'N']:
return False
else:
return default_yes
except KeyboardInterrupt:
raise
|
the-stack_0_19158 | import os
import math
import tqdm
import torch
import itertools
import traceback
from utils.pqmf import PQMF
from model.generator import Generator
from model.multiscale import MultiScaleDiscriminator
from .utils import get_commit_hash
from .validation import validate
from utils.stft_loss import MultiResolutionSTFTLoss
def train(args, pt_dir, chkpt_path, trainloader, valloader, writer, logger, hp, hp_str):
model_g = Generator(hp.audio.n_mel_channels, hp.model.n_residual_layers,
ratios=hp.model.generator_ratio, mult = hp.model.mult,
out_band = hp.model.out_channels).cuda()
print("Generator : \n",model_g)
model_d = MultiScaleDiscriminator(hp.model.num_D, hp.model.ndf, hp.model.n_layers,
hp.model.downsampling_factor, hp.model.disc_out).cuda()
print("Discriminator : \n", model_d)
optim_g = torch.optim.Adam(model_g.parameters(),
lr=hp.train.adam.lr, betas=(hp.train.adam.beta1, hp.train.adam.beta2))
optim_d = torch.optim.Adam(model_d.parameters(),
lr=hp.train.adam.lr, betas=(hp.train.adam.beta1, hp.train.adam.beta2))
githash = get_commit_hash()
init_epoch = -1
step = 0
if chkpt_path is not None:
logger.info("Resuming from checkpoint: %s" % chkpt_path)
checkpoint = torch.load(chkpt_path)
model_g.load_state_dict(checkpoint['model_g'])
model_d.load_state_dict(checkpoint['model_d'])
optim_g.load_state_dict(checkpoint['optim_g'])
optim_d.load_state_dict(checkpoint['optim_d'])
step = checkpoint['step']
init_epoch = checkpoint['epoch']
if hp_str != checkpoint['hp_str']:
logger.warning("New hparams is different from checkpoint. Will use new.")
if githash != checkpoint['githash']:
logger.warning("Code might be different: git hash is different.")
logger.warning("%s -> %s" % (checkpoint['githash'], githash))
else:
logger.info("Starting new training run.")
# this accelerates training when the size of minibatch is always consistent.
# if not consistent, it'll horribly slow down.
torch.backends.cudnn.benchmark = True
try:
model_g.train()
model_d.train()
stft_loss = MultiResolutionSTFTLoss()
criterion = torch.nn.MSELoss().cuda()
sub_stft_loss = MultiResolutionSTFTLoss(hp.subband_stft_loss_params.fft_sizes,
hp.subband_stft_loss_params.hop_sizes,
hp.subband_stft_loss_params.win_lengths)
pqmf = PQMF()
for epoch in itertools.count(init_epoch+1):
if epoch % hp.log.validation_interval == 0:
with torch.no_grad():
validate(hp, args, model_g, model_d, valloader, stft_loss, sub_stft_loss, criterion, pqmf, writer, step)
trainloader.dataset.shuffle_mapping()
loader = tqdm.tqdm(trainloader, desc='Loading train data')
avg_g_loss = []
avg_d_loss = []
avg_adv_loss = []
for (melG, audioG), (melD, audioD) in loader:
melG = melG.cuda() # torch.Size([16, 80, 64])
audioG = audioG.cuda() # torch.Size([16, 1, 16000])
melD = melD.cuda() # torch.Size([16, 80, 64])
audioD = audioD.cuda() #torch.Size([16, 1, 16000]
# generator
optim_g.zero_grad()
fake_audio = model_g(melG)[:, :, :hp.audio.segment_length] # torch.Size([16, 1, 12800])
loss_g = 0.0
# reconstruct the signal from multi-band signal
if hp.model.out_channels > 1:
y_mb_ = fake_audio
fake_audio = pqmf.synthesis(y_mb_)
sc_loss, mag_loss = stft_loss(fake_audio[:, :, :audioG.size(2)].squeeze(1), audioG.squeeze(1))
loss_g = sc_loss + mag_loss
if hp.model.use_subband_stft_loss:
loss_g *= 0.5 # for balancing with subband stft loss
y_mb = pqmf.analysis(audioG)
y_mb = y_mb.view(-1, y_mb.size(2)) # (B, C, T) -> (B x C, T)
y_mb_ = y_mb_.view(-1, y_mb_.size(2)) # (B, C, T) -> (B x C, T)
sub_sc_loss, sub_mag_loss = sub_stft_loss(y_mb_[:, :y_mb.size(-1)], y_mb) # y_mb --> [B*C, T]
loss_g += 0.5 * (sub_sc_loss + sub_mag_loss)
adv_loss = 0.0
if step > hp.train.discriminator_train_start_steps:
disc_real = model_d(audioG)
disc_fake = model_d(fake_audio)
# for multi-scale discriminator
for feats_fake, score_fake in disc_fake:
# adv_loss += torch.mean(torch.sum(torch.pow(score_fake - 1.0, 2), dim=[1, 2]))
adv_loss += criterion(score_fake, torch.ones_like(score_fake))
adv_loss = adv_loss / len(disc_fake) # len(disc_fake) = 3
if hp.model.feat_loss :
for (feats_fake, score_fake), (feats_real, _) in zip(disc_fake, disc_real):
for feat_f, feat_r in zip(feats_fake, feats_real):
adv_loss += hp.model.feat_match * torch.mean(torch.abs(feat_f - feat_r))
loss_g += hp.model.lambda_adv * adv_loss
loss_g.backward()
optim_g.step()
# discriminator
loss_d_avg = 0.0
if step > hp.train.discriminator_train_start_steps:
fake_audio = model_g(melD)[:, :, :hp.audio.segment_length]
if hp.model.out_channels > 1:
fake_audio = pqmf.synthesis(fake_audio)
fake_audio = fake_audio.detach()
loss_d_sum = 0.0
for _ in range(hp.train.rep_discriminator):
optim_d.zero_grad()
disc_fake = model_d(fake_audio)
disc_real = model_d(audioD)
loss_d = 0.0
loss_d_real = 0.0
loss_d_fake = 0.0
for (_, score_fake), (_, score_real) in zip(disc_fake, disc_real):
loss_d_real += criterion(score_real, torch.ones_like(score_real))
loss_d_fake += criterion(score_fake, torch.zeros_like(score_fake))
loss_d_real = loss_d_real / len(disc_real) # len(disc_real) = 3
loss_d_fake = loss_d_fake / len(disc_fake) # len(disc_fake) = 3
loss_d = loss_d_real + loss_d_fake
loss_d.backward()
optim_d.step()
loss_d_sum += loss_d
loss_d_avg = loss_d_sum / hp.train.rep_discriminator
loss_d_avg = loss_d_avg.item()
step += 1
# logging
loss_g = loss_g.item()
avg_g_loss.append(loss_g)
avg_d_loss.append(loss_d_avg)
avg_adv_loss.append(adv_loss)
if any([loss_g > 1e8, math.isnan(loss_g), loss_d_avg > 1e8, math.isnan(loss_d_avg)]):
logger.error("loss_g %.01f loss_d_avg %.01f at step %d!" % (loss_g, loss_d_avg, step))
raise Exception("Loss exploded")
if step % hp.log.summary_interval == 0:
writer.log_training(loss_g, loss_d_avg, adv_loss, step)
loader.set_description("Avg : g %.04f d %.04f ad %.04f| step %d" % (sum(avg_g_loss) / len(avg_g_loss),
sum(avg_d_loss) / len(avg_d_loss),
sum(avg_adv_loss) / len(avg_adv_loss),
step))
if epoch % hp.log.save_interval == 0:
save_path = os.path.join(pt_dir, '%s_%s_%04d.pt'
% (args.name, githash, epoch))
torch.save({
'model_g': model_g.state_dict(),
'model_d': model_d.state_dict(),
'optim_g': optim_g.state_dict(),
'optim_d': optim_d.state_dict(),
'step': step,
'epoch': epoch,
'hp_str': hp_str,
'githash': githash,
}, save_path)
logger.info("Saved checkpoint to: %s" % save_path)
except Exception as e:
logger.info("Exiting due to exception: %s" % e)
traceback.print_exc()
|
the-stack_0_19160 | import os
import sys
import time
from collections import namedtuple
from contextlib import contextmanager
import click
import pendulum
from dagster import DagsterInvariantViolationError, check, seven
from dagster.cli.workspace.cli_target import (
get_repository_location_from_kwargs,
get_repository_origin_from_kwargs,
get_working_directory_from_kwargs,
python_origin_target_argument,
repository_target_argument,
)
from dagster.core.events import EngineEventData
from dagster.core.execution.api import create_execution_plan, execute_plan_iterator
from dagster.core.host_representation.external import ExternalPipeline
from dagster.core.host_representation.selector import PipelineSelector
from dagster.core.instance import DagsterInstance
from dagster.core.scheduler import (
ScheduledExecutionFailed,
ScheduledExecutionSkipped,
ScheduledExecutionSuccess,
)
from dagster.core.scheduler.job import JobTickData, JobTickStatus, JobType
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.core.storage.tags import check_tags
from dagster.core.telemetry import telemetry_wrapper
from dagster.core.test_utils import mock_system_timezone
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.grpc import DagsterGrpcClient, DagsterGrpcServer
from dagster.grpc.impl import core_execute_run
from dagster.grpc.types import ExecuteRunArgs, ExecuteStepArgs
from dagster.serdes import (
deserialize_json_to_dagster_namedtuple,
serialize_dagster_namedtuple,
whitelist_for_serdes,
)
from dagster.serdes.ipc import ipc_write_stream
from dagster.seven import nullcontext
from dagster.utils.error import serializable_error_info_from_exc_info
from dagster.utils.hosted_user_process import recon_pipeline_from_origin
from dagster.utils.interrupts import capture_interrupts
from dagster.utils.merger import merge_dicts
@whitelist_for_serdes
class ExecuteRunArgsLoadComplete(namedtuple("_ExecuteRunArgsLoadComplete", "")):
pass
@click.group(name="api")
def api_cli():
"""
[INTERNAL] These commands are intended to support internal use cases. Users should generally
not invoke these commands interactively.
"""
@api_cli.command(
name="execute_run",
help=(
"[INTERNAL] This is an internal utility. Users should generally not invoke this command "
"interactively."
),
)
@click.argument("input_json", type=click.STRING)
def execute_run_command(input_json):
with capture_interrupts():
args = check.inst(deserialize_json_to_dagster_namedtuple(input_json), ExecuteRunArgs)
recon_pipeline = recon_pipeline_from_origin(args.pipeline_origin)
with (
DagsterInstance.from_ref(args.instance_ref)
if args.instance_ref
else DagsterInstance.get()
) as instance:
buffer = []
def send_to_buffer(event):
buffer.append(serialize_dagster_namedtuple(event))
_execute_run_command_body(
recon_pipeline, args.pipeline_run_id, instance, send_to_buffer
)
for line in buffer:
click.echo(line)
def _execute_run_command_body(recon_pipeline, pipeline_run_id, instance, write_stream_fn):
    # we need to signal that we have loaded the args so that the calling
    # process knows it is safe to clean up the temp input file
write_stream_fn(ExecuteRunArgsLoadComplete())
pipeline_run = instance.get_run_by_id(pipeline_run_id)
pid = os.getpid()
instance.report_engine_event(
"Started process for pipeline (pid: {pid}).".format(pid=pid),
pipeline_run,
EngineEventData.in_process(pid, marker_end="cli_api_subprocess_init"),
)
try:
for event in core_execute_run(recon_pipeline, pipeline_run, instance):
write_stream_fn(event)
finally:
instance.report_engine_event(
"Process for pipeline exited (pid: {pid}).".format(pid=pid),
pipeline_run,
)
def get_step_stats_by_key(instance, pipeline_run, step_keys_to_execute):
    # When using the k8s executor, there should only ever be one step key
step_stats = instance.get_run_step_stats(pipeline_run.run_id, step_keys=step_keys_to_execute)
step_stats_by_key = {step_stat.step_key: step_stat for step_stat in step_stats}
return step_stats_by_key
def verify_step(instance, pipeline_run, retry_state, step_keys_to_execute):
step_stats_by_key = get_step_stats_by_key(instance, pipeline_run, step_keys_to_execute)
for step_key in step_keys_to_execute:
step_stat_for_key = step_stats_by_key.get(step_key)
current_attempt = retry_state.get_attempt_count(step_key) + 1
# When using the k8s executor, it is possible to get into an edge case when deleting
# a step pod. K8s will restart the pod immediately even though we don't want it to.
        # Pods can be deleted manually or due to node failures (for example, when running on
# a spot instance that is evicted).
#
# If we encounter one of the error cases below, we exit with a success exit code
# so that we don't cause the "Encountered failed job pods" error.
#
# Instead, the step will be marked as being in an unknown state by the executor and the
# pipeline will fail accordingly.
if current_attempt == 1 and step_stat_for_key:
# If this is the first attempt, there shouldn't be any step stats for this
# event yet.
instance.report_engine_event(
"Attempted to run {step_key} again even though it was already started. "
"Exiting to prevent re-running the step.".format(step_key=step_key),
pipeline_run,
)
return False
elif current_attempt > 1 and step_stat_for_key:
# If this is a retry, then the number of previous attempts should be exactly one less
# than the current attempt
if step_stat_for_key.attempts != current_attempt - 1:
instance.report_engine_event(
"Attempted to run retry attempt {current_attempt} for step {step_key} again "
"even though it was already started. Exiting to prevent re-running "
"the step.".format(current_attempt=current_attempt, step_key=step_key),
pipeline_run,
)
return False
elif current_attempt > 1 and not step_stat_for_key:
instance.report_engine_event(
"Attempting to retry attempt {current_attempt} for step {step_key} "
"but there is no record of the original attempt".format(
current_attempt=current_attempt, step_key=step_key
),
pipeline_run,
)
return False
return True
@api_cli.command(
name="execute_step",
help=(
"[INTERNAL] This is an internal utility. Users should generally not invoke this command "
"interactively."
),
)
@click.argument("input_json", type=click.STRING)
def execute_step_command(input_json):
with capture_interrupts():
args = check.inst(deserialize_json_to_dagster_namedtuple(input_json), ExecuteStepArgs)
with (
DagsterInstance.from_ref(args.instance_ref)
if args.instance_ref
else DagsterInstance.get()
) as instance:
pipeline_run = instance.get_run_by_id(args.pipeline_run_id)
check.inst(
pipeline_run,
PipelineRun,
"Pipeline run with id '{}' not found for step execution".format(
args.pipeline_run_id
),
)
if args.should_verify_step:
success = verify_step(
instance,
pipeline_run,
args.known_state.get_retry_state(),
args.step_keys_to_execute,
)
if not success:
return
recon_pipeline = recon_pipeline_from_origin(
args.pipeline_origin
).subset_for_execution_from_existing_pipeline(pipeline_run.solids_to_execute)
execution_plan = create_execution_plan(
recon_pipeline,
run_config=pipeline_run.run_config,
step_keys_to_execute=args.step_keys_to_execute,
mode=pipeline_run.mode,
known_state=args.known_state,
)
buff = []
for event in execute_plan_iterator(
execution_plan,
recon_pipeline,
pipeline_run,
instance,
run_config=pipeline_run.run_config,
retry_mode=args.retry_mode,
):
buff.append(serialize_dagster_namedtuple(event))
for line in buff:
click.echo(line)
class _ScheduleLaunchContext:
def __init__(self, tick, instance, stream):
self._instance = instance
self._tick = tick # placeholder for the current tick
self._stream = stream
def update_state(self, status, **kwargs):
self._tick = self._tick.with_status(status=status, **kwargs)
def add_run(self, run_id, run_key=None):
self._tick = self._tick.with_run(run_id, run_key)
@property
def stream(self):
return self._stream
def write(self):
self._instance.update_job_tick(self._tick)
@contextmanager
def _schedule_tick_context(instance, stream, tick_data):
tick = instance.create_job_tick(tick_data)
context = _ScheduleLaunchContext(tick=tick, instance=instance, stream=stream)
try:
yield context
except Exception: # pylint: disable=broad-except
error_data = serializable_error_info_from_exc_info(sys.exc_info())
context.update_state(JobTickStatus.FAILURE, error=error_data)
stream.send(ScheduledExecutionFailed(run_id=None, errors=[error_data]))
finally:
context.write()
@api_cli.command(name="grpc", help="Serve the Dagster inter-process API over GRPC")
@click.option(
"--port",
"-p",
type=click.INT,
required=False,
help="Port over which to serve. You must pass one and only one of --port/-p or --socket/-s.",
)
@click.option(
"--socket",
"-s",
type=click.Path(),
required=False,
help="Serve over a UDS socket. You must pass one and only one of --port/-p or --socket/-s.",
)
@click.option(
"--host",
"-h",
type=click.STRING,
required=False,
default="localhost",
help="Hostname at which to serve. Default is localhost.",
)
@click.option(
"--max_workers",
"-n",
type=click.INT,
required=False,
default=None,
help="Maximum number of (threaded) workers to use in the GRPC server",
)
@click.option(
"--heartbeat",
is_flag=True,
help=(
"If set, the GRPC server will shut itself down when it fails to receive a heartbeat "
"after a timeout configurable with --heartbeat-timeout."
),
)
@click.option(
"--heartbeat-timeout",
type=click.INT,
required=False,
default=30,
help="Timeout after which to shutdown if --heartbeat is set and a heartbeat is not received",
)
@click.option(
"--lazy-load-user-code",
is_flag=True,
required=False,
default=False,
help="Wait until the first LoadRepositories call to actually load the repositories, instead of "
"waiting to load them when the server is launched. Useful for surfacing errors when the server "
"is managed directly from Dagit",
)
@python_origin_target_argument
@click.option(
"--ipc-output-file",
type=click.Path(),
help="[INTERNAL] This option should generally not be used by users. Internal param used by "
"dagster when it automatically spawns gRPC servers to communicate the success or failure of the "
"server launching.",
)
@click.option(
"--fixed-server-id",
type=click.STRING,
required=False,
help="[INTERNAL] This option should generally not be used by users. Internal param used by "
"dagster to spawn a gRPC server with the specified server id.",
)
@click.option(
"--override-system-timezone",
type=click.STRING,
required=False,
help="[INTERNAL] This option should generally not be used by users. Override the system "
"timezone for tests.",
)
def grpc_command(
port=None,
socket=None,
host=None,
max_workers=None,
heartbeat=False,
heartbeat_timeout=30,
lazy_load_user_code=False,
ipc_output_file=None,
fixed_server_id=None,
override_system_timezone=None,
**kwargs,
):
if seven.IS_WINDOWS and port is None:
raise click.UsageError(
"You must pass a valid --port/-p on Windows: --socket/-s not supported."
)
    # Exactly one of --port/-p or --socket/-s must be provided.
    if not (port or socket) or (port and socket):
        raise click.UsageError("You must pass one and only one of --port/-p or --socket/-s.")
loadable_target_origin = None
if any(
kwargs[key]
for key in [
"attribute",
"working_directory",
"module_name",
"package_name",
"python_file",
"empty_working_directory",
]
):
loadable_target_origin = LoadableTargetOrigin(
executable_path=sys.executable,
attribute=kwargs["attribute"],
working_directory=get_working_directory_from_kwargs(kwargs),
module_name=kwargs["module_name"],
python_file=kwargs["python_file"],
package_name=kwargs["package_name"],
)
with (
mock_system_timezone(override_system_timezone)
if override_system_timezone
else nullcontext()
):
server = DagsterGrpcServer(
port=port,
socket=socket,
host=host,
loadable_target_origin=loadable_target_origin,
max_workers=max_workers,
heartbeat=heartbeat,
heartbeat_timeout=heartbeat_timeout,
lazy_load_user_code=lazy_load_user_code,
ipc_output_file=ipc_output_file,
fixed_server_id=fixed_server_id,
)
server.serve()
@api_cli.command(name="grpc-health-check", help="Check the status of a dagster GRPC server")
@click.option(
"--port",
"-p",
type=click.INT,
required=False,
help="Port over which to serve. You must pass one and only one of --port/-p or --socket/-s.",
)
@click.option(
"--socket",
"-s",
type=click.Path(),
required=False,
help="Serve over a UDS socket. You must pass one and only one of --port/-p or --socket/-s.",
)
@click.option(
"--host",
"-h",
type=click.STRING,
required=False,
default="localhost",
help="Hostname at which to serve. Default is localhost.",
)
def grpc_health_check_command(port=None, socket=None, host="localhost"):
if seven.IS_WINDOWS and port is None:
raise click.UsageError(
"You must pass a valid --port/-p on Windows: --socket/-s not supported."
)
    # Exactly one of --port/-p or --socket/-s must be provided.
    if not (port or socket) or (port and socket):
        raise click.UsageError("You must pass one and only one of --port/-p or --socket/-s.")
client = DagsterGrpcClient(port=port, socket=socket, host=host)
status = client.health_check_query()
if status != "SERVING":
sys.exit(1)
###################################################################################################
# WARNING: these cli args are encoded in cron, so are not safely changed without migration
###################################################################################################
@api_cli.command(
name="launch_scheduled_execution",
help=(
"[INTERNAL] This is an internal utility. Users should generally not invoke this command "
"interactively."
),
)
@click.argument("output_file", type=click.Path())
@repository_target_argument
@click.option("--schedule_name")
@click.option("--override-system-timezone")
def launch_scheduled_execution(output_file, schedule_name, override_system_timezone, **kwargs):
with (
mock_system_timezone(override_system_timezone)
if override_system_timezone
else nullcontext()
):
with ipc_write_stream(output_file) as stream:
with DagsterInstance.get() as instance:
repository_origin = get_repository_origin_from_kwargs(kwargs)
job_origin = repository_origin.get_job_origin(schedule_name)
# open the tick scope before we load any external artifacts so that
# load errors are stored in DB
with _schedule_tick_context(
instance,
stream,
JobTickData(
job_origin_id=job_origin.get_id(),
job_name=schedule_name,
job_type=JobType.SCHEDULE,
status=JobTickStatus.STARTED,
timestamp=time.time(),
),
) as tick_context:
with get_repository_location_from_kwargs(kwargs) as repo_location:
repo_dict = repo_location.get_repositories()
check.invariant(
repo_dict and len(repo_dict) == 1,
"Passed in arguments should reference exactly one repository, instead there are {num_repos}".format(
num_repos=len(repo_dict)
),
)
external_repo = next(iter(repo_dict.values()))
if not schedule_name in [
schedule.name for schedule in external_repo.get_external_schedules()
]:
raise DagsterInvariantViolationError(
"Could not find schedule named {schedule_name}".format(
schedule_name=schedule_name
),
)
external_schedule = external_repo.get_external_schedule(schedule_name)
# Validate that the schedule's timezone matches the system timezone
schedule_timezone = (
external_schedule.execution_timezone
if external_schedule.execution_timezone
else "UTC"
)
system_timezone = pendulum.now().timezone.name
if system_timezone != schedule_timezone:
raise DagsterInvariantViolationError(
"Schedule {schedule_name} is set to execute in {schedule_timezone}, "
"but this scheduler can only run in the system timezone, "
"{system_timezone}. Use DagsterDaemonScheduler if you want to be able "
"to execute schedules in arbitrary timezones.".format(
schedule_name=external_schedule.name,
schedule_timezone=schedule_timezone,
system_timezone=system_timezone,
),
)
_launch_scheduled_executions(
instance, repo_location, external_repo, external_schedule, tick_context
)
@telemetry_wrapper
def _launch_scheduled_executions(
instance, repo_location, external_repo, external_schedule, tick_context
):
pipeline_selector = PipelineSelector(
location_name=repo_location.name,
repository_name=external_repo.name,
pipeline_name=external_schedule.pipeline_name,
solid_selection=external_schedule.solid_selection,
)
subset_pipeline_result = repo_location.get_subset_external_pipeline_result(pipeline_selector)
external_pipeline = ExternalPipeline(
subset_pipeline_result.external_pipeline_data,
external_repo.handle,
)
schedule_execution_data = repo_location.get_external_schedule_execution_data(
instance=instance,
repository_handle=external_repo.handle,
schedule_name=external_schedule.name,
scheduled_execution_time=None, # No way to know this in general for this scheduler
)
if not schedule_execution_data.run_requests:
# Update tick to skipped state and return
tick_context.update_state(JobTickStatus.SKIPPED)
tick_context.stream.send(ScheduledExecutionSkipped())
return
for run_request in schedule_execution_data.run_requests:
_launch_run(
instance, repo_location, external_schedule, external_pipeline, tick_context, run_request
)
tick_context.update_state(JobTickStatus.SUCCESS)
def _launch_run(
instance, repo_location, external_schedule, external_pipeline, tick_context, run_request
):
run_config = run_request.run_config
schedule_tags = run_request.tags
execution_plan_snapshot = None
errors = []
try:
external_execution_plan = repo_location.get_external_execution_plan(
external_pipeline,
run_config,
external_schedule.mode,
step_keys_to_execute=None,
known_state=None,
)
execution_plan_snapshot = external_execution_plan.execution_plan_snapshot
except Exception: # pylint: disable=broad-except
errors.append(serializable_error_info_from_exc_info(sys.exc_info()))
pipeline_tags = external_pipeline.tags or {}
check_tags(pipeline_tags, "pipeline_tags")
tags = merge_dicts(pipeline_tags, schedule_tags)
# Enter the run in the DB with the information we have
possibly_invalid_pipeline_run = instance.create_run(
pipeline_name=external_schedule.pipeline_name,
run_id=None,
run_config=run_config,
mode=external_schedule.mode,
solids_to_execute=external_pipeline.solids_to_execute,
step_keys_to_execute=None,
solid_selection=external_pipeline.solid_selection,
status=None,
root_run_id=None,
parent_run_id=None,
tags=tags,
pipeline_snapshot=external_pipeline.pipeline_snapshot,
execution_plan_snapshot=execution_plan_snapshot,
parent_pipeline_snapshot=external_pipeline.parent_pipeline_snapshot,
external_pipeline_origin=external_pipeline.get_external_origin(),
)
tick_context.add_run(run_id=possibly_invalid_pipeline_run.run_id, run_key=run_request.run_key)
# If there were errors, inject them into the event log and fail the run
if len(errors) > 0:
for error in errors:
instance.report_engine_event(
error.message,
possibly_invalid_pipeline_run,
EngineEventData.engine_error(error),
)
instance.report_run_failed(possibly_invalid_pipeline_run)
tick_context.stream.send(
ScheduledExecutionFailed(run_id=possibly_invalid_pipeline_run.run_id, errors=errors)
)
return
try:
launched_run = instance.submit_run(possibly_invalid_pipeline_run.run_id, external_pipeline)
except Exception: # pylint: disable=broad-except
tick_context.stream.send(
ScheduledExecutionFailed(
run_id=possibly_invalid_pipeline_run.run_id,
errors=[serializable_error_info_from_exc_info(sys.exc_info())],
)
)
return
tick_context.stream.send(ScheduledExecutionSuccess(run_id=launched_run.run_id))
|
the-stack_0_19161 | import angr
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(str(__file__))), '..', '..', 'binaries', 'tests')
arch_data = { # (steps, [hit addrs], finished)
'x86_64': (330, (0x1021c20, 0x1021980, 0x1021be0, 0x4004b0, 0x400440, 0x400570), True),
'i386': (425, (0x90198e0, 0x90195c0, 0x9019630, 0x90198a0, 0x8048370, 0x80482f8, 0x8048440, 0x804846D, 0x8048518), True),
'ppc': (381, (0x11022f50, 0x11022eb0, 0x10000340, 0x100002e8, 0x1000053C, 0x1000063C), True),
'ppc64': (372, (0x11047490, 0x100003fc, 0x10000368, 0x10000654, 0x10000770), True),
'mips': (363, (0x1016f20, 0x400500, 0x400470, 0x400640, 0x400750), True),
'mips64': (390, (0x12103b828, 0x120000870, 0x1200007e0, 0x120000A80, 0x120000B68), True),
'armel': (370, (0x10154b8, 0x1108244, 0x83a8, 0x8348, 0x84b0, 0x84E4, 0x85E8), True),
'aarch64': (370, (0x1020b04, 0x400430, 0x4003b8, 0x400538, 0x400570, 0x40062C), True),
}
def emulate(arch, binary, use_sim_procs, steps, hit_addrs, finished):
# auto_load_libs can't be disabled as the test takes longer time to execute
p = angr.Project(os.path.join(test_location, arch, binary), use_sim_procedures=use_sim_procs, rebase_granularity=0x1000000, load_debug_info=False, auto_load_libs=True)
state = p.factory.full_init_state(args=['./test_arrays'], add_options={angr.options.STRICT_PAGE_ACCESS, angr.options.ENABLE_NX, angr.options.ZERO_FILL_UNCONSTRAINED_MEMORY, angr.options.USE_SYSTEM_TIMES})
pg = p.factory.simulation_manager(state, resilience=True)
pg2 = pg.run(until=lambda lpg: len(lpg.active) != 1)
is_finished = False
if len(pg2.active) > 0:
state = pg2.active[0]
elif len(pg2.deadended) > 0:
state = pg2.deadended[0]
is_finished = True
elif len(pg2.errored) > 0:
state = pg2.errored[0].state # ErroredState object!
else:
raise ValueError("The result does not contain a state we can use for this test?")
assert state.history.depth >= steps
# this is some wonky control flow that asserts that the items in hit_addrs appear in the state in order.
trace = state.history.bbl_addrs.hardcopy
reqs = list(hit_addrs)
while len(reqs) > 0:
req = reqs.pop(0)
while True:
assert len(trace) > 0
trace_head = trace.pop(0)
if trace_head == req:
break
assert trace_head not in reqs
if finished:
assert is_finished
def test_emulation():
for arch in arch_data:
steps, hit_addrs, finished = arch_data[arch]
yield emulate, arch, 'test_arrays', False, steps, hit_addrs, finished
def test_windows():
yield emulate, 'i386', 'test_arrays.exe', True, 41, [], False # blocked on GetLastError or possibly dynamic loading
def test_locale():
# auto_load_libs can't be disabled as the test takes longer time to execute
p = angr.Project(os.path.join(test_location, 'i386', 'isalnum'), use_sim_procedures=False, auto_load_libs=True)
state = p.factory.full_init_state(args=['./isalnum'], add_options={angr.options.STRICT_PAGE_ACCESS})
pg = p.factory.simulation_manager(state)
pg2 = pg.run(until=lambda lpg: len(lpg.active) != 1,
step_func=lambda lpg: lpg if len(lpg.active) == 1 else lpg.prune()
)
assert len(pg2.active) == 0
assert len(pg2.deadended) == 1
assert pg2.deadended[0].history.events[-1].type == 'terminate'
assert pg2.deadended[0].history.events[-1].objects['exit_code']._model_concrete.value == 0
if __name__ == '__main__':
#emulate('armel', 'test_arrays', False, *arch_data['armel'])
#import sys; sys.exit()
for func, a, b, c, d, e, f in test_windows():
print(a, b)
func(a, b, c, d, e, f)
print('locale')
test_locale()
for func, a, b, c, d, e, f in test_emulation():
print(a, b)
func(a, b, c, d, e, f)
|
the-stack_0_19164 | # Author: Vlad Niculae <[email protected]>
# License: BSD 3-clause
from collections import Counter
from operator import itemgetter
import os
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import FeatureUnion, Pipeline, make_pipeline
from nltk import Tree
from marseille.custom_logging import logging
from marseille.datasets import load_embeds
from marseille.features import add_pmi_features
from marseille.io import save_csr
class FilteredDictVectorizer(DictVectorizer):
def __init__(self, columns, dtype=np.float64, sparse=True):
super(FilteredDictVectorizer, self).__init__(dtype=dtype,
sparse=sparse)
self.columns = columns
def fit(self, X, y=None):
return super(FilteredDictVectorizer, self).fit(
[{key: x[key] for key in self.columns} for x in X])
def fit_transform(self, X, y=None):
return super(FilteredDictVectorizer, self).fit_transform(
[{key: x[key] for key in self.columns} for x in X])
def transform(self, X, y=None):
return super(FilteredDictVectorizer, self).transform(
[{key: x[key] for key in self.columns} for x in X])
def _lower_words_getter(feats):
return (w.lower() for w in feats['words'])
class EmbeddingVectorizer(BaseEstimator, TransformerMixin):
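    # Averages pre-trained word embeddings per document: the inner TfidfVectorizer
    # (use_idf=False, norm='l1') yields relative token frequencies restricted to
    # embed_vocab, so the dot product with `embeds` is a frequency-weighted mean
    # embedding. (Descriptive comment added for clarity.)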
def __init__(self, embeds, embed_vocab, mode='average'):
self.embeds = embeds
self.embed_vocab = embed_vocab
self.mode = mode
def fit(self, X, y=None, **fit_params):
self.vect_ = TfidfVectorizer(vocabulary=self.embed_vocab,
analyzer=_lower_words_getter,
norm='l1',
use_idf=False).fit(X)
return self
def transform(self, X, y=None):
return safe_sparse_dot(self.vect_.transform(X), self.embeds)
def fit_transform(self, X, y=None, **fit_params):
self.vect_ = TfidfVectorizer(vocabulary=self.embed_vocab,
analyzer=_lower_words_getter,
norm='l1',
use_idf=False)
return safe_sparse_dot(self.vect_.fit_transform(X), self.embeds)
def get_feature_names(self):
return ["dim{:03d}".format(k)
for k in range(self.embeds.shape[1])]
def custom_fnames(union):
feature_names = []
for name, trans, weight in union._iter():
if hasattr(trans, 'get_feature_names'):
this_fn = trans.get_feature_names()
elif isinstance(trans, Pipeline):
# we use pipelines to scale only specific attributes.
# In this case, the vectorizer is first in the pipe.
this_fn = trans.steps[0][-1].get_feature_names()
else:
raise AttributeError("Transformer %s (type %s) does not "
"provide get_feature_names." % (
str(name), type(trans).__name__))
feature_names.extend([name + "__" + f for f in this_fn])
return feature_names
class PrecedingStats(BaseEstimator, TransformerMixin):
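    # Estimates P(label | tuple of preceding labels) with additive smoothing `alpha`,
    # from counts of (preceding_, label_) pairs observed during fit.
    # (Descriptive comment added for clarity.)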
def __init__(self, alpha=0.001):
self.alpha = alpha
def fit(self, feats):
self.labels_ = ('MajorClaim', 'Claim', 'Premise')
self.counts_ = {label: Counter() for label in self.labels_}
for prop in feats:
self.counts_[prop['label_']][tuple(prop['preceding_'])] += 1
return self
def count(self, preceding):
return [self.counts_[label][preceding] for label in self.labels_]
def transform(self, feats):
stats = [self.count(tuple(prop['preceding_'])) for prop in feats]
stats = np.array(stats, dtype=np.double)
stats += self.alpha
stats /= np.sum(stats, axis=1)[:, np.newaxis]
return stats
def get_feature_names(self):
return ["p({}|prefix)".format(lbl) for lbl in self.labels_]
def stats_train(docs):
lemma_freqs = Counter()
prod_freqs = Counter()
dep_freqs = Counter()
count_outgoing = Counter()
count_incoming = Counter()
n_links = 0
n_props = 0
for doc in docs:
lemma_freqs.update(w['lemma'].lower() for sent in doc.nlp['sentences']
for w in sent['tokens'])
prod_freqs.update(
str(prod) for sent in doc.nlp['sentences'] for prod in
Tree.fromstring(sent['parse']).productions())
for sent in doc.nlp['sentences']:
lemmas = ['ROOT'] + [w['lemma'] for w in sent['tokens']]
dep_freqs.update('{} -> {}'.format(lemmas[arc['dependent']],
lemmas[arc['governor']])
for arc
in sent['collapsed-ccprocessed-dependencies'])
n_props += len(doc.prop_offsets)
for link in doc.features:
if link['label_']:
# tokens in src are counted as outgoing
count_outgoing.update(w.lower() for w in link['src__lemmas'])
n_links += 1
# tokens in trg are counted as outgoing
count_incoming.update(w.lower() for w in link['trg__lemmas'])
link_log_proba = np.log(n_links) - np.log(n_props)
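    # PMI-style score (descriptive comment added for clarity): for a word w,
    #   pmi(w, class) = log(count of w in linked sources/targets)
    #                   - log(overall lemma count of w) - log(n_links / n_props)
    # so higher values mean w co-occurs with links more often than chance.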
def pmi(word, class_count):
return (np.log(class_count[word]) - np.log(lemma_freqs[word]) -
link_log_proba)
pmi_incoming = {w: pmi(w, count_incoming) for w in count_incoming}
pmi_outgoing = {w: pmi(w, count_outgoing) for w in count_outgoing}
# ensure order
lemma_freqs = sorted(list(lemma_freqs.most_common()),
key=lambda x: (-x[1], x[0]))
prod_freqs = sorted(list(prod_freqs.most_common()),
key=lambda x: (-x[1], x[0]))
dep_freqs = sorted(list(dep_freqs.most_common()),
key=lambda x: (-x[1], x[0]))
return lemma_freqs, prod_freqs, dep_freqs, pmi_incoming, pmi_outgoing
def make_union_prop(vect_params):
for key, params in sorted(vect_params.items()):
yield key, CountVectorizer(analyzer=itemgetter(key), binary=True,
**params)
def make_union_link(vect_params):
for which in ('src', 'trg'):
for key, params in sorted(vect_params.items()):
getkey = "{}__{}".format(which, key)
prefix = "[{}]_{}".format(which, key)
vect_ = CountVectorizer(analyzer=itemgetter(getkey), binary=True,
**params)
yield prefix, vect_
def prop_vectorizer(train_docs, which, stats=None, n_most_common_tok=1000,
n_most_common_dep=1000, return_transf=False):
# One pass to compute training corpus statistics.
train_docs = list(train_docs)
if stats is None:
stats = stats_train(train_docs)
lemma_freqs, _, dep_freqs, _, _ = stats
# vectorize BOW-style features
lemma_vocab = [w for w, _ in lemma_freqs[:n_most_common_tok]]
dep_vocab = [p for p, _ in dep_freqs[:n_most_common_dep]]
vects = dict(lemmas=dict(vocabulary=lemma_vocab, lowercase=True),
dependency_tuples=dict(vocabulary=dep_vocab), pos={},
discourse={}, indicators={}, indicator_preceding_in_para={},
indicator_following_in_para={})
raw_keys = ['is_first_in_para', 'is_last_in_para', 'toks_to_sent_ratio',
'relative_in_para', 'first_person_any', 'root_vb_modal',
'root_vb_tense']
nrm_keys = ['n_tokens', 'n_toks_in_sent', 'n_toks_in_para',
'n_toks_preceding_in_sent', 'n_toks_following_in_sent',
'preceding_props_in_para', 'following_props_in_para',
'parse_tree_height', 'n_subordinate_clauses']
if which == 'ukp':
raw_keys += ['is_in_intro', 'is_in_conclusion',
'has_shared_np_intro', 'has_shared_vp_intro',
'has_shared_np_conclusion', 'has_shared_vp_conclusion']
nrm_keys += ['n_shared_np_intro', 'n_shared_vp_intro',
'n_shared_np_conclusion', 'n_shared_vp_conclusion']
# load embeds
embed_vocab, embeds = load_embeds(which)
vect_list = list(make_union_prop(vects)) + [
('raw', FilteredDictVectorizer(raw_keys)),
('nrm', make_pipeline(FilteredDictVectorizer(nrm_keys, sparse=False),
MinMaxScaler((0, 1)))),
('embeds', EmbeddingVectorizer(embeds, embed_vocab))]
if which == 'ukp':
vect_list.append(('proba', PrecedingStats()))
vect = FeatureUnion(vect_list)
train_feats = [f for doc in train_docs for f in doc.prop_features]
if return_transf:
X_tr = vect.fit_transform(train_feats)
return vect, X_tr
else:
return vect.fit(train_feats)
def link_vectorizer(train_docs, stats=None, n_most_common=1000,
return_transf=False):
# One pass to compute training corpus statistics.
train_docs = list(train_docs)
if stats is None:
stats = stats_train(train_docs)
lemma_freqs, prod_freqs, _, pmi_incoming, pmi_outgoing = stats
# vectorize BOW-style features
lemma_vocab = [w for w, _ in lemma_freqs[:n_most_common]]
prod_vocab = [p for p, _ in prod_freqs[:n_most_common]]
vects = dict(lemmas=dict(vocabulary=lemma_vocab, lowercase=True),
productions=dict(vocabulary=prod_vocab), pos={}, discourse={},
indicators={}, indicator_preceding_in_para={},
indicator_following_in_para={})
raw_keys = ['src__is_first_in_para', 'src__is_last_in_para',
'trg__is_first_in_para', 'trg__is_last_in_para',
'same_sentence', 'src_precedes_trg', 'trg_precedes_src',
'any_shared_nouns', 'src__pmi_pos_ratio', 'src__pmi_neg_ratio',
'trg__pmi_pos_ratio', 'trg__pmi_neg_ratio', 'src__pmi_pos_any',
'src__pmi_neg_any', 'trg__pmi_pos_any', 'trg__pmi_neg_any', ]
nrm_keys = ['src__n_tokens', 'trg__n_tokens', 'props_between', 'n_props',
'n_shared_nouns']
vect_list = list(make_union_link(vects)) + [
('raw', FilteredDictVectorizer(raw_keys)), ('nrm', make_pipeline(
FilteredDictVectorizer(nrm_keys, sparse=False),
MinMaxScaler((0, 1))))]
vect = FeatureUnion(vect_list)
train_feats = [f for doc in train_docs for f in doc.features]
[add_pmi_features(f, pmi_incoming, pmi_outgoing) for f in train_feats]
if return_transf:
X_tr = vect.fit_transform(train_feats)
return vect, X_tr
else:
return vect.fit(train_feats)
def second_order_vectorizer(train_docs):
# this is very simple and all features are already in 0,1
train_docs = list(train_docs)
raw_keys = ['same_sentence', 'same_sentence_ab', 'same_sentence_ac',
'same_sentence_bc', 'order_abc', 'order_acb', 'order_bac',
'order_bca', 'order_cab', 'order_cba', 'range_leq_1',
'range_leq_2', 'range_leq_3', 'range_leq_4', 'range_leq_5',
'any_shared_nouns', 'any_shared_nouns_ab',
'any_shared_nouns_ac', 'any_shared_nouns_bc', 'jaccard',
'jaccard_ab', 'jaccard_ac', 'jaccard_bc',
'shared_nouns_ratio_a', 'shared_nouns_ratio_b',
'shared_nouns_ratio_c', 'shared_nouns_ratio_ab',
'shared_nouns_ratio_ac', 'shared_nouns_ratio_bc',
'shared_nouns_ab_ratio_a', 'shared_nouns_ab_ratio_b',
'shared_nouns_ac_ratio_a', 'shared_nouns_ac_ratio_c',
'shared_nouns_bc_ratio_b', 'shared_nouns_bc_ratio_c']
vect = FilteredDictVectorizer(raw_keys)
vect.fit([f for doc in train_docs for f in doc.second_order_features])
return vect
def vectorize(train_docs, test_docs, which, n_most_common=500):
"""Train a vectorizer on the training docs and transform the test docs.
We use a function because scikit-learn vectorizers cannot change the
number of samples, but we need to extract multiple rows from each doc.
So we cannot use pipelines.
"""
logging.info("Vectorizing...")
# One pass to compute training corpus statistics.
train_docs = list(train_docs)
test_docs = list(test_docs)
stats = stats_train(train_docs)
_, _, _, pmi_incoming, pmi_outgoing = stats
# link vectors
vect, X_tr = link_vectorizer(train_docs, stats, n_most_common,
return_transf=True)
y_tr = np.array([f['label_'] for doc in train_docs for f in doc.features],
dtype=np.bool)
test_feats = [f for doc in test_docs for f in doc.features]
[add_pmi_features(f, pmi_incoming, pmi_outgoing) for f in test_feats]
y_te = np.array([f['label_'] for f in test_feats], dtype=np.bool)
X_te = vect.transform(test_feats)
# prop vectors
prop_vect, prop_X_tr = prop_vectorizer(train_docs, which, stats,
n_most_common_tok=None,
n_most_common_dep=2000,
return_transf=True)
prop_y_tr = np.array([str(f['label_']) for doc in train_docs
for f in doc.prop_features])
prop_y_te = np.array([str(f['label_']) for doc in test_docs
for f in doc.prop_features])
test_feats = [f for doc in test_docs for f in doc.prop_features]
prop_X_te = prop_vect.transform(test_feats)
return ((prop_X_tr, prop_X_te, prop_y_tr, prop_y_te, prop_vect),
(X_tr, X_te, y_tr, y_te, vect))
def main():
from marseille.argdoc import CdcpArgumentationDoc, UkpEssayArgumentationDoc
from marseille.datasets import (cdcp_train_ids, cdcp_test_ids,
ukp_train_ids, ukp_test_ids)
from docopt import docopt
usage = """
Usage:
vectorize folds (cdcp|ukp) [--n-folds=N]
vectorize split (cdcp|ukp)
Options:
--n-folds=N number of cross-val folds to generate. [default: 3]
"""
args = docopt(usage)
which = 'ukp' if args['ukp'] else 'cdcp' if args['cdcp'] else None
if args['ukp']:
_tpl = os.path.join("data", "process", "ukp-essays", "essay{:03d}")
_path = os.path.join("data", "process", "ukp-essays", "folds", "{}",
"{}")
_load = lambda which, ks: (UkpEssayArgumentationDoc(_tpl.format(k))
for k in ks)
ids = ukp_train_ids
test_ids = ukp_test_ids
else:
_tpl = os.path.join("data", "process", "erule", "{}", "{:05d}")
_path = os.path.join("data", "process", "erule", "folds", "{}", "{}")
_load = lambda which, ks: (CdcpArgumentationDoc(_tpl.format(which, k))
for k in ks)
ids = cdcp_train_ids
test_ids = cdcp_test_ids
if args['folds']:
n_folds = int(args['--n-folds'])
ids = np.array(ids)
for k, (tr, val) in enumerate(KFold(n_folds).split(ids)):
train_docs = _load("train", ids[tr])
val_docs = _load("train", ids[val])
prop_out, link_out = vectorize(train_docs, val_docs,
which=which)
X_tr, X_val, y_tr, y_val, vect = link_out
fnames = custom_fnames(vect)
save_csr(_path.format(k, "train.npz"), X_tr, y_tr)
save_csr(_path.format(k, "val.npz"), X_val, y_val)
with open(_path.format(k, "fnames.txt"), "w") as f:
for fname in fnames:
print(fname, file=f)
X_tr, X_val, y_tr, y_val, vect = prop_out
fnames = custom_fnames(vect)
save_csr(_path.format(k, "prop-train.npz"), X_tr, y_tr)
save_csr(_path.format(k, "prop-val.npz"), X_val, y_val)
with open(_path.format(k, "prop-fnames.txt"), "w") as f:
for fname in fnames:
print(fname, file=f)
elif args['split']:
train_docs = _load("train", ids)
test_docs = _load("test", test_ids)
prop_out, link_out = vectorize(train_docs, test_docs, which=which)
X_tr, X_te, y_tr, y_te, vect = link_out
fnames = custom_fnames(vect)
save_csr(_path.format("traintest", "train.npz"), X_tr, y_tr)
save_csr(_path.format("traintest", "test.npz"), X_te, y_te)
with open(_path.format("traintest", "fnames.txt"), "w") as f:
for fname in fnames:
print(fname, file=f)
X_tr, X_te, y_tr, y_te, vect = prop_out
fnames = custom_fnames(vect)
save_csr(_path.format("traintest", "prop-train.npz"), X_tr, y_tr)
save_csr(_path.format("traintest", "prop-test.npz"), X_te, y_te)
with open(_path.format("traintest", "prop-fnames.txt"), "w") as f:
for fname in fnames:
print(fname, file=f)
if __name__ == '__main__':
main()
|
the-stack_0_19166 | import argparse
import gym
import numpy as np
from network_models.policy_net import Policy_net
import tensorflow as tf
# noinspection PyTypeChecker
def open_file_and_save(file_path, data):
"""
:param file_path: type==string
:param data:
"""
try:
with open(file_path, 'ab') as f_handle:
np.savetxt(f_handle, data, fmt='%s')
except FileNotFoundError:
with open(file_path, 'wb') as f_handle:
np.savetxt(f_handle, data, fmt='%s')
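# Illustrative note (behaviour inferred from the function above): because the
# file is opened in append mode, repeated calls accumulate rows, e.g.
#     open_file_and_save('trajectory/observations.csv', np.zeros((2, 4)))
#     open_file_and_save('trajectory/observations.csv', np.ones((2, 4)))
# leaves four text rows in trajectory/observations.csv.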
def argparser():
parser = argparse.ArgumentParser()
parser.add_argument('--model', help='filename of model to test', default='trained_models/ppo/model.ckpt')
parser.add_argument('--iteration', default=10, type=int)
return parser.parse_args()
def main(args):
env = gym.make('CartPole-v0')
env.seed(0)
ob_space = env.observation_space
Policy = Policy_net('policy', env)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, args.model)
obs = env.reset()
for iteration in range(args.iteration): # episode
observations = []
actions = []
run_steps = 0
while True:
run_steps += 1
# prepare to feed placeholder Policy.obs
obs = np.stack([obs]).astype(dtype=np.float32)
act, _ = Policy.act(obs=obs, stochastic=True)
act = np.asscalar(act)
observations.append(obs)
actions.append(act)
next_obs, reward, done, info = env.step(act)
if done:
print(run_steps)
obs = env.reset()
break
else:
obs = next_obs
observations = np.reshape(observations, newshape=[-1] + list(ob_space.shape))
actions = np.array(actions).astype(dtype=np.int32)
open_file_and_save('trajectory/observations.csv', observations)
open_file_and_save('trajectory/actions.csv', actions)
if __name__ == '__main__':
args = argparser()
main(args)
|
the-stack_0_19167 | _base_ = [
'../_base_/default_runtime.py'
]
model = dict(
type='GFL',
pretrained=None,
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='GFLHead',
num_classes=1,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
reg_max=16,
loss_bbox=dict(type='GIoULoss', loss_weight=2.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=150)
dataset_type = 'WheatDatasetTest'
data_root = '/kaggle/working/data/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(768, 768), (1280, 1280)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 1024),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/detection_test.json',
img_prefix=data_root + 'test/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)  # paramwise_cfg=dict(bias_decay_mult=0.)
#optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
optimizer_config = dict(grad_clip=None)
# learning policy
# lr_config = dict(
# policy='CosineAnealing',
# warmup='linear',
# warmup_iters=500,
# warmup_ratio=0.001,
# min_lr=0.0001)
# total_epochs = 50
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 22])
total_epochs = 24 |
the-stack_0_19170 | """A smart list."""
from dataclasses import dataclass
from jupiter.domain.entity_name import EntityName
from jupiter.domain.smart_lists.smart_list_key import SmartListKey
from jupiter.framework.aggregate_root import AggregateRoot
from jupiter.framework.base.entity_id import BAD_REF_ID
from jupiter.framework.base.timestamp import Timestamp
@dataclass()
class SmartList(AggregateRoot):
"""A smart list."""
@dataclass(frozen=True)
class Created(AggregateRoot.Created):
"""Created event."""
@dataclass(frozen=True)
class Updated(AggregateRoot.Updated):
"""Updated event."""
_key: SmartListKey
_name: EntityName
@staticmethod
def new_smart_list(key: SmartListKey, name: EntityName, created_time: Timestamp) -> 'SmartList':
"""Create a smart list."""
smart_list = SmartList(
_ref_id=BAD_REF_ID,
_archived=False,
_created_time=created_time,
_archived_time=None,
_last_modified_time=created_time,
_events=[],
_key=key,
_name=name)
smart_list.record_event(SmartList.Created.make_event_from_frame_args(created_time))
return smart_list
def change_name(self, name: EntityName, modification_time: Timestamp) -> 'SmartList':
"""Change the name of the smart list."""
if self._name == name:
return self
self._name = name
self.record_event(SmartList.Updated.make_event_from_frame_args(timestamp=modification_time))
return self
@property
def key(self) -> SmartListKey:
"""The key of the metric."""
return self._key
@property
def name(self) -> EntityName:
"""The name of the metric."""
return self._name
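# Illustrative usage sketch (the constructors of SmartListKey, EntityName and
# Timestamp below are assumptions; only new_smart_list and change_name come
# from this class):
#
#   smart_list = SmartList.new_smart_list(
#       key=SmartListKey.from_raw("books"),
#       name=EntityName.from_raw("Books"),
#       created_time=Timestamp.from_unix_timestamp(0))
#   smart_list = smart_list.change_name(
#       EntityName.from_raw("Books to read"),
#       modification_time=Timestamp.from_unix_timestamp(60))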
|
the-stack_0_19171 | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Time discretized Monte Carlo simulation engine for stochastic processes.
Useful to run simulations of trajectories of stochastic systems.
Provides wrappers to sample trajectories until convergence according to a
tolerance criterion.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections.abc
from absl import logging
import numpy as np
import tensorflow as tf
from typing import Callable, MutableMapping, List, Optional, Tuple, Union
from simulation_research.tf_risk import util
NumpyArrayOrFloat = Union[np.ndarray, float]
TensorOrFloat = Union[tf.Tensor, float]
TensorOrNumpyArray = Union[tf.Tensor, np.ndarray]
TensorOrListOfTensors = Union[tf.Tensor, List[tf.Tensor]]
DynamicsOp = Callable[[tf.Tensor, TensorOrFloat, TensorOrFloat], tf.Tensor]
RunFunction = Callable[[DynamicsOp, tf.Tensor, TensorOrFloat, TensorOrFloat],
TensorOrListOfTensors]
def _max(x):
"""Returns max(x) for an array or max(x)=x for a scalar."""
if isinstance(x, np.ndarray):
return np.max(x)
else:
return x
def _min(x):
"""Returns min(x) for an array or min(x)=x for a scalar."""
if isinstance(x, np.ndarray):
return np.min(x)
else:
return x
def _maybe_tile_tensor_for_mc(tensor, num_samples):
"""Maybe tile state tensor to generate multiple scenarios in Monte Carlo."""
original_rank = tensor.get_shape().rank
if not original_rank:
# If initial_state is a scalar, duplicate to [num_samples] shape.
tensor = tf.expand_dims(tensor, axis=0)
return tf.tile(tensor, [num_samples])
elif original_rank == 1:
# If initial_state is a vector, expand it to [num_samples, num_dims] shape.
tensor = tf.expand_dims(tensor, axis=0)
return tf.tile(tensor, [num_samples, 1])
else:
return tensor
def _reshape_initial_state_for_mc(initial_state,
num_samples):
"""Tile initial conditions to generate multiple scenarios."""
  if (not isinstance(initial_state, tf.Tensor)) and isinstance(
      initial_state, collections.abc.Iterable):
return [_maybe_tile_tensor_for_mc(s, num_samples) for s in initial_state]
else:
return _maybe_tile_tensor_for_mc(initial_state, num_samples)
def multistep_runner_unrolled_tf(return_trajectories=False):
"""Explicit discretization scheme runner using an unrolled tensorflow loop.
Returns a function simulating a dynamical process (potentially stochastic).
Args:
return_trajectories: whether to have run(...) return entire trajectories
(states for all time steps) instead of just the final state.
Returns:
A run function taking 4 arguments
1) dynamics_op: a function turning a triplet (state, time,
discretization step) into the next state.
2) states: typically a [num_samples, num_dims] tensor of scalars entailing
the initial state values.
3) dt: a scalar, the value of the discretization step;
4) duration: a scalar, the amount of simulated time. Floor(duration / dt)
steps will be run in all simulations;
returning a (resp. a list of Floor(duration / dt) + 1)
[num_samples, num_dims] tensor(s) of scalars if return_trajectories is False
(resp. True).
"""
def run(dynamics_op, states, dt, duration):
t = 0.0
trajectories = [states]
while t < duration:
trajectories.append(dynamics_op(trajectories[-1], t, dt))
t += dt
if return_trajectories:
return trajectories
return trajectories[-1]
return run
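# Illustrative sketch (not part of the original library): a minimal dynamics_op
# compatible with the runners above. It applies a deterministic drift step; the
# 0.05 drift value is an arbitrary placeholder.
def _example_drift_dynamics(states, t, dt, drift=0.05):
  """Toy dynamics_op mapping (state, time, step) to state * (1 + drift * dt)."""
  del t  # Time-homogeneous example; the runners still pass the current time.
  return states * (1.0 + drift * dt)
# For instance, multistep_runner_unrolled_tf()(_example_drift_dynamics,
# tf.ones([16]), 0.01, 1.0) builds the corresponding end-state tensor.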
def multistep_runner_tf_while(maximum_iterations = None,
back_prop = True):
"""Explicit discretization scheme runner using a dynamic tensorflow loop.
Returns a function simulating a dynamical process (potentially stochastic).
Args:
maximum_iterations: the maximum number of time steps dynamics will be
simulated for.
back_prop: whether to enable back-propagation through the while loop.
      Disabling back-propagation will reduce the amount of memory needed by a
      factor close to maximum_iterations.
Returns:
A run function (see multistep_runner_unrolled_tf's docstring for
a description) returning a [num_samples, num_dims] tensor of scalars.
"""
if not back_prop:
logging.warning("Disabling back propagation in runner_tf_while.")
def run(dynamics_op, states, dt, duration):
t = tf.constant(0.0)
_, end_states = tf.while_loop(
cond=lambda t, s: tf.less(t, duration),
body=lambda t, s: [t + dt, dynamics_op(s, t, dt)],
loop_vars=[t, states],
shape_invariants=[t.get_shape(), states.get_shape()],
maximum_iterations=maximum_iterations,
back_prop=back_prop)
return end_states
return run
def non_callable_price_mc(
initial_state,
dynamics_op,
payoff_fn,
maturity,
num_samples,
dt,
multistep_runner = multistep_runner_tf_while()):
"""Monte Carlo simulation to price a non callable derivative.
Wrapper around run_multistep_dynamics to price European options.
Args:
initial_state: a scalar or a [num_samples, ...] tensor.
dynamics_op: a function turning a triplet (state, time, discretization step)
into the next state.
payoff_fn: a function transforming the final state [num_samples, ...] of the
simulation into a [num_samples] tensor of payoff samples.
    maturity: a scalar, the amount of simulated time. Floor(maturity / dt)
      steps will be run.
num_samples: the number of samples in the Monte-Carlo simulation.
dt: a scalar, the value of the discretization step.
multistep_runner: a run function (see multistep_runner_unrolled_tf's
docstring for a description).
Returns:
a tensorflow scalar estimate of the mean of outcomes, a tensorflow scalar
estimate of the mean of square outcomes, a [num_samples] tensor of
outcomes.
"""
if dt <= 0.0:
raise ValueError("dt should be >0.0 but is %.f." % dt)
initial_states = _reshape_initial_state_for_mc(initial_state, num_samples)
terminal_states = multistep_runner(dynamics_op, initial_states, dt, maturity)
outcomes = payoff_fn(terminal_states)
mean_outcome = tf.reduce_mean(outcomes)
mean_sq_outcome = tf.reduce_mean(outcomes**2)
return mean_outcome, mean_sq_outcome, outcomes
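# Illustrative sketch (assumed values, written as comments only): pricing a
# European call with strike 1.0 under a simple drift-only dynamics_op would
# look roughly like
#
#   mean, mean_sq, outcomes = non_callable_price_mc(
#       initial_state=tf.constant(1.0),
#       dynamics_op=lambda s, t, dt: s * (1.0 + 0.05 * dt),
#       payoff_fn=lambda s: tf.nn.relu(s - 1.0),
#       maturity=1.0, num_samples=10000, dt=0.01)
#
# with the returned tensors evaluated inside a tf.Session.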
def sensitivity_autodiff(price, diff_target):
"""Compute the first order derivative of price estimate w.r.t. diff_target."""
return tf.gradients([price], [diff_target])[0]
def mc_estimator(mean_est,
mean_sq_est,
batch_size,
key_placeholder,
feed_dict,
tol = 1e-3,
confidence = 0.95,
max_num_steps = 10000,
tf_session = None,
                 tol_is_relative = True):
"""Run Monte-Carlo until convergence.
Args:
mean_est: [num_dims] tensor of scalars for the estimate of the mean of the
outcome.
mean_sq_est: [num_dims] tensor of scalars for the estimate of the mean of
the squared outcome.
batch_size: size of the mini-batch.
key_placeholder: the placeholder entailing the key for the sub-stream of
pseudo or quasi random numbers.
feed_dict: (optional) feed_dict of placeholders for the parameters of the
estimator (e.g. drift, volatility).
tol: tolerance for the relative with of the confidence interval.
confidence: level of confidence of the confidence interval. 0.95 means that
we are confident at 95% that the actual mean is in the interval.
max_num_steps: maximum number of mini-batches computed.
tf_session: (optional) tensorflow session.
tol_is_relative: (optional) whether to consider numerical precision in
relative or absolute terms.
Returns:
the estimated mean ([num_dims] numpy array), the estimated (relative if
tol_is_relative) half confidence interval given by the
central limit theorem ([num_dims] numpy array), a boolean stating
whether the method has converged or not.
"""
if max_num_steps <= 0:
raise ValueError("max_num_steps must be > 0 but is %d" % max_num_steps)
if tf_session is None:
config = tf.ConfigProto(isolate_session_state=True)
with tf.Session(config=config) as tf_session:
return mc_estimator(
mean_est=mean_est,
mean_sq_est=mean_sq_est,
batch_size=batch_size,
key_placeholder=key_placeholder,
feed_dict=feed_dict,
tol=tol,
confidence=confidence,
max_num_steps=max_num_steps,
tf_session=tf_session,
tol_is_relative=tol_is_relative)
mean_est_eval = np.zeros(mean_est.get_shape().as_list())
mean_sq_est_eval = np.zeros(mean_sq_est.get_shape().as_list())
num_samples = 0
num_steps = 0
  def _run_mc_step():
"""Run one mini-batch step of the Monte-Carlo method."""
feed_dict.update({key_placeholder: num_steps})
batch_mean_est, batch_mean_sq_est = tf_session.run((mean_est, mean_sq_est),
feed_dict=feed_dict)
new_mean_est_eval = util.running_mean_estimate(mean_est_eval,
batch_mean_est, num_samples,
batch_size)
new_mean_sq_est_eval = util.running_mean_estimate(mean_sq_est_eval,
batch_mean_sq_est,
num_samples, batch_size)
std_est_eval = util.stddev_est(new_mean_est_eval, new_mean_sq_est_eval)
half_conf_interval = util.half_clt_conf_interval(confidence,
num_samples + batch_size,
std_est_eval)
return new_mean_est_eval, new_mean_sq_est_eval, half_conf_interval
mean_est_eval, mean_sq_est_eval, half_conf_interval = _run_mc_step()
converged = True
effective_tol = (
tol if not tol_is_relative else tol * _min(np.abs(mean_est_eval)))
if effective_tol == 0.0:
logging.warning("Zero effective tolerance. The run may not converge.")
while _max(half_conf_interval) > effective_tol:
logging.info("Monte Carlo estimation step %d", num_steps)
logging.info("Half confidence interval %s", half_conf_interval)
mean_est_eval, mean_sq_est_eval, half_conf_interval = _run_mc_step()
effective_tol = (
tol if not tol_is_relative else tol * _min(np.abs(mean_est_eval)))
if effective_tol == 0.0:
logging.warning("Zero effective tolerance. The run may not converge.")
num_steps += 1
num_samples += batch_size
if num_steps >= max_num_steps:
converged = False
break
logging.info("Monte Carlo estimation step %d", num_steps)
logging.info("Half confidence interval %s", half_conf_interval)
return mean_est_eval, half_conf_interval, converged
|
the-stack_0_19172 | #!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
"""
Description: Base method for custom commands
Class: BaseCommand
"""
import argparse
from collections import namedtuple
from abc import abstractmethod
class BaseCommand:
"""
Description: Basic attributes used for command invocation
Attributes:
params(argparse.Namespace): Command line parameters
sub_parse: Subcommand parameters
sub_args: namedtuple for generating subcommand parameters
"""
parser = argparse.ArgumentParser(
description="A-Ops is a project to mantain servers automatically.",
prog='A-Ops')
subparsers = parser.add_subparsers(title='command',
help='A-Ops is a project to maintain servers automatically.',
required=True,
dest='sub_parse_name',
metavar='start')
def __init__(self):
"""
Description: Instance initialization
"""
self.params = []
self.sub_parse = None
self.sub_args = namedtuple(
'sub_args',
['sub_command', 'help', 'default', 'action', 'nargs', 'required', 'choices']
)
def add_subcommand(self, sub_command, help_desc):
"""
        Description: add a subcommand with its help description to the top-level parser
Args:
sub_command(str): sub command of the cli
help_desc(str): help description of the sub command
"""
self.sub_parse = BaseCommand.subparsers.add_parser(
sub_command, help=help_desc)
@staticmethod
def register_command(command):
"""
Description: Registration of commands
Args:
command: commands for aops
"""
command.sub_parse.set_defaults(func=command.do_command)
@classmethod
def args_parser(cls):
"""
Description: argument parser
"""
args = cls.parser.parse_args()
args.func(args)
@abstractmethod
def do_command(self, params):
"""
Description: Method which wound need to be implemented by subcommands
Args:
params: Command line parameters
Returns:
"""
|
the-stack_0_19173 | import os
import tempfile
import time
import pytest
from app import create_app
from app.models import LogPost, db
@pytest.fixture(scope='module')
def app():
db_fd, db_path = tempfile.mkstemp()
cred_fd, cred_path = tempfile.mkstemp()
flask_app = create_app({
'TESTING': True,
'CRED_FILE': cred_path,
'SQLALCHEMY_DATABASE_URI': f'sqlite:///{db_path}'
})
with flask_app.app_context():
db.create_all()
yield flask_app
os.close(db_fd)
os.close(cred_fd)
os.unlink(db_path)
os.unlink(cred_path)
@pytest.fixture(autouse=True)
def app_context(app):
with app.app_context():
yield app
@pytest.fixture
def client(app_context):
return app_context.test_client()
@pytest.fixture
def authenticated_client(client):
with client.session_transaction() as session:
session['authed'] = True
return client
@pytest.fixture()
def log_post_title():
return f'titleTITLEaTIELE___{time.time()}'
@pytest.fixture
def log_post_content():
return 'contentCONTENT___CONTENT'
@pytest.fixture
def log_post_md_content():
return f'# HeadingHEADING___{time.time()}'
@pytest.fixture
def log_post(log_post_title, log_post_content):
new_log_post = LogPost(log_post_title, log_post_content, False)
db.session.add(new_log_post)
db.session.commit()
yield new_log_post
if LogPost.query.filter_by(id=new_log_post.id).first():
db.session.delete(new_log_post)
db.session.commit()
@pytest.fixture
def md_log_post(log_post_title, log_post_md_content):
new_log_post = LogPost(log_post_title, log_post_md_content, True)
db.session.add(new_log_post)
db.session.commit()
yield new_log_post
if LogPost.query.filter_by(id=new_log_post.id).first():
db.session.delete(new_log_post)
db.session.commit()
@pytest.fixture
def pinned_log_post(log_post_title, log_post_content):
new_log_post = LogPost(log_post_title, log_post_content, False)
new_log_post.is_pinned = True
db.session.add(new_log_post)
db.session.commit()
yield new_log_post
if LogPost.query.filter_by(id=new_log_post.id).first():
db.session.delete(new_log_post)
db.session.commit()
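# Illustrative sketch (the '/' route and response contents are assumptions
# about the app under test; only the fixture names come from this module):
#
#   def test_log_post_listed(client, log_post, log_post_title):
#       response = client.get('/')
#       assert log_post_title.encode() in response.data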
|
 the-stack_0_19174 | # Stack
stack = [3, 4, 5]
stack.append(6)
stack.append(6)
stack.pop()
stack.pop()
# Queue
stack.pop(0)
# A list can also be used as a queue, where the first element added is the first one retrieved ("first-in, first-out"); however, lists are not efficient for this purpose.
# Appends and pops at the end of a list are fast, but inserts and pops at the beginning are slow (because all of the other elements have to be shifted by one).
# For queues, use collections.deque, which is designed for fast appends and pops from both ends.
from collections import deque
queue = deque(["Eric", "John", "Michael"])
queue.append("Terry") # Terry arrives
print(queue.popleft())
# List comprehensions (set and dict comprehensions are also supported:)
## Set
vec = {2, 4, 6}
list_v = {3 * x for x in vec}
print(list_v)
# Dictionary
vec = {2, 4, 6}
list_v = { x :3 * x for x in vec}
print(list_v)
## List
vec = [2, 4, 6]
list_v = [3 * x for x in vec]
print(list_v)
var = [[x, x ** 2] for x in vec]
print(var)
freshfruit = [' banana', ' loganberry ', 'passion fruit ']
var = [weapon.strip() for weapon in freshfruit]
print(var)
var = [3 * x for x in vec if x > 3]
print(var)
vec1 = [2, 4, 6]
vec2 = [4, 3, -9]
print([x * y for x in vec1 for y in vec2])
print([x + y for x in vec1 for y in vec2])
print([vec1[i] * vec2[i] for i in range(len(vec1))])
print([str(round(355 / 113, i)) for i in range(1, 9)])
# Nested list comprehensions
# The following example transposes a 3x4 matrix (a list of lists) into a 4x3 list:
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
]
print([[row[i] for row in matrix] for i in range(4)])
# or
transposed = []
for i in range(4):
transposed.append([row[i] for row in matrix])
print(transposed)
# or
transposed = []
for i in range(4):
transposed_row = []
for row in matrix:
transposed_row.append(row[i])
transposed.append(transposed_row)
print(transposed)
|
the-stack_0_19178 | from os.path import exists
import sys
# supports standard format
codes_to_info = {
'ZNR' : ['\"Zendikar Rising\"', 391],
'KHM' : ['\"Kaldheim\"', 405],
'STX' : ['\"Strixhaven: School of Mages\"', 382],
'STA' : ['\"Strixhaven: Mystical Archive\"', 126],
'AFR' : ['\"Dungeons & Dragons: Adventures in the Forgotten Realms\"', 402],
'MID' : ['\"Innistrad: Midnight Hunt\"', 391],
'VOW' : ['\"Innistrad: Crimson Vow\"', 412],
'NEO' : ['\"Kamigawa: Neon Dynasty\"', 512]
}
# will be strings in query but numbers while stored
rarity_to_int = {
'C' : '1',
'U' : '2',
'R' : '3',
'M' : '4'
}
# writes the SQL INSERT statement for the card set to the sql file
def insert_card_set(set_code, set_name, max_count, sql_file):
# insert the set into the card_sets table
sql_file.write("-- create the card sets\n")
sql_file.write("INSERT INTO card_sets (set_code, set_name, max_count)\n")
sql_file.write("VALUES (\"" + set_code + "\", " + set_name + ", " + max_count + ");\n\n")
# looks through the cards raw data and returns all unique mana costs.
def unique_mana_costs(cards):
manas = {"[L]"}
for card in cards:
mana = card[3]
if mana != "" and mana != "cost" and mana not in manas:
manas.add(mana)
return manas
# takes a list of mana costs and inserts them as sql syntax in sql_file
def populate_mana(manas, sql_file):
sql_file.write("-- populate mana table\n")
sql_file.write("INSERT OR IGNORE INTO mana_costs (cost) VALUES\n")
width = 0
for mana in manas:
        if width >= 50: # formatting, aesthetic, never let a line get too wide
sql_file.write(",\n")
width = 0
s = ", (\"" + mana + "\")"
if width == 0: # if first like, remove the comma and space
s = s[2:len(s)]
sql_file.write(s)
width += len(s)
# takes a list of cards and inserts them as sql syntax in sql_file
def populate_cards(cards, sql_file):
sql_file.write(";\n\n-- populate with card data\n")
sql_file.write("INSERT INTO cards (set_code, card_number, card_name, mana_id, rarity) VALUES\n")
for card in cards:
set_code = card[0]
card_number = card[1]
card_name = card[2]
# make sure lands are represented as [L] cost
mana_cost = card[3] if card[3] != "" else "[L]"
card_rarity = card[4]
# writing ("exp_code", card_nbr, "card_name", (QUERY FOR MANA), rarity)
sql_file.write(
"(\"" + set_code +
"\", " + card_number +
", \"" + card_name +
"\", (SELECT mana_id FROM mana_costs WHERE cost = \"" + mana_cost + "\")" +
", " + rarity_to_int.get(card_rarity) + ")")
if int(card_number) == codes_to_info.get(set_code)[1]: # if it is the last card
sql_file.write(";\n") # append a semicolon
        else: # else -- it's not the last card
sql_file.write(",\n") # so append a comma
def main(set_code):
if set_code not in codes_to_info:
if set_code == "ALL":
print("generating for all known sets!")
for code in codes_to_info.keys():
main(code)
print(code + "... done!")
exit(0)
else:
print("ERROR: UNKNOWN_SET [" + set_code + "]")
exit(1)
if not exists("../data/" + set_code + ".csv"):
print("ERROR: UNKNOWN_FILE [../data/" + set_code + ".csv]")
exit(1)
set_info = codes_to_info.get(set_code)
sql_file = open(set_code + ".sql", "w")
csv_file = open("../data/" + set_code + ".csv", "r")
csv_raw = csv_file.read().splitlines()
# cards filtered where cardnumber MUST be numeric (skipping alternate arts)
cards = [c.split(";") for c in csv_raw if c.split(";")[1].isnumeric()]
# insert the actual set into DB
insert_card_set(set_code, set_info[0], str(set_info[1]), sql_file)
# find all unique mana costs
manas = unique_mana_costs(cards)
# now we can insert the unique mana costs (reducing number of values passed to DB)
populate_mana(manas, sql_file)
# finally, insert the actual cards
populate_cards(cards, sql_file)
if __name__ == '__main__':
if len(sys.argv) != 2:
print("usage: python3 csv2sql <set_code>")
elif len(sys.argv[1]) != 3:
print("must provide a set_code with 3 letters")
elif exists(sys.argv[1].upper() + ".sql"):
print("output file exists, this program will never overwrite!")
else:
set_code = sys.argv[1].upper()
main(set_code)
# assume git file tree
# take set code as argument
# check if csv exists
# start parsing
|
the-stack_0_19180 | # -*- coding: utf-8 -*-
from .GaugeGroup import GaugeGroup
from .Math import expand, flattenedTensorPos, flattenedTensorInds
from .Symbols import mSymbol, mMul, Identity
from .Tensors import TensorDic, Tensor, tensorContract, tensorAdd, tensorMul
from .Trace import Trace, trace
from sympy import flatten, Function, Mul, Pow, Symbol, Wild
# Definition of global dummy indices across all modules
# No need to import / redefine them elsewhere
# All are in the form ' x_ '
import builtins
wildSymbs = ['A','B','C','D','E','F','G',
'a','b','c','d','e','f','g','h',
'i','j','k','l','m','n','o','p',
'q','r','s','t']
for s in wildSymbs:
exec('builtins.'+s+'_ = Wild(\''+s+'\')')
def splitPow(expr, deep=False):
if type(expr) == list or type(expr) == tuple:
coeff = Mul(*[el for el in expr if el.is_number])
if coeff != 1:
return [coeff] + splitPow([el for el in expr if not el.is_number], deep=deep)
else:
return flatten([splitPow(el, deep=deep) for el in expr if not el.is_number])
if isinstance(expr, Pow):
if not isinstance(expr.args[1], Symbol):
res = expr.args[1]*[expr.args[0]]
else:
if isinstance(expr.args[0], Pow):
return expr.args[0].args[1]*[expr.args[0].args[0]**expr.args[1]]
return [expr]
if not deep:
return res
return flatten([splitPow(el, deep=deep) for el in res])
if isinstance(expr, Mul):
return splitPow(expr.args, deep=deep)
if isinstance(expr, Trace):
return [expr]
else:
return [expr]
def replaceKey(dic, oldKey, newKey, newVal = None):
newDic = {}
if type(newKey) != tuple:
for k,v in dic.items():
if k != oldKey:
newDic[k] = v
else:
newDic[newKey] = newVal if newVal is not None else v
else:
for k,v in dic.items():
if k != oldKey:
newDic[k] = v
else:
for i, key in enumerate(newKey):
newDic[key] = newVal[i] if newVal is not None else v
return newDic
def insertKey(dic, afterWhich, newKey, newVal):
newDic = {}
for k,v in dic.items():
newDic[k] = v
if k == afterWhich:
newDic[newKey] = newVal
return newDic
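# Illustrative examples (derived from the functions above, not from the
# original module):
#   replaceKey({'a': 1, 'b': 2}, 'a', 'c')            -> {'c': 1, 'b': 2}
#   replaceKey({'a': 1}, 'a', ('x', 'y'), (3, 4))     -> {'x': 3, 'y': 4}
#   insertKey({'a': 1, 'b': 2}, 'a', 'c', 9)          -> {'a': 1, 'c': 9, 'b': 2}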
|
the-stack_0_19181 | # python3
# Copyright 2019 The gVisor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A machine producer which produces machine objects using `gcloud`.
Machine producers produce valid harness.Machine objects which are backed by
real machines. This producer produces those machines on the given user's GCP
account using the `gcloud` tool.
GCloudProducer creates instances on the given GCP account named like:
`machine-XXXXXXX-XXXX-XXXX-XXXXXXXXXXXX` in a randomized fashion such that name
collisions with user instances shouldn't happen.
Typical usage example:
producer = GCloudProducer(args)
machines = producer.get_machines(NUM_MACHINES)
# run stuff on machines with machines[i].run(CMD)
producer.release_machines(NUM_MACHINES)
"""
import datetime
import json
import subprocess
import threading
from typing import List, Dict, Any
import uuid
from benchmarks.harness import machine
from benchmarks.harness.machine_producers import gcloud_mock_recorder
from benchmarks.harness.machine_producers import machine_producer
class GCloudProducer(machine_producer.MachineProducer):
"""Implementation of MachineProducer backed by GCP.
Produces Machine objects backed by GCP instances.
Attributes:
image: image name as a string.
zone: string to a valid GCP zone.
    machine_type: type of GCP instance to create (e.g. n1-standard-4).
installers: list of installers post-boot.
    ssh_key_file: path to a valid ssh private key. See README on valid ssh keys.
ssh_user: string of user name for ssh_key
ssh_password: string of password for ssh key
mock: a mock printer which will print mock data if required. Mock data is
recorded output from subprocess calls (returncode, stdout, args).
    condition: mutex for this class around machine creation and deletion.
"""
def __init__(self,
image: str,
zone: str,
machine_type: str,
installers: List[str],
ssh_key_file: str,
ssh_user: str,
ssh_password: str,
mock: gcloud_mock_recorder.MockPrinter = None):
self.image = image
self.zone = zone
self.machine_type = machine_type
self.installers = installers
self.ssh_key_file = ssh_key_file
self.ssh_user = ssh_user
self.ssh_password = ssh_password
self.mock = mock
self.condition = threading.Condition()
def get_machines(self, num_machines: int) -> List[machine.Machine]:
"""Returns requested number of machines backed by GCP instances."""
if num_machines <= 0:
raise ValueError(
"Cannot ask for {num} machines!".format(num=num_machines))
with self.condition:
names = self._get_unique_names(num_machines)
instances = self._build_instances(names)
self._add_ssh_key_to_instances(names)
machines = self._machines_from_instances(instances)
# Install all bits in lock-step.
#
# This will perform paralell installations for however many machines we
# have, but it's easy to track errors because if installing (a, b, c), we
# won't install "c" until "b" is installed on all machines.
for installer in self.installers:
threads = [None] * len(machines)
results = [False] * len(machines)
for i in range(len(machines)):
threads[i] = threading.Thread(
target=machines[i].install, args=(installer, results, i))
threads[i].start()
for thread in threads:
thread.join()
for result in results:
if not result:
raise NotImplementedError(
"Installers failed on at least one machine!")
# Add this user to each machine's docker group.
for m in machines:
m.run("sudo setfacl -m user:$USER:rw /var/run/docker.sock")
return machines
def release_machines(self, machine_list: List[machine.Machine]):
"""Releases the requested number of machines, deleting the instances."""
if not machine_list:
return
cmd = "gcloud compute instances delete --quiet".split(" ")
names = [str(m) for m in machine_list]
cmd.extend(names)
cmd.append("--zone={zone}".format(zone=self.zone))
self._run_command(cmd, detach=True)
def _machines_from_instances(
self, instances: List[Dict[str, Any]]) -> List[machine.Machine]:
"""Creates Machine Objects from json data describing created instances."""
machines = []
for instance in instances:
name = instance["name"]
kwargs = {
"hostname":
instance["networkInterfaces"][0]["accessConfigs"][0]["natIP"],
"key_path":
self.ssh_key_file,
"username":
self.ssh_user,
"key_password":
self.ssh_password
}
machines.append(machine.RemoteMachine(name=name, **kwargs))
return machines
def _get_unique_names(self, num_names) -> List[str]:
"""Returns num_names unique names based on data from the GCP project."""
return ["machine-" + str(uuid.uuid4()) for _ in range(0, num_names)]
def _build_instances(self, names: List[str]) -> List[Dict[str, Any]]:
"""Creates instances using gcloud command.
Runs the command `gcloud compute instances create` and returns json data
on created instances on success. Creates len(names) instances, one for each
name.
Args:
names: list of names of instances to create.
Returns:
List of json data describing created machines.
"""
if not names:
raise ValueError(
"_build_instances cannot create instances without names.")
cmd = "gcloud compute instances create".split(" ")
cmd.extend(names)
cmd.append("--image=" + self.image)
cmd.append("--zone=" + self.zone)
cmd.append("--machine-type=" + self.machine_type)
res = self._run_command(cmd)
data = res.stdout
data = str(data, "utf-8") if isinstance(data, (bytes, bytearray)) else data
return json.loads(data)
def _add_ssh_key_to_instances(self, names: List[str]) -> None:
"""Adds ssh key to instances by calling gcloud ssh command.
Runs the command `gcloud compute ssh instance_name` on list of images by
name. Tries to ssh into given instance.
Args:
names: list of machine names to which to add the ssh-key
self.ssh_key_file.
Raises:
subprocess.CalledProcessError: when underlying subprocess call returns an
error other than 255 (Connection closed by remote host).
TimeoutError: when 3 unsuccessful tries to ssh into the host return 255.
"""
for name in names:
cmd = "gcloud compute ssh {user}@{name}".format(
user=self.ssh_user, name=name).split(" ")
cmd.append("--ssh-key-file={key}".format(key=self.ssh_key_file))
cmd.append("--zone={zone}".format(zone=self.zone))
cmd.append("--command=uname")
timeout = datetime.timedelta(seconds=5 * 60)
start = datetime.datetime.now()
while datetime.datetime.now() <= timeout + start:
try:
self._run_command(cmd)
break
except subprocess.CalledProcessError:
if datetime.datetime.now() > timeout + start:
raise TimeoutError(
"Could not SSH into instance after 5 min: {name}".format(
name=name))
def _run_command(self,
cmd: List[str],
detach: bool = False) -> [None, subprocess.CompletedProcess]:
"""Runs command as a subprocess.
Runs command as subprocess and returns the result.
If this has a mock recorder, use the record method to record the subprocess
call.
Args:
cmd: command to be run as a list of strings.
detach: if True, run the child process and don't wait for it to return.
Returns:
Completed process object to be parsed by caller or None if detach=True.
Raises:
CalledProcessError: if subprocess.run returns an error.
"""
cmd = cmd + ["--format=json"]
if detach:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if self.mock:
out, _ = p.communicate()
self.mock.record(
subprocess.CompletedProcess(
returncode=p.returncode, stdout=out, args=p.args))
return
res = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if self.mock:
self.mock.record(res)
if res.returncode != 0:
raise subprocess.CalledProcessError(
cmd=" ".join(res.args),
output=res.stdout,
stderr=res.stderr,
returncode=res.returncode)
return res
|
the-stack_0_19185 | import math, os
import numpy as np
from snsdl.utils import paths
from snsdl.utils.splitds import SplitDataset
from snsdl.keras.generators.base import ImgFsSequenceGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils import shuffle as skshuffle
import logging
logger = logging.getLogger(__name__)
class ImgFsBatchGenerator():
def __init__(self, dataset_path, balanced=False, test_ratio=0.25, val_ratio=0.0, batch_size=32, binary_classification=False, shuffle=False, preprocessors=[]):
"""
Class constructor.
Attributes:
dataset_path: Images directory.
test_ratio: Ratio from the full dataset for testing. If unspecified, test_ratio will default to 0.25.
val_ratio: Ratio from the train dataset for validation. If unspecified, val_ratio will default to 0.0
batch_size: Number of samples to be generated. If unspecified, batch_size will default to 32.
                binary_classification: Set True if it is a binary classification (sigmoid) problem. If unspecified, binary_classification will default to False.
shuffle: If true shuffles dataset before each epoch. If unspecified, shuffle will default to False.
preprocessors: Array of objects to preprocess the data.
"""
        logger.info('Creating a new batch generator for images.')
if not (test_ratio > 0.0 and test_ratio < 1.0):
raise ValueError('test_ratio must be > 0.0 and < 1.0')
if not (val_ratio >= 0.0 and val_ratio < 1.0):
raise ValueError('val_ratio must be >= 0.0 and < 1.0')
if not (batch_size > 0):
raise ValueError('batch_size must be > 0')
self.dataset_path = dataset_path
self.test_ratio = test_ratio
self.val_ratio = val_ratio
self.batch_size = batch_size
self.binary_classification = binary_classification
self.preprocessors = preprocessors
self.shuffle = shuffle
self.balanced = balanced
self.trainGenerator = None
self.testGenerator = None
self.valGenerator = None
self._data = SplitDataset.previewSplit(self.dataset_path, balanced=self.balanced, test_ratio=self.test_ratio, val_ratio=self.val_ratio, shuffle=True, type='img')
# Dict for train, test and val dataset
self.train_test_val = {'train': [], 'test': [], 'val':[]}
# Dict for train, test and val labels
self.labels_train_test_val = {'train': [], 'test': [], 'val':[]}
# Lenght of the dataset
self.datasetsize = 0
# Encoder
self.le = None
# Encode labels
if binary_classification:
self.le = LabelEncoder()
else:
self.le = LabelBinarizer()
# Encode classes
classes_name = list(self._data.keys())
class_codes = self.le.fit_transform(classes_name)
# Create a mapping for the class name and its hot encode value
self.encoded_classes = dict(zip(classes_name, class_codes))
if binary_classification:
self.class_indices = dict(zip(classes_name, class_codes))
else:
self.class_indices = dict(zip(classes_name, list(np.argmax(class_codes, axis=-1))))
# Re-organize the list of files
for label in self._data.keys():
for ds in self._data[label]:
for f in self._data[label][ds]:
self.train_test_val[ds].append(f)
self.labels_train_test_val[ds].append(self.encoded_classes[label])
self.datasetsize += 1
# Shuffle the arrays
self.train_test_val['train'], self.labels_train_test_val['train'] = skshuffle(self.train_test_val['train'], self.labels_train_test_val['train'], random_state=0)
self.train_test_val['test'], self.labels_train_test_val['test'] = skshuffle(self.train_test_val['test'], self.labels_train_test_val['test'], random_state=0)
self.train_test_val['val'], self.labels_train_test_val['val'] = skshuffle(self.train_test_val['val'], self.labels_train_test_val['val'], random_state=0)
self.__info()
@property
def train(self):
"""Get an instance of a train generator"""
if self.trainGenerator is None:
self.trainGenerator = ImgFsSequenceGenerator(self.train_test_val['train'], self.labels_train_test_val['train'], batch_size=self.batch_size, shuffle=self.shuffle, preprocessors=self.preprocessors, filenames=self.__getFilenames('train'), classes=self.__getTrueIndexClasses('train'), class_indices=self.class_indices)
return self.trainGenerator
@property
def test(self):
"""Get an instance of a test generator"""
if self.testGenerator is None:
self.testGenerator = ImgFsSequenceGenerator(self.train_test_val['test'], self.labels_train_test_val['test'], batch_size=self.batch_size, shuffle=self.shuffle, preprocessors=self.preprocessors, filenames=self.__getFilenames('test'), classes=self.__getTrueIndexClasses('test'), class_indices=self.class_indices)
return self.testGenerator
@property
def val(self):
"""Get an instance of a validation generator"""
if self.valGenerator is None:
self.valGenerator = ImgFsSequenceGenerator(self.train_test_val['val'], self.labels_train_test_val['val'], batch_size=self.batch_size, shuffle=self.shuffle, preprocessors=self.preprocessors, filenames=self.__getFilenames('val'), classes=self.__getTrueIndexClasses('val'), class_indices=self.class_indices)
return self.valGenerator
def getNumberOfClasses(self):
"""Get the number of training classes."""
return len(self._data.keys())
def getDatasetSize(self, dataset):
"""Get the size of a given dataset (full / train / test / val)."""
if dataset == 'full':
return self.datasetsize
else:
return len(self.train_test_val[dataset])
def getTrueClasses(self, dataset):
"""Get an array with the true classes for a given dataset (train / test / val)."""
encoded_indx = self.__getTrueIndexClasses(dataset)
return [list(self.class_indices.keys())[list(self.class_indices.values()).index(s)] for s in encoded_indx]
def __getTrueIndexClasses(self, dataset):
encoded = [self.labels_train_test_val[dataset][i] for i, s in enumerate(self.train_test_val[dataset])]
if dataset == 'val' and len(encoded) == 0:
encoded_indx = []
else:
encoded_indx = list(np.argmax(encoded, axis=-1))
return encoded_indx
def __getFilenames(self, dataset):
return self.train_test_val[dataset]
def __info(self):
print('')
print('Dataset information:')
print(' Dataset size: {}'.format(self.getDatasetSize('full')))
print(' Training size: {}'.format(self.getDatasetSize('train')))
print(' Testing size: {}'.format(self.getDatasetSize('test')))
print(' Validation size: {}'.format(self.getDatasetSize('val')))
print(' # of classes: {}'.format(self.getNumberOfClasses()))
print('') |
the-stack_0_19186 | from six.moves import xrange # pylint: disable=redefined-builtin
from datetime import datetime
import math
import time
import numpy as np
import tensorflow.python.platform
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 128, """Batch size.""")
tf.app.flags.DEFINE_integer('num_batches', 100, """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('forward_only', False,
"""Only run the forward pass.""")
tf.app.flags.DEFINE_boolean('forward_backward_only', False,
"""Only run the forward-forward pass.""")
tf.app.flags.DEFINE_string('data_format', 'NCHW',
"""The data format for Convnet operations.
Can be either NHWC or NCHW.
""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
parameters = []
conv_counter = 1
pool_counter = 1
affine_counter = 1
def _conv(inpOp, nIn, nOut, kH, kW, dH, dW, padType, wd=0.005, act=True):
global conv_counter
global parameters
name = 'conv' + str(conv_counter)
conv_counter += 1
with tf.name_scope(name) as scope:
kernel = tf.Variable(
tf.truncated_normal(
[kH, kW, nIn, nOut], dtype=tf.float32, stddev=1e-1),
name='weights')
if wd is not None:
weight_decay = tf.mul(tf.nn.l2_loss(kernel), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
if FLAGS.data_format == 'NCHW':
strides = [1, 1, dH, dW]
else:
strides = [1, dH, dW, 1]
conv = tf.nn.conv2d(
inpOp,
kernel,
strides,
padding=padType,
data_format=FLAGS.data_format)
biases = tf.Variable(
tf.constant(
0.0, shape=[nOut], dtype=tf.float32),
trainable=True,
name='biases')
bias = tf.reshape(
tf.nn.bias_add(
conv, biases, data_format=FLAGS.data_format),
conv.get_shape())
conv1 = tf.nn.relu(bias, name=scope) if act else bias
parameters += [kernel, biases]
return conv1
def _affine(inpOp, nIn, nOut, wd=None, act=True):
global affine_counter
global parameters
name = 'affine' + str(affine_counter)
affine_counter += 1
with tf.name_scope(name) as scope:
kernel = tf.Variable(
tf.truncated_normal(
[nIn, nOut], dtype=tf.float32, stddev=1e-1),
name='weights')
if wd is not None:
weight_decay = tf.mul(tf.nn.l2_loss(kernel), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
biases = tf.Variable(
tf.constant(
0.0, shape=[nOut], dtype=tf.float32),
trainable=True,
name='biases')
affine1 = tf.nn.relu_layer(
inpOp, kernel, biases,
name=name) if act else tf.matmul(inpOp, kernel) + biases
parameters += [kernel, biases]
return affine1
def _mpool(inpOp, kH, kW, dH, dW, padding):
global pool_counter
global parameters
name = 'pool' + str(pool_counter)
pool_counter += 1
if FLAGS.data_format == 'NCHW':
ksize = [1, 1, kH, kW]
strides = [1, 1, dH, dW]
else:
ksize = [1, kH, kW, 1]
strides = [1, dH, dW, 1]
return tf.nn.max_pool(
inpOp,
ksize=ksize,
strides=strides,
padding=padding,
data_format=FLAGS.data_format,
name=name)
def _apool(inpOp, kH, kW, dH, dW, padding):
global pool_counter
global parameters
name = 'pool' + str(pool_counter)
pool_counter += 1
if FLAGS.data_format == 'NCHW':
ksize = [1, 1, kH, kW]
strides = [1, 1, dH, dW]
else:
ksize = [1, kH, kW, 1]
strides = [1, dH, dW, 1]
return tf.nn.avg_pool(
inpOp,
ksize=ksize,
strides=strides,
padding=padding,
data_format=FLAGS.data_format,
name=name)
def _norm(name, l_input, lsize=4):
return tf.nn.lrn(l_input,
lsize,
bias=1.0,
alpha=0.001 / 9.0,
beta=0.75,
name=name)
def loss(logits, labels):
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(concated,
tf.pack([batch_size, 10]), 1.0, 0.0)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits, onehot_labels, name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
return loss
def get_incoming_shape(incoming):
""" Returns the incoming data shape """
if isinstance(incoming, tf.Tensor):
return incoming.get_shape().as_list()
    elif type(incoming) in [np.ndarray, list, tuple]:
return np.shape(incoming)
else:
raise Exception("Invalid incoming layer.")
def inference(images):
conv1 = _conv(images, 3, 32, 5, 5, 1, 1, 'SAME')
pool1 = _mpool(conv1, 3, 3, 2, 2, 'SAME')
conv2 = _conv(pool1, 32, 32, 5, 5, 1, 1, 'SAME')
pool2 = _apool(conv2, 3, 3, 2, 2, 'SAME')
conv3 = _conv(pool2, 32, 64, 5, 5, 1, 1, 'SAME')
pool3 = _apool(conv3, 3, 3, 2, 2, 'SAME')
resh1 = tf.reshape(pool3, [-1, 64 * 4 * 4])
affn1 = _affine(resh1, 64 * 4 * 4, 64)
affn2 = _affine(affn1, 64, 10, act=False)
print('conv1:', get_incoming_shape(conv1))
print('pool1:', get_incoming_shape(pool1))
print('conv2:', get_incoming_shape(conv2))
print('pool2:', get_incoming_shape(pool2))
print('conv3:', get_incoming_shape(conv3))
print('pool3:', get_incoming_shape(pool3))
return affn2
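# The model above is a small CIFAR-10-style convnet: three 5x5 convolution
# blocks (32, 32 and 64 filters), each followed by 3x3 pooling with stride 2,
# then a 64-unit fully connected layer and a 10-way linear output.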
def time_tensorflow_run(session, target, info_string):
num_steps_burn_in = 10
total_duration = 0.0
total_duration_squared = 0.0
if not isinstance(target, list):
target = [target]
target_op = tf.group(*target)
for i in xrange(FLAGS.num_batches + num_steps_burn_in):
start_time = time.time()
_ = session.run(target_op)
duration = time.time() - start_time
if i > num_steps_burn_in:
if not i % 10:
print('%s: step %d, duration = %.3f' %
(datetime.now(), i - num_steps_burn_in, duration))
total_duration += duration
total_duration_squared += duration * duration
mn = total_duration / FLAGS.num_batches
vr = total_duration_squared / FLAGS.num_batches - mn * mn
sd = math.sqrt(vr)
print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
(datetime.now(), info_string, FLAGS.num_batches, mn, sd))
def run_benchmark():
global parameters
with tf.Graph().as_default():
# Generate some dummy images.
image_size = 32
# Note that our padding definition is slightly different the cuda-convnet.
# In order to force the model to start with the same activations sizes,
# we add 3 to the image_size and employ VALID padding above.
if FLAGS.data_format == 'NCHW':
image_shape = [FLAGS.batch_size, 3, image_size, image_size]
else:
image_shape = [FLAGS.batch_size, image_size, image_size, 3]
images = tf.get_variable(
'image',
image_shape,
initializer=tf.truncated_normal_initializer(
stddev=0.1, dtype=tf.float32),
dtype=tf.float32,
trainable=False)
labels = tf.get_variable(
'label', [FLAGS.batch_size],
initializer=tf.constant_initializer(1),
dtype=tf.int32,
trainable=False)
# Build a Graph that computes the logits predictions from the
# inference model.
last_layer = inference(images)
objective = loss(last_layer, labels)
# Compute gradients.
opt = tf.train.MomentumOptimizer(0.001, 0.9)
grads = opt.compute_gradients(objective)
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(
0.0, dtype=tf.float32),
trainable=False,
dtype=tf.float32)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(0.9, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables(
))
# Build an initialization operation.
init = tf.initialize_all_variables()
# Start running operations on the Graph.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
run_forward = True
run_forward_backward = True
if FLAGS.forward_only and FLAGS.forward_backward_only:
raise ValueError("Cannot specify --forward_only and "
"--forward_backward_only at the same time.")
if FLAGS.forward_only:
run_forward_backward = False
elif FLAGS.forward_backward_only:
run_forward = False
if run_forward:
# Run the forward benchmark.
time_tensorflow_run(sess, last_layer, "Forward")
if run_forward_backward:
with tf.control_dependencies(
[apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
time_tensorflow_run(sess, [train_op, objective], "Forward-backward")
def main(_):
run_benchmark()
if __name__ == '__main__':
tf.app.run()
|
the-stack_0_19187 | import os
import six
from backports.configparser import ConfigParser
def _read_default_config_file(file_name):
"""
https://github.com/apache/incubator-airflow/blob/master/airflow/configuration.py
https://github.com/apache/incubator-airflow/blob/master/LICENSE
"""
templates_dir = os.path.join(os.path.dirname(__file__), "config_templates")
file_path = os.path.join(templates_dir, file_name)
with open(file_path, encoding="utf-8") as f:
return f.read()
def expand_env_var(env_var):
"""
https://github.com/apache/incubator-airflow/blob/master/airflow/configuration.py
https://github.com/apache/incubator-airflow/blob/master/LICENSE
Expands (potentially nested) env vars by repeatedly applying
`expandvars` and `expanduser` until interpolation stops having
any effect.
"""
if not env_var:
return env_var
while True:
interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))
if interpolated == env_var:
return interpolated
else:
env_var = interpolated
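# Example (illustrative): with HOME=/home/alice, both expand_env_var("~/excalibur")
# and expand_env_var("$HOME/excalibur") yield "/home/alice/excalibur"; nested
# variables keep expanding until the value stops changing.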
DEFAULT_CONFIG = _read_default_config_file("default_excalibur.cfg")
class ExcaliburConfigParser(ConfigParser):
def __init__(self, default_config=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.excalibur_defaults = ConfigParser(*args, **kwargs)
if default_config is not None:
self.excalibur_defaults.read_string(default_config)
self.is_validated = False
def _validate(self):
if self.get(
"core", "executor"
) != "SequentialExecutor" and "sqlite" in self.get("core", "sql_alchemy_conn"):
raise ValueError(
"Cannot use sqlite with the {}".format(self.get("core", "executor"))
)
self.is_validated = True
def get(self, section, key, **kwargs):
section = str(section).lower()
key = str(key).lower()
if super().has_option(section, key):
return expand_env_var(super().get(section, key, **kwargs))
if self.excalibur_defaults.has_option(section, key):
return expand_env_var(self.excalibur_defaults.get(section, key, **kwargs))
else:
raise ValueError(
"section/key [{section}/{key}] not found in"
" config".format(**locals())
)
def read(self, filename):
super().read(filename)
self._validate()
def mkdirs(path):
if not os.path.isdir(path):
os.makedirs(path)
if "EXCALIBUR_HOME" not in os.environ:
EXCALIBUR_HOME = expand_env_var("~/excalibur")
else:
EXCALIBUR_HOME = expand_env_var(os.environ["EXCALIBUR_HOME"])
mkdirs(EXCALIBUR_HOME)
if "EXCALIBUR_CONFIG" not in os.environ:
EXCALIBUR_CONFIG = EXCALIBUR_HOME + "/excalibur.cfg"
else:
EXCALIBUR_CONFIG = expand_env_var(os.environ["EXCALIBUR_CONFIG"])
def parameterized_config(template):
"""
https://github.com/apache/incubator-airflow/blob/master/airflow/configuration.py
https://github.com/apache/incubator-airflow/blob/master/LICENSE
Generates a configuration from the provided template + variables defined in
current scope
:param template: a config content templated with {{variables}}
"""
all_vars = {k: v for d in [globals(), locals()] for k, v in d.items()}
return template.format(**all_vars)
if not os.path.isfile(EXCALIBUR_CONFIG):
print(f"Creating new Excalibur configuration file in: {EXCALIBUR_CONFIG}")
with open(EXCALIBUR_CONFIG, "w") as f:
cfg = parameterized_config(DEFAULT_CONFIG)
if six.PY2:
cfg = cfg.encode("utf8")
f.write(cfg)
conf = ExcaliburConfigParser(default_config=parameterized_config(DEFAULT_CONFIG))
conf.read(EXCALIBUR_CONFIG)
# for Flask
ALLOWED_EXTENSIONS = ["pdf", "json"]
SECRET_KEY = conf.get("webserver", "SECRET_KEY")
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
PDFS_FOLDER = os.path.join(PROJECT_ROOT, "www/static/uploads")
USING_SQLITE = (
True if conf.get("core", "SQL_ALCHEMY_CONN").startswith("sqlite") else False
)
get = conf.get
has_option = conf.has_option
|
the-stack_0_19188 | # 323. Number of Connected Components in an Undirected Graph
class Solution:
# Depth-First Search
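    # Time: O(V + E), Space: O(V + E) for the adjacency list, visited array and
    # DFS recursion stack.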
def countComponents(self, n: int, edges: list[list[int]]) -> int:
count = 0
visited = [False] * n
adjacency = [[] for _ in range(n)]
for a, b in edges:
adjacency[a].append(b)
adjacency[b].append(a)
def dfs(node: int) -> None:
visited[node] = True
for next in adjacency[node]:
if not visited[next]:
dfs(next)
for i in range(n):
if not visited[i]:
dfs(i)
count += 1
return count |
the-stack_0_19189 | ### apply the normalisation (acquired on MC-simulations) to the data. For documentation, see the MC-equivalent
from functools import partial
from os import path
import logging
import numpy as np
import pandas as pd
from tqdm import tqdm
import h5py as h5
from dataprepro import csv2hdf5 as cth
# import dask.array as da
logger = logging.getLogger(__name__)
tqdm = partial(tqdm, mininterval=30.0)
NORMPREFIX = "normalized_"
PIXELFNAME = "pixelwise_norm.txt"
MERGEDFNAME = "mean_pixelwise_datafordata_norm.txt"
DATAFORMCNORM = "mean_pixelwise_dataformc_norm.txt"
OLDDIR = "/telmerged/"
NEWDIR = "/normalized/"
MEAN_MC_NORMNAME = "mean_pixelwise_mc_norm.txt"
MEAN_DATA_NORMNAME = "mean_pixelwise_data_norm.txt"
STANDARD_NORMFILENAME = "pixelwise_norm.txt"
def write_pixelnormfile(mean, std_dev, opath, fname=PIXELFNAME):
outfname = path.join(opath, fname)
df = pd.DataFrame(np.vstack([mean, std_dev]).T)
logger.info(f"saving to {path.abspath(outfname)}")
df.to_csv(outfname, header=None, index=None, mode="w", sep=" ")
return outfname
def generate_pixelnormfile(infilename, opath, fname=PIXELFNAME):
h5file = h5.File(infilename, "r")
if cth.get_shape(h5file)[0] > 5*10**5:
indices = np.random.choice(range(0,cth.get_shape(h5file)[0]),
5*10**5, replace=False)
data = h5file["data"].value[indices,::]
else:
data = h5file["data"].value #read the dataset
basename = path.basename(infilename)
# logger.debug("number of nans before norm: ", np.sum(np.isnan(data.flatten())))
if "GA_" in basename:
for i in tqdm(range(0, data.shape[0])):
data[i][0] = np.log(data[i][0]) #logarithmize the energy
else:
# if the norm is generated from real data, cut of the eventlinking
data = data[::,:-3]
# logger.debug("number of nans before norm: ", np.sum(np.isnan(data.flatten())))
mean = np.mean(data, axis=0, dtype='float64') #calculate the mean for all features
std_dev = np.std(data, axis=0, dtype='float64') #calculate the standard deviation for all features
normfile = write_pixelnormfile(mean, std_dev, opath, fname=fname)
return normfile, mean, std_dev
# needs to be read from path.join(path.dirname(path.dirname(infilename)), PIXELFNAME)
def add_mean_mcenergy(mean_mean, mean_std_dev, mcnormfile):
mcnorm = pd.read_csv(mcnormfile, sep=" ", header=None)
mcnorm.columns = ['mean', 'std']
mean_mean = np.insert(mean_mean, 0, mcnorm["mean"].values[0])
mean_std_dev = np.insert(mean_std_dev, 0, mcnorm["std"].values[0])
return mean_mean, mean_std_dev
def generate_normfiles(glob, opath, mcnormfile=PIXELFNAME):
infilenames = cth.glob_and_check(glob)
means, std_devs = [], []
for infilename in tqdm(infilenames):
fname = path.basename(infilename) + "_norm.txt"
normfile, mean, std_dev = generate_pixelnormfile(infilename, opath, fname)
means.append(mean)
std_devs.append(std_dev)
means = np.array(means)
std_devs = np.array(std_devs)
assert means.shape == std_devs.shape
mean_mean = np.nanmean(means, axis=0)
mean_std_dev = np.nanmean(std_devs, axis=0)
pathname = path.dirname(path.dirname(infilenames[0]))
if "GA" in infilenames[0]:
write_pixelnormfile(mean_mean, mean_std_dev, opath, fname=MEAN_MC_NORMNAME)
else:
mean_mean, mean_std_dev = add_mean_mcenergy(mean_mean, mean_std_dev, mcnormfile)
write_pixelnormfile(mean_mean, mean_std_dev,
opath, fname=MEAN_DATA_NORMNAME)
def normalize(infilePath, normfile=None, outdir=None):
if normfile is None:
normfile = STANDARD_NORMFILENAME
# normfile = path.join(path.dirname(path.dirname(infilename)), PIXELFNAME)
f2 = pd.read_csv(normfile, sep=" ", header=None, names=["mean", "std"])
mean = f2["mean"].values
std = f2["std"].values
data = h5.File(infilePath)["data"].value.astype("float16")
if "GA_" in path.basename(infilePath):
diff = 0
data[::,0] = np.log(data[::,0])
upper = data.shape[1] -3
# if not mc file ignore the mc energy at index 0 in norm.txt
else:
diff = 1
upper = f2.shape[0] -3
# -2 for excluding az and zd
# for i in range(diff,upper-2):
# if std[i] != 0:
# testdata[:,i-diff] -= mean[i]
# testdata[:,i-diff] /= std[i]
    # mask features whose statistics are not finite
    infmaskShort = np.isinf(mean) | np.isinf(std) | np.isnan(mean) | np.isnan(std)
infmask = np.zeros(data.shape[1], dtype=bool)
infmask[:len(infmaskShort)] = infmaskShort
stdmask = (std == 0)
std[stdmask] = 1.
mean[stdmask] = 0.
data[::,0:upper-diff-2] -= mean[diff:upper-2]
data[::,0:upper-diff-2] /= std[diff:upper-2]
data[::,infmask] = 0.
# np.testing.assert_array_almost_equal(testdata ,data, decimal=5)
data[::,-1] = (data[::,-1]-(360./2))/105.
data[::,-2] = (data[::,-2]-(45.))/26.
newBasename = NORMPREFIX + path.basename(infilePath)
newDirname = ""
if outdir is None:
        newDirname = path.dirname(infilePath).replace(OLDDIR, NEWDIR)
else:
newDirname = outdir
outfilePath = path.join(newDirname, newBasename)
assert outfilePath != infilePath
np.nan_to_num(data, copy=False)
outfilename = cth.create_hdf5_from_dataset(data, outfilePath)
    logger.info('normalized %s', path.basename(outfilePath))
del data
return outfilename
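# Usage sketch (paths and the glob pattern are assumptions for illustration only):
#
#   # 1) derive per-file and averaged normalisation constants from merged MC files
#   generate_normfiles("/data/telmerged/GA_*.h5", opath="/data/norms")
#   # 2) apply a norm file to one merged file; the output lands under /normalized/
#   normalize("/data/telmerged/GA_run1.h5",
#             normfile="/data/norms/" + MEAN_MC_NORMNAME)
#
# Monte-Carlo files are recognised by the "GA_" prefix in their basename; for real
# data the last three event-linking columns are excluded from the statistics.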
|
the-stack_0_19190 | #!/usr/bin/env python3
import sys, re
from prjxray.segmaker import Segmaker
segmk = Segmaker("design.bits")
print("Loading tags from design.txt.")
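# Note: each line of design.txt is assumed to look like
# "<tile>/<tile_type>.<pip_name> <0|1>" (example format inferred from the parsing
# below, not from prjxray documentation); the part after the "." is used as the
# tag name and the trailing 0/1 as its value.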
with open("design.txt", "r") as f:
for line in f:
line, active = line.split()
tile, pip = line.split("/")
_, pip = pip.split(".")
print(tile, pip, active)
segmk.addtag(tile, pip, int(active))
segmk.compile()
segmk.write()
|
the-stack_0_19191 | class Match:
"""
    Class representing a match between two columns: the target is the column whose matches we want to find,
    the source is another column that already exists in the database, and similarity is the score between the two.
NOTE: Use the to_dict method when you want to append a match to a list of matches
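    Example (placeholder values):
        match = Match("db-1", "orders", "tbl-9", "customer_id", "col-3",
                      "db-2", "clients", "tbl-4", "client_id", "col-7",
                      similarity=0.87)
        matches = [match.to_dict]  # to_dict is a property, so no call parentheses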
"""
def __init__(self,
target_db_guid: object, target_table_name: str, target_table_guid: object, target_column_name: str,
target_column_guid: object,
source_db_guid: object, source_table_name: str, source_table_guid: object, source_column_name: str,
source_column_guid: object,
similarity: float):
self.target_db_guid = target_db_guid
self.target_table_name = target_table_name
self.target_table_guid = target_table_guid
self.target_column_name = target_column_name
self.target_column_guid = target_column_guid
self.source_db_guid = source_db_guid
self.source_table_name = source_table_name
self.source_table_guid = source_table_guid
self.source_column_name = source_column_name
self.source_column_guid = source_column_guid
self.similarity = similarity
@property
def to_dict(self):
return {"source": {"db_guid": self.source_db_guid,
"tbl_nm": self.source_table_name, "tbl_guid": self.source_table_guid,
"clm_nm": self.source_column_name, "clm_guid": self.source_column_guid},
"target": {"db_guid": self.target_db_guid,
"tbl_nm": self.target_table_name, "tbl_guid": self.target_table_guid,
"clm_nm": self.target_column_name, "clm_guid": self.target_column_guid},
"sim": self.similarity}
|
the-stack_0_19192 | import pytest
from jawiki import skkdict
d = skkdict.parse_skkdict('SKK-JISYO.jawiki', encoding='utf-8')
@pytest.mark.parametrize("kanji,yomi", [
('King Gnu', 'きんぐぬー'),
('令和', 'れいわ'),
('大トニー', 'おおとにー'),
('こちら葛飾区亀有公園前派出所', 'こちらかつしかくかめありこうえんまえはしゅつじょ'),
('内田カヲル', 'うちだかおる'),
('坂本フジヱ', 'さかもとふじえ'),
('赤トリヰ', 'あかとりい'),
('ヰセキ四国', 'いせきしこく'),
('木内キヤウ', 'きうちきょう'),
('因達羅蛇影幻魔流', 'いんだらじゃえいげんまりゅう'),
('アバンチュリエ級駆逐艦', 'あはんちゅりえきゆうくちくかん'),
('安蘇山', 'あそさん'),
('あに。', 'あにまる'),
('南夕子', 'みなみゆうこ'),
('青井惟董', 'あおいこれただ'),
('赤プル', 'あかぷる'),
('安藤孝子', 'あんどうたかこ'),
('EX大衆', 'いーえっくすたいしゅう'),
('古崤関', 'ここうかん'),
('鬼滅の刃', 'きめつのやいば'),
('鬱多羅僧', 'うったらそう'),
('三衣一鉢', 'さんねいっぱつ'),
('中川幸永', 'なかがわゆきえ'),
('姶良サティ', 'あいらさてぃ'),
('青木十良', 'あおきじゅうろう'),
('穴門みかん', 'あなとみかん'),
('Eye-Fi', 'あいふぁい'),
('ABBA', 'あば'),
('崎元酒造所', 'さきもとしゅぞうしょ'),
('志倉千代丸', 'しくらちよまる'),
('前田怜緒', 'まえだれお'),
('湊川四良兵衞', 'みなとがわしろべえ'),
('宮口しづえ', 'みやぐちしずえ'),
('吉本玲緒', 'よしもとれお'),
('若林令緒', 'わかばやしれお'),
('鷲谷いづみ', 'わしたにいずみ'),
('初井しづ枝', 'はついしずえ'),
('倉知玲鳳', 'くらちれお'),
('京山華千代', 'きょうやまはなちよ'),
('計算可能性理論', 'けいさんかのうせいりろん'),
])
def test_pair(kanji, yomi):
"""
    Entries that are required to be present in SKK-JISYO.jawiki.
"""
print([kanji, yomi, d.get(yomi)])
assert kanji in d.get(yomi)
@pytest.mark.parametrize("kanji,yomi", [
('お姉さま', 'ぼく'),
('109万本', 'いる'),
('擬餌状体', 'えすか'),
('銀河刑務所の囚人を全員脱獄させる。', 'えすか'),
('監督', 'あばんたいとる'),
('10代式守与太夫', 'しきもりよだゆう'),
('1703年の北アメリカ北東岸の襲撃', 'きたあめりかほくとうがんのしゅうげき'),
('島ぜんぶでおーきな祭', 'さい'),
('アジャリス', 'さんてぃあーご'),
('UTF-32', 'および'),
('江迎警察署 - 北部', 'および'),
('相補誤差関数', 'および'),
('二人で旅に出る理由は?', 'あいりす'),
('大切な者との記憶', 'きゅーぶ'),
('死者・行方不明者約2万2000人', 'うち'),
('死者273人', 'うち'),
('86校', 'うち'),
('INDIES', 'いんでぃーず'),
('ZAZZY', 'いんでぃーず'),
('長谷川榮', 'ゑい'),
('謝謝你,在世界的角落找到我', 'ありがとう'),
('謝謝你,在世界角落中找到我', 'ありがとう'),
('Five Colours in Her Hair', 'らいぶ'),
('LOVE Seiko Matsuda 20th Anniversary Best Selection', 'らゔ'),
('You♡I -Sweet Tuned by 5pb.-', 'ゆい'),
('尾道市土生幼稚園', 'めりー'),
('激突3&IMPACT.9', 'べーすめんともんすたー'),
('Guitar:Shin', 'しん'),
('Thank you, ROCK BANDS! 〜UNISON SQUARE GARDEN 15th Anniversary Tribute Album〜', 'さんきゅー'),
('日本初', 'かつ'),
('あなたがいるから、矢口真里', 'あなたがいるから'),
('島津安樹朗', 'あきお'),
('旭丘中学校、旭ヶ丘中学校、旭が丘中学校', 'あさひがおかちゅうがっこう'),
('旭酒造株式會社', 'あさひしゅぞう'),
('あしたはどっちだ、寺山修司', 'あしたはどっちだ'),
('青森県立木造高等学校', 'あおもりけんりつ'),
('青ヶ島酒造合資会社', 'あおがしましゅぞう'),
('御座船安宅丸', 'あたけまる'),
('福岡市立内浜小学校', 'うちはましょうがっこう'),
('東風汽車有限公司', 'とうふうきしゃ'),
('山添村立奈良県立山辺高等学校山添分校', 'やまべこうとうがっこうやまぞえぶんこう'),
('飯山愛宕中継局', 'いいやまあたご'),
('石包丁・石庖丁', 'いしぼうちょう'),
('覚醒具・打出の大槌', 'うちでのおおづち'),
('緒方三社川越し祭り', 'かわごしまつり'),
('無限責任広部銀行', 'ひろべぎんこう'),
('弁辺駅', 'べんべ'),
('福岡市立愛宕小学校', 'あたごしょうがっこう'),
])
def test_no_pair(kanji, yomi):
"""
    Entries that must NOT be present in SKK-JISYO.jawiki.
"""
assert yomi not in d or kanji not in d.get(yomi)
@pytest.mark.parametrize("yomi", [
('いずれもろっくふぃるだむ'),
('のちの'),
('あーけーどすてぃっく'),
('ぐらびああいどる'),
('いわゆる'),
('ぼーこーどあるいはぼどーこーど'),
('さんばーすと'),
('いいか'),
('てれび'),
('ーおーつう'),
])
def test_not_in(yomi):
"""
    Readings that must NOT be present in SKK-JISYO.jawiki.
"""
assert yomi not in d
# あがる /△△通○○上ル/
# いえす /YES/YES!!!/YES, NO./Yes/
# いみ /忌み、斎み/
# いろは /iroha/優勝内国産馬連合競走/
# いわたみつおのちょうらじ /Voice of A&G Digital 岩田光央の超ラジ!/
# うちゅうけいじたましい /宇宙刑事魂 THE SPACE SHERIFF SPIRITS/
# うづ /精衛海を填
|
the-stack_0_19195 | # Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
import logging
import os
import sawtooth_signing as signing
from sawtooth_signing import CryptoFactory
from sawtooth_signing.secp256k1 import Secp256k1PrivateKey
from sawtooth_sdk.consensus.exceptions import UnknownBlock
from sawtooth_sdk.messaging.stream import Stream
from sawtooth_sdk.protobuf.batch_pb2 import Batch
from sawtooth_sdk.protobuf.batch_pb2 import BatchHeader
from sawtooth_sdk.protobuf.client_batch_submit_pb2 \
import ClientBatchSubmitRequest
from sawtooth_sdk.protobuf.client_batch_submit_pb2 \
import ClientBatchSubmitResponse
from sawtooth_sdk.protobuf.client_block_pb2 \
import ClientBlockGetByTransactionIdRequest
from sawtooth_sdk.protobuf.client_block_pb2 \
import ClientBlockGetResponse
from sawtooth_sdk.protobuf.block_pb2 import BlockHeader
from sawtooth_sdk.protobuf.consensus_pb2 import ConsensusBlock
from sawtooth_sdk.protobuf.validator_pb2 import Message
from sawtooth_poet.poet_consensus.poet_block_publisher \
import PoetBlockPublisher
from sawtooth_poet.poet_consensus.poet_block_verifier import PoetBlockVerifier
from sawtooth_poet.poet_consensus.poet_fork_resolver import PoetForkResolver
LOGGER = logging.getLogger(__name__)
class PoetOracle:
'''This is a wrapper around the PoET structures (publisher,
verifier, fork resolver) and their attendant proxies.
'''
def __init__(self, service, component_endpoint,
config_dir, data_dir, key_dir):
self._config_dir = config_dir
self._data_dir = data_dir
self._signer = _load_identity_signer(key_dir, 'validator')
self._validator_id = self._signer.get_public_key().as_hex()
stream = Stream(component_endpoint)
self._block_cache = _BlockCacheProxy(service, stream)
self._state_view_factory = _StateViewFactoryProxy(service)
self._batch_publisher = _BatchPublisherProxy(stream, self._signer)
self._publisher = None
def initialize_block(self, previous_block):
block_header = NewBlockHeader(
previous_block,
self._signer.get_public_key().as_hex())
self._publisher = PoetBlockPublisher(
block_cache=self._block_cache,
state_view_factory=self._state_view_factory,
batch_publisher=self._batch_publisher,
data_dir=self._data_dir,
config_dir=self._config_dir,
validator_id=self._validator_id)
return self._publisher.initialize_block(block_header)
def check_publish_block(self, block):
return self._publisher.check_publish_block(block)
def finalize_block(self, block):
return self._publisher.finalize_block(block)
def verify_block(self, block):
verifier = PoetBlockVerifier(
block_cache=self._block_cache,
state_view_factory=self._state_view_factory,
data_dir=self._data_dir,
config_dir=self._config_dir,
validator_id=self._validator_id)
return verifier.verify_block(block)
def switch_forks(self, cur_fork_head, new_fork_head):
'''"compare_forks" is not an intuitive name.'''
fork_resolver = PoetForkResolver(
block_cache=self._block_cache,
state_view_factory=self._state_view_factory,
data_dir=self._data_dir,
config_dir=self._config_dir,
validator_id=self._validator_id)
return fork_resolver.compare_forks(cur_fork_head, new_fork_head)
class PoetBlock:
def __init__(self, block):
# fields that come with consensus blocks
self.block_id = block.block_id
self.previous_id = block.previous_id
self.signer_id = block.signer_id
self.block_num = block.block_num
self.payload = block.payload
self.summary = block.summary
# fields that poet requires
identifier = block.block_id.hex()
previous_block_id = block.previous_id.hex()
signer_public_key = block.signer_id.hex()
self.identifier = identifier
self.header_signature = identifier
self.previous_block_id = previous_block_id
self.signer_public_key = signer_public_key
self.header = _DummyHeader(
consensus=block.payload,
signer_public_key=signer_public_key,
previous_block_id=previous_block_id)
# this is a trick
self.state_root_hash = block.block_id
def __str__(self):
return (
"Block("
+ ", ".join([
"block_num: {}".format(self.block_num),
"block_id: {}".format(self.block_id.hex()),
"previous_id: {}".format(self.previous_id.hex()),
"signer_id: {}".format(self.signer_id.hex()),
"payload: {}".format(self.payload),
"summary: {}".format(self.summary.hex()),
])
+ ")"
)
class NewBlockHeader:
'''The header for the block that is to be initialized.'''
def __init__(self, previous_block, signer_public_key):
self.consensus = None
self.signer_public_key = signer_public_key
self.previous_block_id = previous_block.identifier
self.block_num = previous_block.block_num + 1
class _DummyHeader:
def __init__(self, consensus, signer_public_key, previous_block_id):
self.consensus = consensus
self.signer_public_key = signer_public_key
self.previous_block_id = previous_block_id
class _BlockCacheProxy:
def __init__(self, service, stream):
self.block_store = _BlockStoreProxy(service, stream) # public
self._service = service
def __getitem__(self, block_id):
block_id = bytes.fromhex(block_id)
try:
return PoetBlock(self._service.get_blocks([block_id])[block_id])
except UnknownBlock:
return None
class _BlockStoreProxy:
def __init__(self, service, stream):
self._service = service
self._stream = stream
@property
def chain_head(self):
return PoetBlock(self._service.get_chain_head())
def get_block_by_transaction_id(self, transaction_id):
future = self._stream.send(
message_type=Message.CLIENT_BLOCK_GET_BY_TRANSACTION_ID_REQUEST,
content=ClientBlockGetByTransactionIdRequest(
transaction_id=transaction_id).SerializeToString())
content = future.result().content
response = ClientBlockGetResponse()
response.ParseFromString(content)
if response.status == ClientBlockGetResponse.NO_RESOURCE:
raise ValueError("The transaction supplied is not in a block")
block = response.block
header = BlockHeader()
header.ParseFromString(block.header)
consensus_block = ConsensusBlock(
block_id=bytes.fromhex(block.header_signature),
previous_id=bytes.fromhex(header.previous_block_id),
signer_id=bytes.fromhex(header.signer_public_key),
block_num=header.block_num,
payload=header.consensus,
summary=b'')
poet_block = PoetBlock(consensus_block)
return poet_block
def get_block_iter(self, reverse):
# Ignore the reverse flag, since we can only get blocks
# starting from the head.
chain_head = self.chain_head
yield chain_head
curr = chain_head
while curr.previous_id:
try:
previous_block = PoetBlock(
self._service.get_blocks(
[curr.previous_id]
)[curr.previous_id])
except UnknownBlock:
return
yield previous_block
curr = previous_block
class _StateViewFactoryProxy:
def __init__(self, service):
self._service = service
def create_view(self, state_root_hash=None):
'''The "state_root_hash" is really the block_id.'''
block_id = state_root_hash
return _StateViewProxy(self._service, block_id)
class _StateViewProxy:
def __init__(self, service, block_id):
self._service = service
self._block_id = block_id
def get(self, address):
result = self._service.get_state(
block_id=self._block_id,
addresses=[address])
return result[address]
def leaves(self, prefix):
result = self._service.get_state(
block_id=self._block_id,
addresses=[prefix])
return [
(address, data)
for address, data in result.items()
]
class _BatchPublisherProxy:
def __init__(self, stream, signer):
self.identity_signer = signer # public
self._stream = stream
def send(self, transactions):
txn_signatures = [txn.header_signature for txn in transactions]
header = BatchHeader(
signer_public_key=self.identity_signer.get_public_key().as_hex(),
transaction_ids=txn_signatures
).SerializeToString()
signature = self.identity_signer.sign(header)
batch = Batch(
header=header,
transactions=transactions,
header_signature=signature)
future = self._stream.send(
message_type=Message.CLIENT_BATCH_SUBMIT_REQUEST,
content=ClientBatchSubmitRequest(
batches=[batch]).SerializeToString())
result = future.result()
assert result.message_type == Message.CLIENT_BATCH_SUBMIT_RESPONSE
response = ClientBatchSubmitResponse()
response.ParseFromString(result.content)
if response.status != ClientBatchSubmitResponse.OK:
LOGGER.warning("Submitting batch failed with status %s", response)
def _load_identity_signer(key_dir, key_name):
"""Loads a private key from the key directory, based on a validator's
identity.
Args:
key_dir (str): The path to the key directory.
key_name (str): The name of the key to load.
Returns:
Signer: the cryptographic signer for the key
"""
key_path = os.path.join(key_dir, '{}.priv'.format(key_name))
if not os.path.exists(key_path):
raise Exception(
"No such signing key file: {}".format(key_path))
if not os.access(key_path, os.R_OK):
raise Exception(
"Key file is not readable: {}".format(key_path))
LOGGER.info('Loading signing key: %s', key_path)
try:
with open(key_path, 'r') as key_file:
private_key_str = key_file.read().strip()
except IOError as e:
raise Exception(
"Could not load key file: {}".format(str(e)))
try:
private_key = Secp256k1PrivateKey.from_hex(private_key_str)
except signing.ParseError as e:
raise Exception(
"Invalid key in file {}: {}".format(key_path, str(e)))
context = signing.create_context('secp256k1')
crypto_factory = CryptoFactory(context)
return crypto_factory.new_signer(private_key)
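# Rough lifecycle sketch showing how a consensus engine is expected to drive
# PoetOracle (all argument values below are placeholders supplied by the engine):
#
#   oracle = PoetOracle(service, component_endpoint="tcp://localhost:4004",
#                       config_dir="/etc/sawtooth", data_dir="/var/lib/sawtooth",
#                       key_dir="/etc/sawtooth/keys")
#   oracle.initialize_block(previous_block)      # start building on the chain head
#   if oracle.check_publish_block(candidate):    # poll until the wait timer expires
#       consensus_payload = oracle.finalize_block(candidate)
#   valid = oracle.verify_block(peer_block)      # validate a block from a peer
#   keep_new = oracle.switch_forks(cur_head, new_head)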
|
the-stack_0_19196 | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from functools import partial
from pathlib import Path
from textwrap import dedent
import pytest
from pants.backend.python.goals import package_pex_binary
from pants.backend.python.target_types import PexBinary, PythonSourcesGeneratorTarget
from pants.backend.python.target_types_rules import rules as python_target_type_rules
from pants.backend.python.util_rules import pex_from_targets
from pants.core.goals.test import (
BuildPackageDependenciesRequest,
BuiltPackageDependencies,
ConsoleCoverageReport,
CoverageData,
CoverageDataCollection,
CoverageReports,
RuntimePackageDependenciesField,
ShowOutput,
Test,
TestDebugRequest,
TestFieldSet,
TestResult,
TestSubsystem,
build_runtime_package_dependencies,
run_tests,
)
from pants.core.util_rules.distdir import DistDir
from pants.engine.addresses import Address
from pants.engine.desktop import OpenFiles, OpenFilesRequest
from pants.engine.fs import (
EMPTY_DIGEST,
EMPTY_FILE_DIGEST,
Digest,
MergeDigests,
Snapshot,
Workspace,
)
from pants.engine.process import InteractiveProcess, InteractiveProcessResult
from pants.engine.target import (
MultipleSourcesField,
Target,
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest,
)
from pants.engine.unions import UnionMembership
from pants.testutil.option_util import create_goal_subsystem
from pants.testutil.rule_runner import (
MockEffect,
MockGet,
QueryRule,
RuleRunner,
mock_console,
run_rule_with_mocks,
)
from pants.util.logging import LogLevel
class MockTarget(Target):
alias = "mock_target"
core_fields = (MultipleSourcesField,)
@dataclass(frozen=True)
class MockCoverageData(CoverageData):
address: Address
class MockCoverageDataCollection(CoverageDataCollection):
element_type = MockCoverageData
class MockTestFieldSet(TestFieldSet, metaclass=ABCMeta):
required_fields = (MultipleSourcesField,)
@staticmethod
@abstractmethod
def exit_code(_: Address) -> int:
pass
@property
def test_result(self) -> TestResult:
return TestResult(
exit_code=self.exit_code(self.address),
stdout="",
stdout_digest=EMPTY_FILE_DIGEST,
stderr="",
stderr_digest=EMPTY_FILE_DIGEST,
address=self.address,
coverage_data=MockCoverageData(self.address),
output_setting=ShowOutput.ALL,
)
class SuccessfulFieldSet(MockTestFieldSet):
@staticmethod
def exit_code(_: Address) -> int:
return 0
class ConditionallySucceedsFieldSet(MockTestFieldSet):
@staticmethod
def exit_code(address: Address) -> int:
return 27 if address.target_name == "bad" else 0
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner()
def make_target(address: Address | None = None) -> Target:
if address is None:
address = Address("", target_name="tests")
return MockTarget({}, address)
def run_test_rule(
rule_runner: RuleRunner,
*,
field_set: type[TestFieldSet],
targets: list[Target],
debug: bool = False,
use_coverage: bool = False,
xml_dir: str | None = None,
output: ShowOutput = ShowOutput.ALL,
valid_targets: bool = True,
) -> tuple[int, str]:
test_subsystem = create_goal_subsystem(
TestSubsystem,
debug=debug,
use_coverage=use_coverage,
xml_dir=xml_dir,
output=output,
extra_env_vars=[],
)
workspace = Workspace(rule_runner.scheduler, _enforce_effects=False)
union_membership = UnionMembership(
{
TestFieldSet: [field_set],
CoverageDataCollection: [MockCoverageDataCollection],
}
)
def mock_find_valid_field_sets(
_: TargetRootsToFieldSetsRequest,
) -> TargetRootsToFieldSets:
if not valid_targets:
return TargetRootsToFieldSets({})
return TargetRootsToFieldSets({tgt: [field_set.create(tgt)] for tgt in targets})
def mock_debug_request(_: TestFieldSet) -> TestDebugRequest:
return TestDebugRequest(InteractiveProcess(["/bin/example"], input_digest=EMPTY_DIGEST))
def mock_coverage_report_generation(
coverage_data_collection: MockCoverageDataCollection,
) -> CoverageReports:
addresses = ", ".join(
coverage_data.address.spec for coverage_data in coverage_data_collection
)
console_report = ConsoleCoverageReport(
coverage_insufficient=False, report=f"Ran coverage on {addresses}"
)
return CoverageReports(reports=(console_report,))
with mock_console(rule_runner.options_bootstrapper) as (console, stdio_reader):
result: Test = run_rule_with_mocks(
run_tests,
rule_args=[
console,
test_subsystem,
workspace,
union_membership,
DistDir(relpath=Path("dist")),
],
mock_gets=[
MockGet(
output_type=TargetRootsToFieldSets,
input_type=TargetRootsToFieldSetsRequest,
mock=mock_find_valid_field_sets,
),
MockGet(
output_type=TestResult,
input_type=TestFieldSet,
mock=lambda fs: fs.test_result,
),
MockGet(
output_type=TestDebugRequest,
input_type=TestFieldSet,
mock=mock_debug_request,
),
# Merge XML results.
MockGet(
output_type=Digest,
input_type=MergeDigests,
mock=lambda _: EMPTY_DIGEST,
),
MockGet(
output_type=CoverageReports,
input_type=CoverageDataCollection,
mock=mock_coverage_report_generation,
),
MockGet(
output_type=OpenFiles,
input_type=OpenFilesRequest,
mock=lambda _: OpenFiles(()),
),
MockEffect(
output_type=InteractiveProcessResult,
input_type=InteractiveProcess,
mock=lambda _: InteractiveProcessResult(0),
),
],
union_membership=union_membership,
)
assert not stdio_reader.get_stdout()
return result.exit_code, stdio_reader.get_stderr()
def test_invalid_target_noops(rule_runner: RuleRunner) -> None:
exit_code, stderr = run_test_rule(
rule_runner,
field_set=SuccessfulFieldSet,
targets=[make_target()],
valid_targets=False,
)
assert exit_code == 0
assert stderr.strip() == ""
def test_summary(rule_runner: RuleRunner) -> None:
good_address = Address("", target_name="good")
bad_address = Address("", target_name="bad")
exit_code, stderr = run_test_rule(
rule_runner,
field_set=ConditionallySucceedsFieldSet,
targets=[make_target(good_address), make_target(bad_address)],
)
assert exit_code == ConditionallySucceedsFieldSet.exit_code(bad_address)
assert stderr == dedent(
"""\
✓ //:good succeeded.
𐄂 //:bad failed.
"""
)
def test_debug_target(rule_runner: RuleRunner) -> None:
exit_code, _ = run_test_rule(
rule_runner,
field_set=SuccessfulFieldSet,
targets=[make_target()],
debug=True,
)
assert exit_code == 0
def test_xml_dir(rule_runner: RuleRunner) -> None:
xml_dir = "dist/test-results"
addr1 = Address("", target_name="t1")
addr2 = Address("", target_name="t2")
exit_code, stderr = run_test_rule(
rule_runner,
field_set=SuccessfulFieldSet,
targets=[make_target(addr1), make_target(addr2)],
xml_dir=xml_dir,
)
assert exit_code == 0
assert f"Wrote test XML to `{xml_dir}`" in stderr
def test_coverage(rule_runner: RuleRunner) -> None:
addr1 = Address("", target_name="t1")
addr2 = Address("", target_name="t2")
exit_code, stderr = run_test_rule(
rule_runner,
field_set=SuccessfulFieldSet,
targets=[make_target(addr1), make_target(addr2)],
use_coverage=True,
)
assert exit_code == 0
assert stderr.strip().endswith(f"Ran coverage on {addr1.spec}, {addr2.spec}")
def test_sort_results() -> None:
create_test_result = partial(
TestResult,
stdout="",
stdout_digest=EMPTY_FILE_DIGEST,
stderr="",
stderr_digest=EMPTY_FILE_DIGEST,
output_setting=ShowOutput.ALL,
)
skip1 = create_test_result(exit_code=None, address=Address("t1"))
skip2 = create_test_result(exit_code=None, address=Address("t2"))
success1 = create_test_result(exit_code=0, address=Address("t1"))
success2 = create_test_result(exit_code=0, address=Address("t2"))
fail1 = create_test_result(exit_code=1, address=Address("t1"))
fail2 = create_test_result(exit_code=1, address=Address("t2"))
assert sorted([fail2, success2, skip2, fail1, success1, skip1]) == [
skip1,
skip2,
success1,
success2,
fail1,
fail2,
]
def assert_streaming_output(
*,
exit_code: int | None,
stdout: str = "stdout",
stderr: str = "stderr",
output_setting: ShowOutput = ShowOutput.ALL,
expected_level: LogLevel,
expected_message: str,
) -> None:
result = TestResult(
exit_code=exit_code,
stdout=stdout,
stdout_digest=EMPTY_FILE_DIGEST,
stderr=stderr,
stderr_digest=EMPTY_FILE_DIGEST,
output_setting=output_setting,
address=Address("demo_test"),
)
assert result.level() == expected_level
assert result.message() == expected_message
def test_streaming_output_skip() -> None:
assert_streaming_output(
exit_code=None,
stdout="",
stderr="",
expected_level=LogLevel.DEBUG,
expected_message="demo_test:demo_test skipped.",
)
def test_streaming_output_success() -> None:
assert_success_streamed = partial(
assert_streaming_output, exit_code=0, expected_level=LogLevel.INFO
)
assert_success_streamed(
expected_message=dedent(
"""\
demo_test:demo_test succeeded.
stdout
stderr
"""
),
)
assert_success_streamed(
output_setting=ShowOutput.FAILED, expected_message="demo_test:demo_test succeeded."
)
assert_success_streamed(
output_setting=ShowOutput.NONE, expected_message="demo_test:demo_test succeeded."
)
def test_streaming_output_failure() -> None:
assert_failure_streamed = partial(
assert_streaming_output, exit_code=1, expected_level=LogLevel.ERROR
)
message = dedent(
"""\
demo_test:demo_test failed (exit code 1).
stdout
stderr
"""
)
assert_failure_streamed(expected_message=message)
assert_failure_streamed(output_setting=ShowOutput.FAILED, expected_message=message)
assert_failure_streamed(
output_setting=ShowOutput.NONE, expected_message="demo_test:demo_test failed (exit code 1)."
)
def test_runtime_package_dependencies() -> None:
rule_runner = RuleRunner(
rules=[
build_runtime_package_dependencies,
*pex_from_targets.rules(),
*package_pex_binary.rules(),
*python_target_type_rules(),
QueryRule(BuiltPackageDependencies, [BuildPackageDependenciesRequest]),
],
target_types=[PythonSourcesGeneratorTarget, PexBinary],
)
rule_runner.set_options(args=[], env_inherit={"PATH", "PYENV_ROOT", "HOME"})
rule_runner.write_files(
{
"src/py/main.py": "",
"src/py/BUILD": dedent(
"""\
python_sources()
pex_binary(name='main', entry_point='main.py')
"""
),
}
)
# Include an irrelevant target that cannot be built with `./pants package`.
input_field = RuntimePackageDependenciesField(["src/py", "src/py:main"], Address("fake"))
result = rule_runner.request(
BuiltPackageDependencies, [BuildPackageDependenciesRequest(input_field)]
)
assert len(result) == 1
built_package = result[0]
snapshot = rule_runner.request(Snapshot, [built_package.digest])
assert snapshot.files == ("src.py/main.pex",)
|
the-stack_0_19197 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.ads.googleads.v1.services HotelPerformanceViewService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.path_template
from google.ads.google_ads.v1.services import hotel_performance_view_service_client_config
from google.ads.google_ads.v1.services.transports import hotel_performance_view_service_grpc_transport
from google.ads.google_ads.v1.proto.services import hotel_performance_view_service_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-ads', ).version
class HotelPerformanceViewServiceClient(object):
"""Service to manage Hotel Performance Views."""
SERVICE_ADDRESS = 'googleads.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.ads.googleads.v1.services.HotelPerformanceViewService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
HotelPerformanceViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def hotel_performance_view_path(cls, customer):
"""Return a fully-qualified hotel_performance_view string."""
return google.api_core.path_template.expand(
'customers/{customer}/hotelPerformanceView',
customer=customer,
)
def __init__(self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None):
"""Constructor.
Args:
transport (Union[~.HotelPerformanceViewServiceGrpcTransport,
Callable[[~.Credentials, type], ~.HotelPerformanceViewServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
'The `client_config` argument is deprecated.',
PendingDeprecationWarning,
stacklevel=2)
else:
client_config = hotel_performance_view_service_client_config.config
if channel:
warnings.warn(
'The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning,
stacklevel=2)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=hotel_performance_view_service_grpc_transport
.HotelPerformanceViewServiceGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.')
self.transport = transport
else:
self.transport = hotel_performance_view_service_grpc_transport.HotelPerformanceViewServiceGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION, )
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def get_hotel_performance_view(
self,
resource_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns the requested Hotel Performance View in full detail.
Args:
resource_name (str): Resource name of the Hotel Performance View to fetch.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v1.types.HotelPerformanceView` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_hotel_performance_view' not in self._inner_api_calls:
self._inner_api_calls[
'get_hotel_performance_view'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_hotel_performance_view,
default_retry=self.
_method_configs['GetHotelPerformanceView'].retry,
default_timeout=self.
_method_configs['GetHotelPerformanceView'].timeout,
client_info=self._client_info,
)
request = hotel_performance_view_service_pb2.GetHotelPerformanceViewRequest(
resource_name=resource_name, )
return self._inner_api_calls['get_hotel_performance_view'](
request, retry=retry, timeout=timeout, metadata=metadata)
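# Example usage sketch (the customer ID is a placeholder and valid OAuth2
# credentials must be discoverable in the environment):
#
#   >>> client = HotelPerformanceViewServiceClient()
#   >>> resource_name = client.hotel_performance_view_path('1234567890')
#   >>> hotel_performance_view = client.get_hotel_performance_view(resource_name)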
|
the-stack_0_19199 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Hpccg(MakefilePackage):
"""Proxy Application. Intended to be the 'best approximation
to an unstructured implicit finite element or finite volume
application in 800 lines or fewer.'
"""
homepage = "https://mantevo.org/about/applications/"
url = "http://mantevo.org/downloads/releaseTarballs/miniapps/HPCCG/HPCCG-1.0.tar.gz"
tags = ['proxy-app']
version('1.0', '2e99da1a89de5ef0844da5e6ffbf39dc')
variant('mpi', default=True, description='Build with MPI support')
variant('openmp', default=True, description='Build with OpenMP support')
# Optional dependencies
depends_on('mpi', when='+mpi')
@property
def build_targets(self):
targets = []
if '+mpi' in self.spec:
targets.append('CXX={0}'.format(self.spec['mpi'].mpicxx))
targets.append('LINKER={0}'.format(self.spec['mpi'].mpicxx))
targets.append('USE_MPI=-DUSING_MPI')
else:
targets.append('CXX=c++')
targets.append('LINKER=c++')
if '+openmp' in self.spec:
targets.append('USE_OMP=-DUSING_OMP')
targets.append('OMP_FLAGS={0}'.format(self.compiler.openmp_flag))
# Remove Compiler Specific Optimization Flags
if '%gcc' not in self.spec:
targets.append('CPP_OPT_FLAGS=')
return targets
def install(self, spec, prefix):
# Manual installation
mkdirp(prefix.bin)
mkdirp(prefix.doc)
install('test_HPCCG', prefix.bin)
install('README', prefix.doc)
install('weakScalingRunScript', prefix.bin)
install('strongScalingRunScript', prefix.bin)
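# Illustrative spec: `spack install hpccg+mpi+openmp` builds the proxy app with MPI
# and OpenMP enabled; the resulting test_HPCCG binary and the weak/strong scaling
# run scripts are installed into the package prefix's bin/ directory.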
|
the-stack_0_19200 | '''
File: visualization.py
Author: Yutong Dai ([email protected])
File Created: Friday, 2018-10-28 09:07
Last Modified: Sunday, 2018-10-28 15:01
--------------------------------------------
Description:
'''
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch import autograd
import utils
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import time
import utils
batch_size = 100
transform_test = transforms.Compose([
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
testset = torchvision.datasets.CIFAR10(root='./', train=False, download=False, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=8)
testloader = enumerate(testloader)
"""
part 1
"""
model = torch.load('cifar10.model')
model.cuda()
model.eval()
batch_idx, (X_batch, Y_batch) = testloader.__next__()
X_batch = Variable(X_batch, requires_grad=True).cuda()
Y_batch_alternate = (Y_batch + 1) % 10
Y_batch_alternate = Variable(Y_batch_alternate).cuda()
Y_batch = Variable(Y_batch).cuda()
# save real images
samples = X_batch.data.cpu().numpy()
samples += 1.0
samples /= 2.0
samples = samples.transpose(0, 2, 3, 1)
fig = utils.plot(samples[0:100])
plt.savefig('./visualization/real_images.png', bbox_inches='tight')
plt.close(fig)
output = model(X_batch)[1]
prediction = output.data.max(1)[1] # first column has actual prob.
accuracy = (float(prediction.eq(Y_batch.data).sum()) / float(batch_size))*100.0
print(accuracy)
# slightly jitter all input images
criterion = nn.CrossEntropyLoss(reduce=False)
loss = criterion(output, Y_batch_alternate)
gradients = torch.autograd.grad(outputs=loss, inputs=X_batch,
grad_outputs=torch.ones(loss.size()).cuda(),
create_graph=True, retain_graph=False, only_inputs=True)[0]
gradient_image = gradients.data.cpu().numpy()
gradient_image = (gradient_image - np.min(gradient_image))/(np.max(gradient_image)-np.min(gradient_image))
gradient_image = gradient_image.transpose(0, 2, 3, 1)
fig = utils.plot(gradient_image[0:100])
plt.savefig('./visualization/gradient_image.png', bbox_inches='tight')
plt.close(fig)
# jitter input image
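# (the sign of the gradient of the *alternate-label* loss is subtracted from the
# input, an FGSM-style perturbation of size gain * 2/255 in the [-1, 1] input
# range, so classification accuracy on the true labels should drop sharply)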
gradients[gradients > 0.0] = 1.0
gradients[gradients < 0.0] = -1.0
gain = 8.0
X_batch_modified = X_batch - gain*0.007843137*gradients
X_batch_modified[X_batch_modified > 1.0] = 1.0
X_batch_modified[X_batch_modified < -1.0] = -1.0
# evaluate new fake images
output = model(X_batch_modified)
output = output[1]
prediction = output.data.max(1)[1] # first column has actual prob.
accuracy = (float(prediction.eq(Y_batch.data).sum()) / float(batch_size))*100.0
print(accuracy)
# save fake images
samples = X_batch_modified.data.cpu().numpy()
samples += 1.0
samples /= 2.0
samples = samples.transpose(0, 2, 3, 1)
fig = utils.plot(samples[0:100])
plt.savefig('./visualization/jittered_images.png', bbox_inches='tight')
plt.close(fig)
"""
Part2:
plots for the discriminator without the generator:
Synthetic Images Maximizing Classification Output
"""
model = torch.load('cifar10.model')
model.cuda()
model.eval()
utils.plot_max_class(X_batch, model, label="without")
utils.plot_max_feature(X_batch, model, extract_features=2, label="without", batch_size=100)
utils.plot_max_feature(X_batch, model, extract_features=4, label="without", batch_size=100)
utils.plot_max_feature(X_batch, model, extract_features=8, label="without", batch_size=100)
"""
Part3:
plots for the discriminator with the generator:
Synthetic Images Maximizing Classification Output
"""
model = torch.load('discriminator.model')
model.cuda()
model.eval()
utils.plot_max_class(X_batch, model, label="with")
utils.plot_max_feature(X_batch, model, extract_features=2, label="with", batch_size=100)
utils.plot_max_feature(X_batch, model, extract_features=4, label="with", batch_size=100)
utils.plot_max_feature(X_batch, model, extract_features=8, label="with", batch_size=100)
|
the-stack_0_19201 | import unittest
from pyalink.alink import *
class TestPinjiu(unittest.TestCase):
def test_kmeans(self):
import numpy as np
import pandas as pd
data = np.array([
[0, "0 0 0"],
[1, "0.1,0.1,0.1"],
[2, "0.2,0.2,0.2"],
[3, "9 9 9"],
[4, "9.1 9.1 9.1"],
[5, "9.2 9.2 9.2"]
])
df = pd.DataFrame({"id": data[:, 0], "vec": data[:, 1]})
inOp = BatchOperator.fromDataframe(df, schemaStr='id int, vec string')
kmeans = KMeans().setVectorCol("vec").setK(2).setPredictionCol("pred")
kmeans.fit(inOp).transform(inOp).collectToDataframe()
|
the-stack_0_19202 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Functions shared across different jax optimization libraries."""
import collections
from typing import Callable
from jax import lax
import jax.numpy as jnp
import numpy as onp
NAdamWHyperParams = collections.namedtuple("NAdamWHyperParams", [
"learning_rate", "beta1", "beta2", "epsilon", "adamw_weight_decay",
"l2_weight_decay", "use_nesterov", "constant_fraction", "warmup_fraction",
"min_learning_rate_mult", "training_steps", "use_bias_correction"
])
NAdamWParamState = collections.namedtuple("NAdamWParamState",
["grad_ema", "grad_sq_ema"])
def nadamw_update(step, hyper_params, param, state, grad):
"""Compute the next update using the nadamw optimizer.
This term should then be *added* to the parameter value.
Args:
step: int
Current training iteration.
hyper_params: NAdamWHyperParams
      An object containing all of the hyper parameters needed to perform a step.
param: ndarray
Current parameter value.
state: NAdamWParamState
      State consisting of EMAs of the gradient and the squared gradient.
grad: ndarray
Gradient to use when computing the update.
Returns:
    update: ndarray
      The update to be added to the current parameter value.
    new_state: NAdamWParamState
      The updated state (EMAs of the gradient and the squared gradient).
"""
assert hyper_params.learning_rate is not None, "no learning rate provided."
beta1 = hyper_params.beta1
beta2 = hyper_params.beta2
lr = get_cosine_learning_rate_fn(hyper_params.training_steps,
hyper_params.learning_rate,
hyper_params.min_learning_rate_mult,
hyper_params.constant_fraction,
hyper_params.warmup_fraction)(
step)
grad = grad - param * hyper_params.l2_weight_decay
grad_sq = lax.square(grad)
grad_ema = beta1 * state.grad_ema + (1. - beta1) * grad
grad_sq_ema = beta2 * state.grad_sq_ema + (1. - beta2) * grad_sq
t = step + 1.
# correction
if hyper_params.use_bias_correction:
lr_t = lr * jnp.sqrt(1.0 - beta2**t) / (1.0 - beta1**t)
else:
lr_t = lr
if hyper_params.use_nesterov:
numerator = (beta1 * grad_ema + (1.0 - beta1) * grad)
denom = jnp.sqrt(grad_sq_ema) + hyper_params.epsilon
step = lr_t * numerator / denom
else:
denom = jnp.sqrt(grad_sq_ema) + hyper_params.epsilon
step = lr_t * grad_ema / denom
step = step + lr_t * hyper_params.adamw_weight_decay * param
new_state = NAdamWParamState(grad_ema, grad_sq_ema)
return -step, new_state
def get_cosine_learning_rate_fn(
training_steps, learning_rate, min_learning_rate_mult,
constant_fraction, warmup_fraction):
"""Get a function that does cosine learning rate decay with warmup.
The learning rate starts at zero, is "warmed up" linearly over
`warmup_fraction * training_steps` iterations to achieve a final value of
`learning_rate`. A constant learning rate of `learning_rate` is held up until
`training_steps*constant_fraction` at which point a cosine decay is started
to a final learning rate of `min_learning_rate_mult * learning_rate`.
  The cosine decay sets the learning rate using a monotonically decreasing
  section of the cosine function from 0 to pi/2. It has proven useful in
  large language modeling (gpt, megatron-lm) and image classification.
See https://arxiv.org/abs/1608.03983 for more information on the cosine decay.
Args:
training_steps: number of training steps the schedule should be run for.
learning_rate: base learning rate. This is the learning rate used just after
warmup and where the decay starts from.
min_learning_rate_mult: a multiplicative factor to control how low the
learning rate should be decayed to.
    constant_fraction: the fraction of training steps to take
before starting the decay. This includes the time spent warming up the
learning rate.
warmup_fraction: the fraction of training steps to use for a learning rate
warmup.
Returns:
A function that takes as input a training iteration and returns the learning
rate from the specified schedule.
"""
def ff(x):
"""Convert input to float32."""
return jnp.asarray(x, dtype=onp.float32)
def fn(global_step):
"""Returns a learning rate given the current training iteration."""
float_training_steps = ff(training_steps)
global_step = ff(global_step)
# ensure we don't train longer than training steps
global_step = jnp.minimum(global_step, float_training_steps)
constant_steps = float_training_steps * constant_fraction
x = jnp.maximum(ff(global_step), ff(constant_steps))
min_learning_rate = min_learning_rate_mult * learning_rate
if warmup_fraction:
min_warmup_fraction = jnp.maximum(warmup_fraction, constant_fraction)
warmup_steps = float_training_steps * min_warmup_fraction
is_warmup = ff(jnp.greater(ff(warmup_steps), ff(global_step)))
warmup_lr = (global_step / warmup_steps) * learning_rate
else:
warmup_lr = learning_rate
is_warmup = 0.0
step = x - constant_steps
constant_and_decay = (learning_rate - min_learning_rate) * (
jnp.cos(step * onp.pi / (float_training_steps - constant_steps)) / 2.0 +
0.5) + min_learning_rate
new_learning_rate = constant_and_decay * (1.0 - is_warmup) + is_warmup * (
warmup_lr)
return new_learning_rate
return fn
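# Minimal usage sketch / smoke test. The hyperparameter values below are arbitrary
# illustrative choices, not recommended settings.
if __name__ == "__main__":
  hyper_params = NAdamWHyperParams(
      learning_rate=1e-3, beta1=0.9, beta2=0.999, epsilon=1e-8,
      adamw_weight_decay=0.0, l2_weight_decay=0.0, use_nesterov=True,
      constant_fraction=0.1, warmup_fraction=0.05,
      min_learning_rate_mult=0.01, training_steps=1000,
      use_bias_correction=True)
  param = jnp.zeros((3,))
  state = NAdamWParamState(jnp.zeros_like(param), jnp.zeros_like(param))
  grad = jnp.ones_like(param)
  # One optimizer step: the returned update is *added* to the parameter.
  update, state = nadamw_update(0, hyper_params, param, state, grad)
  param = param + update
  # The cosine warmup/decay schedule can also be inspected on its own.
  lr_fn = get_cosine_learning_rate_fn(
      training_steps=1000, learning_rate=1e-3, min_learning_rate_mult=0.01,
      constant_fraction=0.1, warmup_fraction=0.05)
  print("step-0 update:", update, "lr at step 500:", lr_fn(500))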
|
the-stack_0_19205 | # -*- coding: UTF-8 -*-
'''
dash_streambot.py
'''
import boss
import urlparse
import logging
import os
import time
import sys
from mpegdash.parser import MPEGDASHParser
import mpegdash
import streambot
logger = logging.getLogger('streambot.dash_streambot')
def _make_segment_url(mpd_base_url, period_base_url, adaptation_set_base_url, representation_base_url):
mpd_base_url_value = mpd_base_url.base_url_value if mpd_base_url else ''
period_base_url_value = period_base_url.base_url_value if period_base_url else ''
adaptation_set_base_url_value = adaptation_set_base_url.base_url_value if adaptation_set_base_url else ''
representation_base_url_value = representation_base_url.base_url_value if representation_base_url else ''
if streambot.is_full_uri(representation_base_url_value):
return representation_base_url_value
url = ''.join([mpd_base_url_value, period_base_url_value, adaptation_set_base_url_value, representation_base_url_value])
return url
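# For example (values are illustrative): if the MPD-level BaseURL is
# "http://example.com/stream/" and the representation's BaseURL is the relative
# path "video/seg-1.m4s", the pieces are concatenated into
# "http://example.com/stream/video/seg-1.m4s"; an absolute representation URL is
# returned unchanged.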
def _check_is_byterange(mpd_base_url, period_base_url, adaptation_set_base_url, representation_base_url):
mpd_is_byterange = mpd_base_url.byte_range if mpd_base_url else False
period_is_byterange = period_base_url.byte_range if period_base_url else False
adaptation_set_is_byterange = adaptation_set_base_url.byte_range if adaptation_set_base_url else False
representation_is_byterange = representation_base_url.byte_range if representation_base_url else False
return mpd_is_byterange or period_is_byterange or adaptation_set_is_byterange or representation_is_byterange
class DASHSegment():
def __init__(self, uri, is_byterange=False):
'''
@param uri Absolute URI of segment
@param is_byterange Default False
'''
if not streambot.is_full_uri(uri):
raise streambot.StreamBotError('DASHSegment URI is not absolute: {uri}'.format(uri=uri))
self.uri = uri
self.is_byterange = is_byterange
def log(self):
logger.debug('Segment URI: {uri}'.format(uri=self.uri))
class MPD():
def __init__(self, mpd_uri):
'''
@param mpd_uri Absolute URI of playlist
'''
if not streambot.is_full_uri(mpd_uri):
raise streambot.StreamBotError('DASHPlaylist URI is not absolute: {uri}'.format(uri=mpd_uri))
self.uri = mpd_uri
self.mpd = None
def download_and_save(self, output_dir=streambot._OUTPUT_DIR):
'''
download and save playlist
also parse media playlists
'''
self.local = streambot.download_and_save_to(self.uri, output_dir, True)
logger.debug('stream playlist is saved as: {local}'.format(local=self.local))
with open(self.local, 'r') as f:
content = f.read()
self.mpd = MPEGDASHParser.parse(content)
def is_live(self):
return 'static' != self.mpd.type
def parse_segments(self):
'''
Parse segment URLs from MPD
        Supports single segment URLs, segment lists (SegmentList), and segment templates (SegmentTemplate)
@return List of segment URLs
'''
mpd_base_url = self.mpd.base_urls[0] if self.mpd.base_urls else None
segments = []
for period in self.mpd.periods:
period_base_url = period.base_urls[0] if period.base_urls else None
if period.segment_lists:
segments = self._get_segments_from_period_segment_list(period)
elif period.segment_templates:
segments = self._get_segments_from_period_segment_template(period)
else:
for adaptation_set in period.adaptation_sets:
adaptation_set_base_url = adaptation_set.base_urls[0] if adaptation_set.base_urls else None
if adaptation_set.segment_lists:
segments = self._get_segments_from_adaptation_set_segment_list(period, adaptation_set)
elif adaptation_set.segment_templates:
segments = self._get_segments_from_adaptation_set_segment_template(period, adaptation_set)
else:
for representation in adaptation_set.representations:
representation_base_url = representation.base_urls[0] if representation.base_urls else None
if representation.segment_lists:
segments = self._get_segments_from_representation_segment_list(period, adaptation_set, representation)
elif representation.segment_templates:
segments = self._get_segments_from_representation_segment_template(period, adaptation_set, representation)
else:
# case of single segment URL
segment_url = _make_segment_url(mpd_base_url, period_base_url, adaptation_set_base_url, representation_base_url)
is_byterange = _check_is_byterange(mpd_base_url, period_base_url, adaptation_set_base_url, representation_base_url)
segments.append(DASHSegment(segment_url, is_byterange))
return segments
class DASHStreamBot(streambot.Bot):
'''
Stream bot for DASH stream
'''
def __init__(self, mpd_uri):
'''
@param mpd_uri Absolute URI of the master playlist
'''
streambot.Bot.__init__(self)
self.uri = mpd_uri
self.mpd = MPD(self.uri)
logger.debug('MPD URI: {uri}'.format(uri=self.mpd.uri))
def run(self):
try:
boss.start(num_workers=self.num_worker, action=streambot._download_task_action)
self._get_mpd()
if self.mpd.is_live():
self._get_live_segments()
else:
self._get_segments()
while not boss.have_all_tasks_done():
time.sleep(1)
except Exception as e:
logger.exception(e)
finally:
boss.stop()
self._report()
def _get_mpd(self):
self.mpd.download_and_save()
def _get_live_segments(self):
pass
def _get_segments(self):
segments = self.mpd.parse_segments()
logger.debug('Download {n} segments from mpd {uri}'.format(n=len(segments), uri=self.mpd.uri))
for s in segments:
task = streambot.create_download_task(s.uri, self.output_dir)
boss.assign_task(task)
if s.is_byterange:
break
for s in segments:
print(s.uri)
def _report(self):
pass
def _test():
mpd_url = 'http://dash.akamaized.net/dash264/TestCases/1a/netflix/exMPD_BIP_TC1.mpd'
bot = DASHStreamBot(mpd_url)
bot._get_mpd()
bot._get_segments()
if __name__ == '__main__':
_test()
|
the-stack_0_19206 | import os, time, bpy
import mathutils, bpy_extras.io_utils
print("export Radiance grid ...")
def name_compat(name):
if name is None:
return 'None'
else:
return name.replace(' ', '_')
def mesh2radgrid(filepath):
basename, ext = os.path.splitext(filepath)
scene = bpy.context.scene
objects = bpy.context.selected_objects
def veckey3d(v):
return round(v.x, 6), round(v.y, 6), round(v.z, 6)
def veckey2d(v):
return round(v[0], 6), round(v[1], 6)
# Initialize totals, these are updated each object
totverts = totuvco = totno = 1
face_vert_index = 1
globalNormals = {}
copy_set = set()
# Get all meshes
for ob_main in objects:
name = bpy.path.clean_name(ob_main.name)
fn = os.path.join(basename, name)
context_name = [fn, '', '', '.pnt'] # Base name, scene name, frame number, extension
filepath = ''.join(context_name)
print(filepath)
file = open(filepath, "w", encoding="utf8", newline="\n")
fw = file.write
# ignore dupli children
if ob_main.parent and ob_main.parent.dupli_type in {'VERTS', 'FACES'}:
# XXX
print(ob_main.name, 'is a dupli child - ignoring')
continue
obs = []
if ob_main.dupli_type != 'NONE':
# XXX
print('creating dupli_list on', ob_main.name)
ob_main.dupli_list_create(scene)
obs = [(dob.object, dob.matrix) for dob in ob_main.dupli_list]
# XXX debug print
print(ob_main.name, 'has', len(obs), 'dupli children')
else:
obs = [(ob_main, ob_main.matrix_world)]
for ob, ob_mat in obs:
#print(ob, ob_mat)
#print(dir(ob))
mesh = ob.data
#print(dir(mesh))
for vert in mesh.vertices:
#print(vert.co, vert.normal)
#print(dir(vert))
fw('%f %f %f %f %f %f\n' % (vert.co.x, vert.co.y, vert.co.z, vert.normal.x, vert.normal.y, vert.normal.z) )
file.close()
mesh2radgrid(os.path.dirname(bpy.data.filepath)) |
the-stack_0_19207 | """
Test that stepping works even when the OS Plugin doesn't report
all threads at every stop.
"""
from __future__ import print_function
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class TestOSPluginStepping(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@skipIfWindows
def test_python_os_plugin(self):
"""Test that stepping works when the OS Plugin doesn't report all
threads at every stop"""
self.build()
self.main_file = lldb.SBFileSpec('main.cpp')
self.run_python_os_step_missing_thread(False)
@skipIfWindows
def test_python_os_plugin_prune(self):
"""Test that pruning the unreported PlanStacks works"""
self.build()
self.main_file = lldb.SBFileSpec('main.cpp')
self.run_python_os_step_missing_thread(True)
def get_os_thread(self):
return self.process.GetThreadByID(0x111111111)
def is_os_thread(self, thread):
id = thread.GetID()
return id == 0x111111111
def run_python_os_step_missing_thread(self, do_prune):
"""Test that the Python operating system plugin works correctly"""
# Our OS plugin does NOT report all threads:
result = self.dbg.HandleCommand("settings set target.experimental.os-plugin-reports-all-threads false")
python_os_plugin_path = os.path.join(self.getSourceDir(),
"operating_system.py")
(target, self.process, thread, thread_bkpt) = lldbutil.run_to_source_breakpoint(
self, "first stop in thread - do a step out", self.main_file)
main_bkpt = target.BreakpointCreateBySourceRegex('Stop here and do not make a memory thread for thread_1',
self.main_file)
self.assertEqual(main_bkpt.GetNumLocations(), 1, "Main breakpoint has one location")
# There should not be an os thread before we load the plugin:
self.assertFalse(self.get_os_thread().IsValid(), "No OS thread before loading plugin")
# Now load the python OS plug-in which should update the thread list and we should have
# an OS plug-in thread overlaying thread_1 with id 0x111111111
command = "settings set target.process.python-os-plugin-path '%s'" % python_os_plugin_path
self.dbg.HandleCommand(command)
# Verify our OS plug-in threads showed up
os_thread = self.get_os_thread()
self.assertTrue(
os_thread.IsValid(),
"Make sure we added the thread 0x111111111 after we load the python OS plug-in")
# Now we are going to step-out. This should get interrupted by main_bkpt. We've
# set up the OS plugin so at this stop, we have lost the OS thread 0x111111111.
# Make sure both of these are true:
os_thread.StepOut()
stopped_threads = lldbutil.get_threads_stopped_at_breakpoint(self.process, main_bkpt)
self.assertEqual(len(stopped_threads), 1, "Stopped at main_bkpt")
thread = self.process.GetThreadByID(0x111111111)
self.assertFalse(thread.IsValid(), "No thread 0x111111111 on second stop.")
# Make sure we still have the thread plans for this thread:
# First, don't show unreported threads, that should fail:
command = "thread plan list -t 0x111111111"
result = lldb.SBCommandReturnObject()
interp = self.dbg.GetCommandInterpreter()
interp.HandleCommand(command, result)
self.assertFalse(result.Succeeded(), "We found no plans for the unreported thread.")
# Now do it again but with the -u flag:
command = "thread plan list -u -t 0x111111111"
result = lldb.SBCommandReturnObject()
interp.HandleCommand(command, result)
self.assertTrue(result.Succeeded(), "We found plans for the unreported thread.")
if do_prune:
# Prune the thread plan and continue, and we will run to exit.
interp.HandleCommand("thread plan prune 0x111111111", result)
self.assertTrue(result.Succeeded(), "Found the plan for 0x111111111 and pruned it")
# List again, make sure it doesn't work:
command = "thread plan list -u -t 0x111111111"
interp.HandleCommand(command, result)
self.assertFalse(result.Succeeded(), "We still found plans for the unreported thread.")
self.process.Continue()
self.assertEqual(self.process.GetState(), lldb.eStateExited, "We exited.")
else:
# Now we are going to continue, and when we hit the step-out breakpoint, we will
# put the OS plugin thread back, lldb will recover its ThreadPlanStack, and
# we will stop with a "step-out" reason.
self.process.Continue()
os_thread = self.get_os_thread()
self.assertTrue(os_thread.IsValid(), "The OS thread is back after continue")
self.assertTrue("step out" in os_thread.GetStopDescription(100), "Completed step out plan")
|
the-stack_0_19210 | import discord
from discord.ext import tasks
class MyClient(discord.Client):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# an attribute we can access from our task
self.counter = 0
# start the task to run in the background
self.my_background_task.start()
async def on_ready(self):
print(f"Logged in as {self.user} (ID: {self.user.id})")
print("------")
@tasks.loop(seconds=60) # task runs every 60 seconds
async def my_background_task(self):
channel = self.get_channel(1234567) # channel ID goes here
self.counter += 1
await channel.send(self.counter)
@my_background_task.before_loop
async def before_my_task(self):
await self.wait_until_ready() # wait until the bot logs in
client = MyClient()
client.run("token")
|
the-stack_0_19211 | """dbLoadTemplate and msi -S substitution grammar helpers."""
from __future__ import annotations
import collections
import logging
import pathlib
import re
import shlex
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
import apischema
import lark
from . import transformer
from .common import AnyPath, FullLoadContext
from .macro import MacroContext
from .transformer import context_from_token
logger = logging.getLogger(__name__)
def _strip_double_quote(value: str) -> str:
"""Strip one leading/single trailing double-quote."""
if value[0] == '"':
value = value[1:]
if value[-1] == '"':
value = value[:-1]
return value
RE_REMOVE_ESCAPE = re.compile(r"\\(.)")
def _fix_value(value: str) -> str:
"""Remove quotes, and fix up escaping."""
value = _strip_double_quote(value)
return RE_REMOVE_ESCAPE.sub(r"\1", value)
@dataclass
class Substitution:
"""
Single database template file from a full template.
Represents approximately one line of a .substitutions file. For example,
in this substitution file,
```
file template.txt {
pattern {a, b, c}
{A, B, C}
}
```
The resulting Substitution would be
``Substitution(macros={"a": "A", "b": "B", "c": "C"}, filename="template.txt")``.
Global macro values will be aggregated into this dictionary.
Inside of the template file - ``template.txt`` above:
* "include" is a supported command for the template file.
* "substitute" is optionally supported (set ``allow_substitute``)
"""
context: FullLoadContext
filename: Optional[str] = None
macros: Dict[str, str] = field(default_factory=dict)
use_environment: bool = False
allow_substitute: bool = True
_items: Optional[List[Any]] = field(
default_factory=list,
metadata=apischema.metadata.skip,
)
def expand_file(
self,
*,
filename: Optional[str] = None,
search_paths: Optional[List[AnyPath]] = None
) -> str:
"""
Expand the given file, looking in ``search_paths`` for template files.
Parameters
----------
filename : str, optional
Expand this file or fall back to the instance-defined filename.
search_paths : list of str or pathlib.Path, optional
List of paths to search for template files.
"""
filename = filename or self.filename
if filename is None:
raise ValueError("This substitution does not have a file defined")
filename = pathlib.Path(filename)
search_paths = search_paths or [filename.resolve().parent]
        with open(filename, "rt") as fp:
return self.expand(fp.read(), search_paths=search_paths)
@property
def macro_context(self) -> MacroContext:
"""The macro context to be used when expanding the template."""
ctx = MacroContext(use_environment=self.use_environment)
ctx.define(**self.macros)
return ctx
@staticmethod
def handle_include(filename: str, search_paths: List[AnyPath]) -> str:
"""Expand include files from the given search path."""
for path in search_paths:
option = pathlib.Path(path) / filename
if option.exists():
with open(option, "rt") as fp:
return fp.read()
friendly_paths = " or ".join(f'"{path}"' for path in search_paths)
raise FileNotFoundError(f"{filename} not found in {friendly_paths}")
def expand(self, source: str, search_paths: Optional[List[AnyPath]] = None):
"""
Expand the provided substitution template, using the macro environment.
Parameters
----------
source : str
The source substitution template. May contain "include" or
"substitute" lines.
search_paths : list of str or pathlib.Path, optional
List of paths to search for template files.
"""
ctx = self.macro_context
search_paths = search_paths or [pathlib.Path(".")]
results = []
source_stack = collections.deque(source.splitlines())
while source_stack:
line = source_stack.popleft()
logger.debug("line %r", line)
line = ctx.expand(line)
command, *command_args = line.strip().split(" ", 1)
if command == "include": # case sensitive
args = shlex.split(command_args[0])
if len(args) != 1:
raise ValueError(
f"Include command takes one argument; got: {args} "
f"where line={line!r}"
)
include_file = args[0]
logger.debug("Including file from %s", include_file)
include_source = self.handle_include(include_file, search_paths)
source_stack.extendleft(reversed(include_source.splitlines()))
logger.debug("stack %r", source_stack)
elif command == "substitute" and self.allow_substitute:
# Note that dbLoadTemplate does not support substitute, but msi
# does.
macro_string = command_args[0].strip()
# Strip only single beginning and end quotes
macro_string = _strip_double_quote(macro_string).strip()
logger.debug("Substituting additional macros %s", macro_string)
ctx.define_from_string(macro_string)
else:
results.append(line)
return "\n".join(results)
@dataclass
class VariableDefinitions:
"""Variable definitions."""
context: FullLoadContext
definitions: Dict[str, str] = field(default_factory=dict)
@dataclass
class GlobalDefinitions(VariableDefinitions):
"""Global variable definitions."""
@dataclass
class PatternHeader:
"""Pattern header."""
context: FullLoadContext
patterns: List[str]
@dataclass
class PatternValues:
"""Pattern values."""
context: FullLoadContext
values: List[str]
@dataclass
class TemplateSubstitution:
"""Database substitutions, containing zero or more template files."""
substitutions: List[Substitution] = field(default_factory=list)
def expand_template(
self,
template: str,
search_paths: Optional[List[AnyPath]] = None,
delimiter: str = "\n",
) -> str:
"""
Expands all substitutions for the given string.
Parameters
----------
template : str
The template text.
delimiter : str, optional
Delimiter to join individual substitutions.
search_paths : list of str or pathlib.Path, optional
List of paths to search for template files.
"""
return delimiter.join(
sub.expand(template, search_paths=search_paths)
for sub in self.substitutions
)
def expand_files(
self,
search_paths: Optional[List[AnyPath]] = None,
delimiter: str = "\n"
) -> str:
"""
Expands and combines all contained substitution files.
Parameters
----------
delimiter : str, optional
Delimiter to join individual substitutions.
search_paths : list of str or pathlib.Path, optional
List of paths to search for template files.
"""
return delimiter.join(
sub.expand_file(search_paths=search_paths)
for sub in self.substitutions
)
@classmethod
def from_string(
cls,
contents,
filename=None,
msi_format=False,
all_global_scope=False,
) -> TemplateSubstitution:
"""Load a template substitutions file given its string contents."""
comments = []
grammar_filename = "msi-sub.lark" if msi_format else "dbtemplate.lark"
grammar = lark.Lark.open_from_package(
"whatrecord",
grammar_filename,
search_paths=("grammar",),
parser="earley",
# TODO: This is unsupported in lark:
# lexer_callbacks={"COMMENT": comments.append},
maybe_placeholders=False,
propagate_positions=True,
)
if msi_format:
tr = _TemplateMsiTransformer(
cls, filename, all_global_scope=all_global_scope
)
else:
tr = _TemplateTransformer(cls, filename)
subs = tr.transform(grammar.parse(contents))
subs.comments = comments
return subs
@classmethod
def from_file_obj(cls, fp, filename=None) -> TemplateSubstitution:
"""Load a template file given a file object."""
return cls.from_string(
fp.read(),
filename=getattr(fp, "name", filename),
)
@classmethod
def from_file(cls, fn) -> TemplateSubstitution:
"""
Load a template file.
Parameters
----------
filename : pathlib.Path or str
The filename.
Returns
-------
template : TemplateSubstitution
The template file.
"""
with open(fn, "rt") as fp:
return cls.from_string(fp.read(), filename=fn)
@lark.visitors.v_args(inline=True)
class _TemplateMsiTransformer(lark.visitors.Transformer):
_allow_substitute = True
def __init__(self, cls, fn, visit_tokens=False, all_global_scope=False):
super().__init__(visit_tokens=visit_tokens)
self.fn = str(fn)
self._template = cls()
self._stack = []
self._globals = {}
self.all_global_scope = all_global_scope
@lark.visitors.v_args(tree=True)
def msi_substitutions(self, *_):
self._template.substitutions = self._squash_stack(self._stack)
return self._template
template_filename = transformer.stringify
pattern_name = transformer.stringify
pattern_value = transformer.pass_through
pattern_names = transformer.tuple_args
variable_substitutions = transformer.tuple_args
variable_definition = transformer.tuple_args
variable_definitions = transformer.dictify
def global_definitions(self, global_token: lark.Token, variable_definitions=None):
self._stack.append(
GlobalDefinitions(
context=context_from_token(self.fn, global_token),
definitions=dict(
(str(key), str(value))
for key, value in (variable_definitions or {}).items()
),
)
)
def variable_subs(self, variable_definitions):
token = list(variable_definitions)[0]
definitions = VariableDefinitions(
context=context_from_token(self.fn, token),
definitions=dict(
(str(key), _fix_value(value))
for key, value in (variable_definitions or {}).items()
),
)
self._stack.append(definitions)
return definitions
def pattern_header(self, pattern_token: lark.Token, *values):
header = PatternHeader(
context=context_from_token(self.fn, pattern_token),
patterns=list(_fix_value(value) for value in values),
)
self._stack.append(header)
return header
def pattern_values(self, *values):
pattern_values = PatternValues(
context=context_from_token(self.fn, values[0]),
values=list(_fix_value(value) for value in values),
)
self._stack.append(pattern_values)
return pattern_values
@lark.visitors.v_args(tree=True)
def empty(self, tree):
empty = PatternValues(
context=context_from_token(self.fn, tree),
values=[],
)
self._stack.append(empty)
return empty
def dbfile(self, file_token: lark.Token, filename: str, *fields):
stack = self._stack
file = Substitution(
context=context_from_token(self.fn, file_token),
filename=str(pathlib.Path(self.fn).parent / _strip_double_quote(filename)),
allow_substitute=self._allow_substitute,
_items=stack,
)
self._stack = [file]
return file
def _squash_stack(self, stack):
patterns = []
results = []
for item in stack:
if isinstance(item, Substitution):
item_stack = item._items
item._items = None
subs = self._squash_stack(item_stack)
for sub in subs:
sub.filename = item.filename
results.extend(subs)
elif isinstance(item, PatternHeader):
patterns = item.patterns
elif isinstance(item, PatternValues):
values = dict(self._globals)
pattern_dict = dict(zip(patterns, item.values))
values.update(pattern_dict)
results.append(
Substitution(
context=item.context,
filename=None,
macros=values,
allow_substitute=self._allow_substitute,
# fields=patterns,
# instances=values,
)
)
elif isinstance(item, GlobalDefinitions):
self._globals.update(item.definitions)
elif isinstance(item, VariableDefinitions):
values = dict(self._globals)
values.update(item.definitions)
if self.all_global_scope:
self._globals.update(item.definitions)
results.append(
Substitution(
context=item.context,
filename=None,
macros=values,
allow_substitute=self._allow_substitute,
# fields=patterns,
# instances=values,
)
)
return results
@lark.visitors.v_args(inline=True)
class _TemplateTransformer(_TemplateMsiTransformer):
_allow_substitute = False
@lark.visitors.v_args(tree=True)
def substitution_file(self, *_):
self._template.substitutions = self._squash_stack(self._stack)
return self._template
pattern_substitutions = transformer.tuple_args
substitutions = transformer.tuple_args
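# Minimal usage sketch (the template and substitution text below are made up
# for illustration; the classes and methods are the ones defined above):
#
#     subs = TemplateSubstitution.from_string(
#         "file template.txt {\n    pattern {name, value}\n    {ai1, 0}\n}\n"
#     )
#     print(subs.expand_template('record(ai, "$(name)") { field(VAL, "$(value)") }'))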
|
the-stack_0_19212 | """Implementation of ceQTL-tools genotype-qc"""
# pylint: disable=invalid-name,assigning-non-slot
from pathlib import Path
from pyppl import PyPPL
from bioprocs.plink import (pGTMat2Plink,
pPlinkMiss,
pPlinkFreq,
pPlinkHWE,
pPlinkIBD,
pPlinkRemove,
pPlink2GTMat,
pPlinkStats)
def plink_from_gtmat(opts):
"""Convert genotype matrix to plink format"""
pGTMat2Plink.input = [opts.gtmat]
pGTMat2Plink.args.snpbed = opts.snpbed
return pGTMat2Plink
def plink_to_gtmat(opts, indir):
"""Convert plink back to GT matrix"""
pPlinkStats.input = [indir]
pPlinkStats.args.params['het'] = False
pPlinkStats.args.params['check-sex'] = False
pPlinkStats.args.cutoff['hardy.hwe'] = opts.hwe
pPlinkStats.args.cutoff['missing.sample'] = opts.samplecr
pPlinkStats.args.cutoff['missing.snp'] = opts.snpcr
pPlinkStats.args.cutoff['freq'] = opts.maf
pPlinkStats.args.plot['het'] = False
pPlinkStats.args.plot['hardy.mingt'] = False
pPlinkStats.config.export_dir = opts.outdir
pPlink2GTMat.input = [indir]
pPlink2GTMat.output = 'outfile:file:%s, outsnp:file:%s.snp.bed' % (
Path(opts.gtmat).name, Path(opts.gtmat).stem.split('.')[0])
pPlink2GTMat.args.samid = 'iid'
pPlink2GTMat.args.snpid = '{rs}'
pPlink2GTMat.args.refallele = opts.snpbed
pPlink2GTMat.config.export_dir = opts.outdir
return pPlinkStats, pPlink2GTMat
def pipeline(opts, tag, indir):
"""Construct the pipeline"""
# processes are not supposed to be reused
# so we copy each process
# SNP & sample call rate
pPlinkMissQC = pPlinkMiss.copy(tag=tag)
pPlinkMissQC.input = [indir]
pPlinkMissQC.args.samplecr = opts.samplecr
pPlinkMissQC.args.snpcr = opts.snpcr
pPlinkMissQC.args.plot = False
pPlinkRemoveMiss = pPlinkRemove.copy(tag=tag)
pPlinkRemoveMiss.depends = pPlinkMissQC
pPlinkRemoveMiss.input = lambda ch: ch.insert(0, indir)
pPlinkFreqQC = pPlinkFreq.copy(tag=tag)
pPlinkFreqQC.depends = pPlinkRemoveMiss
pPlinkFreqQC.args.cutoff = opts.maf
pPlinkFreqQC.args.plot = False
pPlinkRemoveFreq = pPlinkRemove.copy(tag=tag)
pPlinkRemoveFreq.depends = pPlinkRemoveMiss, pPlinkFreqQC
pPlinkHWEQC = pPlinkHWE.copy(tag=tag)
pPlinkHWEQC.depends = pPlinkRemoveFreq
pPlinkHWEQC.args.cutoff = opts.hwe
pPlinkHWEQC.args.plot = False
pPlinkRemoveHWE = pPlinkRemove.copy(tag=tag)
pPlinkRemoveHWE.depends = pPlinkRemoveFreq, pPlinkHWEQC
pPlinkIBDQC = pPlinkIBD.copy(tag=tag)
pPlinkIBDQC.depends = pPlinkRemoveHWE
pPlinkIBDQC.args.plot = False
pPlinkIBDQC.args.seed = 8525
pPlinkRemoveIBD = pPlinkRemove.copy(tag=tag)
pPlinkRemoveIBD.depends = pPlinkRemoveHWE, pPlinkIBDQC
pPlinkRemoveIBD.output = 'outdir:dir:%s.plink' % Path(opts.gtmat).stem
return pPlinkMissQC, pPlinkRemoveIBD
def main(opts):
"""Main function"""
PyPPL(name='Convert genotype matrix to plink format') \
.start(plink_from_gtmat(opts)) \
.run()
indir = pGTMat2Plink.channel.get(0)
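    # Run the QC pipeline opts.iter times; each iteration starts from the plink
    # files produced by the previous one (indir is updated at the end of the loop).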
for i in range(opts.iter):
start, end = pipeline(opts, tag='iter%s' % (i + 1), indir=indir)
if i == opts.iter - 1:
end.config.export_dir = opts.outdir
PyPPL(name="Genotype data quality control, iteration %s" % (i + 1)) \
.start(start) \
.run()
indir = end.channel.get(0)
PyPPL(name='Convert plink back to genotype matrix') \
.start(plink_to_gtmat(opts, indir)) \
.run()
|
the-stack_0_19215 | import datetime
from xml.etree import ElementTree as etree
import pytest
from ..core.exceptions import (
FRITZ_ERRORS,
ActionError,
ServiceError,
FritzActionError,
FritzArgumentError,
FritzActionFailedError,
FritzArgumentValueError,
FritzOutOfMemoryError,
FritzSecurityError,
FritzArrayIndexError,
FritzLookUpError,
FritzArgumentStringToShortError,
FritzArgumentStringToLongError,
FritzArgumentCharacterError,
FritzInternalError,
)
from ..core.soaper import (
boolean_convert,
encode_boolean,
get_argument_value,
get_converted_value,
get_html_safe_value,
raise_fritzconnection_error,
)
content_template = """
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<s:Body>
<s:Fault>
<faultcode>s:Client</faultcode>
<faultstring>UPnPError</faultstring>
<detail>
<UPnPError xmlns="urn:schemas-upnp-org:control-1-0">
<errorCode>{error_code}</errorCode>
<errorDescription>Invalid Action</errorDescription>
</UPnPError>
</detail>
</s:Fault>
</s:Body>
</s:Envelope>
"""
class Response:
"""Namespace object."""
@pytest.mark.parametrize(
"error_code, exception", [
('401', FritzActionError),
('402', FritzArgumentError),
('501', FritzActionFailedError),
('600', FritzArgumentValueError),
('603', FritzOutOfMemoryError),
('606', FritzSecurityError),
('713', FritzArrayIndexError),
('714', FritzLookUpError),
('801', FritzArgumentStringToShortError),
('802', FritzArgumentStringToLongError),
('803', FritzArgumentCharacterError),
('820', FritzInternalError),
('713', IndexError),
('714', KeyError),
('401', ActionError),
]
)
def test_raise_fritzconnection_error(error_code, exception):
"""check for exception raising depending on the error_code"""
content = content_template.format(error_code=error_code)
response = Response()
response.content = content.encode()
pytest.raises(exception, raise_fritzconnection_error, response)
@pytest.mark.parametrize(
"value, expected_result", [
('0', False),
('1', True),
]
)
def test_boolean_convert(value, expected_result):
result = boolean_convert(value)
assert result == expected_result
@pytest.mark.parametrize(
"value", ['2', 'x', '3.1']
)
def test_boolean_convert_fails(value):
with pytest.raises(ValueError):
boolean_convert(value)
long_error = """
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<s:Body>
<s:Fault>
<faultcode> s:Client </faultcode>
<faultstring>
UPnPError </faultstring>
<detail>
<UPnPError xmlns="urn:schemas-upnp-org:control-1-0">
<errorCode> 401 </errorCode>
<errorDescription> Invalid Action </errorDescription>
</UPnPError>
</detail>
</s:Fault>
</s:Body>
</s:Envelope>
"""
def test_long_error_message():
response = Response()
response.content = long_error.encode()
with pytest.raises(ActionError) as exc:
raise_fritzconnection_error(response)
assert exc.value.args[0] == "\n".join(
["UPnPError: ",
"errorCode: 401",
"errorDescription: Invalid Action",
]
)
@pytest.mark.parametrize(
"value, expected_type", [
("text", str),
(0, int),
(1, int),
(None, int),
(False, int),
(True, int),
]
)
def test_encode_boolean(value, expected_type):
result = encode_boolean(value)
assert isinstance(result, expected_type)
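# get_html_safe_value() is expected to HTML-escape strings, hence the
# entity-encoded expected results in the parametrization below.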
@pytest.mark.parametrize(
"value, expected_results", [
(True, 1),
(False, 0),
(None, 0),
(3.141, 3.141),
("hello test", "hello test"),
("2021-07-17T12:00:00", "2021-07-17T12:00:00"), # redundant, but ISO ;)
("ham, spam & eggs", "ham, spam & eggs"),
("5 > 3", "5 > 3"),
("3 < 5", "3 < 5"),
('say "hello"', "say "hello""),
("let's test again", ["let' test again", "let's test again"])
]
)
def test_get_html_safe_value(value, expected_results):
if not isinstance(expected_results, list):
expected_results = [expected_results]
result = get_html_safe_value(value)
assert result in expected_results
@pytest.mark.parametrize(
"value, not_expected_type", [
(False, bool), # should be int after encoding, not bool
(True, bool),
]
)
def test_encode_boolean2(value, not_expected_type):
result = encode_boolean(value)
assert not isinstance(result, not_expected_type)
soap_root = etree.fromstring("""<?xml version="1.0"?>
<data>
<container>
<year>2010</year>
<msg>message text</msg>
<number>3.141</number>
<ip></ip>
</container>
</data>""")
@pytest.mark.parametrize(
"argument_name, expected_value", [
('year', '2010'),
('msg', 'message text'),
('number', '3.141'),
('ip', ''),
]
)
def test_get_argument_value(argument_name, expected_value):
value = get_argument_value(soap_root, argument_name)
assert value == expected_value
@pytest.mark.parametrize(
"data_type, value, expected_value", [
('datetime', '2020-02-02T10:10:10', datetime.datetime(2020, 2, 2, 10, 10, 10)),
('boolean', '1', True),
('boolean', '0', False),
('uuid', 'uuid:123', '123'),
('uuid', '123', '123'),
('i4', '42', 42),
('ui1', '42', 42),
('ui2', '42', 42),
('ui4', '42', 42),
]
)
def test_get_converted_value(data_type, value, expected_value):
result = get_converted_value(data_type, value)
assert result == expected_value
@pytest.mark.parametrize(
"data_type, value", [
('datetime', '2010.02.02-10:10:10'), # not ISO 8601
('boolean', ''), # neither '1' nor '0'
]
)
def test_get_converted_value_fails(data_type, value):
with pytest.raises(ValueError):
get_converted_value(data_type, value)
|
the-stack_0_19218 | # This Software (Dioptra) is being made available as a public service by the
# National Institute of Standards and Technology (NIST), an Agency of the United
# States Department of Commerce. This software was developed in part by employees of
# NIST and in part by NIST contractors. Copyright in portions of this software that
# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
# to Title 17 United States Code Section 105, works of NIST employees are not
# subject to copyright protection in the United States. However, NIST may hold
# international copyright in software created by its employees and domestic
# copyright (or licensing rights) in portions of software that were assigned or
# licensed to NIST. To the extent that NIST holds copyright in this software, it is
# being made available under the Creative Commons Attribution 4.0 International
# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
# of the software developed or licensed by NIST.
#
# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
# https://creativecommons.org/licenses/by/4.0/legalcode
import os
from pathlib import Path
from posixpath import join as urljoin
from typing import Any, Dict, List, Optional, Union
from urllib.parse import urlparse, urlunparse
import requests
from IPython.display import HTML
PathLike = List[Union[str, Path]]
class SecuringAIClient(object):
def __init__(self, address: Optional[str] = None) -> None:
address = f"{address}/api" if address else f"{os.environ['AI_RESTAPI_URI']}/api"
self._scheme, self._netloc, self._path, _, _, _ = urlparse(address)
@property
def experiment_endpoint(self) -> str:
return urlunparse(
(self._scheme, self._netloc, urljoin(self._path, "experiment/"), "", "", "")
)
@property
def job_endpoint(self) -> str:
return urlunparse(
(self._scheme, self._netloc, urljoin(self._path, "job/"), "", "", "")
)
@property
def task_plugin_endpoint(self) -> str:
return urlunparse(
(self._scheme, self._netloc, urljoin(self._path, "taskPlugin/"), "", "", "")
)
@property
def task_plugin_builtins_endpoint(self) -> str:
return urlunparse(
(
self._scheme,
self._netloc,
urljoin(self._path, "taskPlugin/securingai_builtins"),
"",
"",
"",
)
)
@property
def task_plugin_custom_endpoint(self) -> str:
return urlunparse(
(
self._scheme,
self._netloc,
urljoin(self._path, "taskPlugin/securingai_custom"),
"",
"",
"",
)
)
@property
def queue_endpoint(self) -> str:
return urlunparse(
(self._scheme, self._netloc, urljoin(self._path, "queue/"), "", "", "")
)
def delete_custom_task_plugin(self, name: str):
plugin_name_query: str = urljoin(self.task_plugin_custom_endpoint, name)
return requests.delete(plugin_name_query).json()
def get_experiment_by_id(self, id: int):
experiment_id_query: str = urljoin(self.experiment_endpoint, str(id))
return requests.get(experiment_id_query).json()
def get_experiment_by_name(self, name: str):
experiment_name_query: str = urljoin(self.experiment_endpoint, "name", name)
return requests.get(experiment_name_query).json()
def get_job_by_id(self, id: str):
job_id_query: str = urljoin(self.job_endpoint, id)
return requests.get(job_id_query).json()
def get_queue_by_id(self, id: int):
queue_id_query: str = urljoin(self.queue_endpoint, str(id))
return requests.get(queue_id_query).json()
def get_queue_by_name(self, name: str):
queue_name_query: str = urljoin(self.queue_endpoint, "name", name)
return requests.get(queue_name_query).json()
def get_builtin_task_plugin(self, name: str):
task_plugin_name_query: str = urljoin(self.task_plugin_builtins_endpoint, name)
return requests.get(task_plugin_name_query).json()
def get_custom_task_plugin(self, name: str):
task_plugin_name_query: str = urljoin(self.task_plugin_custom_endpoint, name)
return requests.get(task_plugin_name_query).json()
def list_experiments(self) -> List[Dict[str, Any]]:
return requests.get(self.experiment_endpoint).json()
def list_jobs(self) -> List[Dict[str, Any]]:
return requests.get(self.job_endpoint).json()
def list_queues(self) -> List[Dict[str, Any]]:
return requests.get(self.queue_endpoint).json()
def list_all_task_plugins(self) -> List[Dict[str, Any]]:
return requests.get(self.task_plugin_endpoint).json()
def list_builtin_task_plugins(self) -> List[Dict[str, Any]]:
return requests.get(self.task_plugin_builtins_endpoint).json()
def list_custom_task_plugins(self) -> List[Dict[str, Any]]:
return requests.get(self.task_plugin_custom_endpoint).json()
def lock_queue(self, name: str):
queue_name_query: str = urljoin(self.queue_endpoint, "name", name, "lock")
return requests.put(queue_name_query).json()
def unlock_queue(self, name: str):
queue_name_query: str = urljoin(self.queue_endpoint, "name", name, "lock")
return requests.delete(queue_name_query).json()
def register_experiment(self, name: str) -> Dict[str, Any]:
experiment_registration_form = {"name": name}
response = requests.post(
self.experiment_endpoint,
data=experiment_registration_form,
)
return response.json()
def register_queue(self, name: str = "tensorflow_cpu") -> Dict[str, Any]:
queue_registration_form = {"name": name}
response = requests.post(
self.queue_endpoint,
data=queue_registration_form,
)
return response.json()
def submit_job(
self,
workflows_file: PathLike,
experiment_name: str,
entry_point: str,
entry_point_kwargs: Optional[str] = None,
depends_on: Optional[str] = None,
queue: str = "tensorflow_cpu",
timeout: str = "24h",
) -> Dict[str, Any]:
job_form = {
"experiment_name": experiment_name,
"queue": queue,
"timeout": timeout,
"entry_point": entry_point,
}
if entry_point_kwargs is not None:
job_form["entry_point_kwargs"] = entry_point_kwargs
if depends_on is not None:
job_form["depends_on"] = depends_on
workflows_file = Path(workflows_file)
with workflows_file.open("rb") as f:
job_files = {"workflow": (workflows_file.name, f)}
response = requests.post(
self.job_endpoint,
data=job_form,
files=job_files,
)
return response.json()
def upload_custom_plugin_package(
self,
custom_plugin_name: str,
custom_plugin_file: PathLike,
collection: str = "securingai_custom",
) -> Dict[str, Any]:
plugin_upload_form = {
"task_plugin_name": custom_plugin_name,
"collection": collection,
}
custom_plugin_file = Path(custom_plugin_file)
with custom_plugin_file.open("rb") as f:
custom_plugin_file = {"task_plugin_file": (custom_plugin_file.name, f)}
response = requests.post(
self.task_plugin_endpoint,
data=plugin_upload_form,
files=custom_plugin_file,
)
return response.json()
def notebook_gallery(images: PathLike, row_height: str = "auto") -> HTML:
"""Display a set of images in a gallery that flexes with the width of the notebook.
Adapted from https://mindtrove.info/jupyter-tidbit-image-gallery/.
Args:
images: Filepaths of images to display
row_height: CSS height value to assign to all images. Set to 'auto' by default
to show images with their native dimensions. Set to a value like '250px' to
make all rows in the gallery equal height.
"""
figures = []
for image in images:
caption = f'<figcaption style="font-size: 0.6em">{image}</figcaption>'
figures.append(
f"""
<figure style="margin: 5px !important;">
<img src="{image}" style="height: {row_height}">
{caption}
</figure>
"""
)
return HTML(
data=f"""
<div style="display: flex; flex-flow: row wrap; text-align: center;">
{''.join(figures)}
</div>
"""
)
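# Minimal usage sketch (address and file names are placeholders):
#
#     client = SecuringAIClient("http://localhost")
#     client.register_experiment("mnist")
#     response = client.submit_job(
#         workflows_file="workflows.tar.gz",
#         experiment_name="mnist",
#         entry_point="train",
#     )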
|
the-stack_0_19223 | import cv2
import mediapipe as mp
import time
from pynput.mouse import Button
from pynput.mouse import Controller as mouseController
from pynput.keyboard import Key, Controller as keyController
mouse = mouseController()
keyCont = keyController()
class HandDetection():
def __init__(self, mode = False, maxHands = 1, detect_confidence = 0.5, track_confidence = 0.5):
self.mode = mode
self.maxHands = maxHands
self.detect_confidence = detect_confidence
self.track_confidence = track_confidence
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, #detects image - tracks only if there is a high confidence: setting True makes program very slow
self.maxHands,
self.detect_confidence,
self.track_confidence) #if this param value goes below 0.5, the module will start detection again instead of tracking
self.mpDraw = mp.solutions.drawing_utils
def findGestures(self):
cap = cv2.VideoCapture(0)
pTime = 0
cTime = 0
while(True):
key = ""
self.success, self.frame = cap.read()
imgRGB = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
#print(results.multi_hand_landmarks)
self.landmark = []
yval = []
xval = []
if self.results.multi_hand_landmarks:
for handLm in self.results.multi_hand_landmarks:
for id, lm in enumerate(handLm.landmark):
height, width, channels = self.frame.shape
realx ,realy = int(lm.x*width), int(lm.y * height) #landmark coordinates are given as proportions of the actual image instead of pixel - have to be converted
#print("Landmark: {}, X: {}, Y: {}".format(id, realx, realy))
self.landmark.append(id)
yval.append(realy)
xval.append(realx)
#can use if statements pertaining to landmark numbers to test their movement/status
# for example: for thumbs up, check whether the y position of landmark 4 (thumb tip) is higher than landmark 3 (lower thumb landmark), etc.
self.mpDraw.draw_landmarks(self.frame, handLm, self.mpHands.HAND_CONNECTIONS)
if (self.thumbDown(yval)):
print("thumb down")
mouse.scroll(0, -2)
elif (self.thumbUp(yval)):
print("thumb up")
mouse.scroll(0, 2)
elif(self.pointFor(xval)):
print("forward")
keyCont.press(Key.ctrl)
keyCont.press(Key.tab)
keyCont.release(Key.ctrl)
keyCont.release(Key.tab)
time.sleep(1)
elif (self.leftClick(yval)):
print("left click")
mouse.press(Button.left)
mouse.release(Button.left)
time.sleep(0.5)
elif(self.rightClick(yval)):
print("right click")
mouse.press(Button.right)
mouse.release(Button.right)
time.sleep(0.5)
elif(self.pointBack(xval)):
print("backward")
key = "ctrl+shift+tab"
keyCont.press(Key.ctrl)
keyCont.press(Key.shift)
keyCont.press(Key.tab)
keyCont.release(Key.ctrl)
keyCont.release(Key.shift)
keyCont.release(Key.tab)
time.sleep(1)
elif(self.palm(yval)):
print("mouse")
mouse.position = (realx*5, realy*5)
else:
print("none")
#cTime = time.time()
#fps = 1/(cTime - pTime)
#pTime = cTime
#cv2.putText(self.frame, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 255), 5)
cv2.imshow("Frame", self.frame)
cv2.waitKey(10)
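    # The helpers below compare MediaPipe hand-landmark indices
    # (0 = wrist, 4 = thumb tip, 8 = index tip, 12 = middle tip, 16 = ring tip, 20 = pinky tip).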
def thumbUp(self, yval):
if len(yval) != 0:
if self.landmark[yval.index(min(yval))] == 4:
return True
def palm(self, yval):
if len(yval) != 0:
if(self.landmark[yval.index(max(yval))]) == 0 and self.landmark[yval.index(min(yval))] == 12 and yval[17]>yval[20]and yval[13]>yval[16] and yval[9]>yval[12] and yval[5]>yval[8]:
return True
def pointBack(self, xval):
if len(xval) != 0:
if (self.landmark[xval.index(max(xval))]) == 8 and self.landmark[xval.index(min(xval))] == 0:
return True
def pointFor(self, xval):
if len(xval) != 0:
if (self.landmark[xval.index(max(xval))]) == 0 and self.landmark[xval.index(min(xval))] == 8:
return True
def thumbDown(self, yval):
if len(yval) != 0:
if self.landmark[yval.index(max(yval))] == 4:
return True
def rightClick(self, yval):
if len(yval)!= 0:
if(self.landmark[yval.index(min(yval))] == 12 and yval[17]<yval[20]and yval[13]<yval[16] and yval[5]>yval[8]): #5,9,13,17 - knuckles
return True
def leftClick(self, yval):
if len(yval) != 0:
if(yval[5]<yval[8] and yval[17]>yval[20]and yval[13]>yval[16]):
return True
if __name__ == "__main__":
hand = HandDetection()
hand.findGestures() |
the-stack_0_19224 | from rest_framework import routers
from rest_framework.routers import Route
class BatchRouter(routers.SimpleRouter):
"""A router that is designed for batch updates on the `OrderItem` entity."""
routes = [
Route(
url=r'^{prefix}$',
mapping={
'post': 'create',
'delete': 'destroy_all',
'put': 'update_all'
},
name='{basename}-list',
detail=False,
initkwargs={}
),
Route(
url=r'^{prefix}/{lookup}$',
mapping={
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
detail=True,
initkwargs={}
)
]
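# Example registration (sketch; OrderItemViewSet is a hypothetical viewset):
#
#     router = BatchRouter()
#     router.register(r'order-items', OrderItemViewSet, basename='order-item')
#     urlpatterns = router.urls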
|
the-stack_0_19227 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
# Maltego local tranform for the Baidu Translate API.
# Translate simpified Chinese to English.
import sys
import random
import codecs
import requests
import json
import md5
import urllib
from MaltegoTransform import *
import baiduApiKeys
# Function to remove multiple substrings from a string
def multiReplace(inputStr, replacements):
newStr = inputStr
# Iterate over the strings to be replaced
for replaceStr in replacements:
newStr = newStr.replace(replaceStr, "")
return newStr
# Function to clean input strings
def cleanInput(inputStr):
cleanStr = multiReplace(inputStr, [',', '.', '-', "'", ':', ';']).strip()
return cleanStr
# Initialize Maltego library
m = MaltegoTransform()
# Handle and clean user input (simplified Chinese character string)
inputText = cleanInput((sys.argv[1]).decode('utf8'))
# Import Baidu API keys
appid = baiduApiKeys.appid
secretKey = baiduApiKeys.secretKey
# Configure required parameters for Baidu Translate API
baseurl = 'http://api.fanyi.baidu.com/api/trans/vip/translate'
fromLang = 'zh'
toLang = 'en'
salt = random.randint(32768, 65536)
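# The request signature is MD5(appid + query + salt + secretKey), computed below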
sign = appid + inputText.encode('utf8') + str(salt) + secretKey
m1 = md5.new()
m1.update(sign)
sign = m1.hexdigest()
# Create Baidu Translate API connection URL
url = '{}?appid={}&q={}&from={}&to={}&salt={}&sign={}'.format(baseurl, appid, urllib.quote(inputText.encode('utf8')), fromLang, toLang, str(salt), sign)
# Connect to Baidu Translate API using the created URL
try:
# Baidu translate API get request
r = requests.get(url)
if r.status_code == 200:
# Baidu translate API returns results in JSON array
result = json.loads(r.content)
translation = result['trans_result'][0]['dst']
# Add translation results as Maltego Phrase entity
m.addEntity('maltego.Phrase', translation.encode('utf8'))
except Exception as e:
# Pass error message to Maltego UI
m.addUIMessage("Source Language not Recognized.")
# Return results to Maltego chart
m.returnOutput()
|
the-stack_0_19230 | import gym
import numpy as np
# Types
Program = [str]
Population = [Program]
class Agent:
def __init__(self):
# Agent structure parameters
self._T_values = ["pa", 'pv', '0.0', '0.025']
self._T_actions = ['L', 'R']
self._F = ["IFLTE"]
self._program_depth = 2
self._actions = {'L': 0, 'R': 1}
# GP experiment parameters
self._pop_size = 100
self._num_eps = 1000 # number of episodes to evaluate each program on
self._max_gens = 2 # max number of generations to evolve
self._term_score = 195.0 # fitness score termination criterion
self._init_pop = self._gen_init_pop()
self._best_program = []
def run(self):
if self._best_program == []:
self.train()
print("\nBest program after training:")
print(self._best_program)
env = gym.make("CartPole-v0")
net_reward = 0
for _ in range(self._num_eps):
ep_reward = 0
done = False
obs = env.reset()
while not done:
env.render()
action = self._eval(self._best_program, obs)
obs, reward, done, _ = env.step(action)
ep_reward += reward
net_reward += ep_reward
print("\nAverage reward over {} trials: {}".format(self._num_eps, net_reward/self._num_eps))
env.close()
def train(self):
best_program = []
# Evolve generations
current_pop = self._init_pop
for gen_idx in range(self._max_gens):
print("\nGeneration {}...".format(gen_idx+1))
scores = self._batch_fit(current_pop)
# Check termination criteria before evolving next generation
max_score = max(scores)
if max_score >= self._term_score:
best_program = current_pop[scores.index(max_score)]
break
# Selection & reproduction
next_pop = [self._select(current_pop, scores) for _ in range(self._pop_size)]
current_pop = next_pop
# If a solution wasn't found before reaching the last generation
# pick the best program from the last generation as the solution.
if gen_idx >= self._max_gens-1:
last_scores = self._batch_fit(current_pop)
max_score_idx = last_scores.index(max(last_scores))
best_program = current_pop[max_score_idx]
self._best_program = best_program
def _gen_init_pop(self) -> Population:
n = self._pop_size
pop = [self._gen_program(self._program_depth) for _ in range(n)]
return pop
def _gen_program(self, d: int) -> Program:
"""
Generates a program of arbitrary depth d.
"""
p = []
func = np.random.choice(self._F)
arg1 = np.random.choice(self._T_values)
arg2 = np.random.choice(self._T_values)
if d <= 1:
arg3 = np.random.choice(self._T_actions)
arg4 = np.random.choice(self._T_actions)
else:
arg3 = self._gen_program(d-1)
arg4 = self._gen_program(d-1)
p = [func, arg1, arg2, arg3, arg4]
return p
def _batch_fit(self, pop: Population) -> [float]:
"""
Computes the fitness of a population of programs.
- pop: population (list of programs)
- return: list of fitness scores
"""
fit_scores = []
env = gym.make("CartPole-v0")
fit_scores = [self._fit(p, env) for p in pop]
env.close()
return fit_scores
def _fit(self, p: Program, env) -> float:
"""
Computes the average fitness of a program over
a certain number of runs of the environment.
- p: program
- env: gym environment object
- return: fitness score
"""
avg_reward = 0
net_reward = 0
num_eps = self._num_eps
# Run episodes
for _ in range(num_eps):
ep_reward = 0
done = False
obs = env.reset()
# Run single episode
while not done:
action = self._eval(p, obs)
obs, rew, done, _ = env.step(action)
ep_reward += rew
net_reward += ep_reward
avg_reward = net_reward / num_eps
return avg_reward
def _eval(self, p:Program, obs:[float]) -> int:
"""
Interpreter: this function evaluates a program and outputs
the action it takes, parameterised by an observation from the environment.
- p: program to evaluate
- obs: gym environment observation object
- return: action (0 or 1 for CartPole-v0)
"""
action = -1
pa = obs[2]
pv = obs[3]
# Evaluate arguments 1 and 2
if p[1] == 'pa':
arg1 = pa
elif p[1] == 'pv':
arg1 = pv
else:
arg1 = float(p[1])
if p[2] == 'pa':
arg2 = pa
elif p[2] == 'pv':
arg2 = pv
else:
arg2 = float(p[2])
# Evaluate arguments 3 and 4
arg3 = self._eval(p[3], obs) if type(p[3]) is list else self._actions[p[3]]
arg4 = self._eval(p[4], obs) if type(p[4]) is list else self._actions[p[4]]
# Evaluate IFLTE(arg1, arg2, arg3, arg4)
if arg1 <= arg2:
action = arg3
else:
action = arg4
return action
# Genetic operators #
def _select(self, pop: Population, fit_scores: [float]) -> Program:
"""
Fitness Proportionate Selection (Roulette Wheel Selection)
pop: population
f_scores: fitness scores
"""
selected = []
F = sum(fit_scores)
r = np.random.uniform(0, F)
# Simulate roulette wheel with r as the fixed point
counter = 0
for i in range(len(fit_scores)):
counter += fit_scores[i]
if counter > r:
selected = pop[i]
break
return selected
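# Minimal usage sketch:
#
#     agent = Agent()
#     agent.train()  # evolve programs against CartPole-v0
#     agent.run()    # render episodes with the best program found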
|
the-stack_0_19235 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Graph transformation specific to accelerator.
This module provides NNVM graph transformations that turn
a generic NNVM graph into a version that can
be executed on the accelerator.
"""
import nnvm
from nnvm.compiler import graph_attr, graph_util
def _pack_batch_channel(data, dshape, bfactor, cfactor):
"""Pack the data channel dimension.
"""
assert dshape[0] % bfactor == 0
assert dshape[1] % cfactor == 0
data = nnvm.sym.reshape(data,
shape=(dshape[0] // bfactor, bfactor,
dshape[1] // cfactor, cfactor,
dshape[2], dshape[3]))
data = nnvm.sym.transpose(
data, axes=(0, 2, 4, 5, 1, 3))
return data
def _unpack_batch_channel(data, old_shape):
"""Unpack the data channel dimension.
"""
data = nnvm.sym.transpose(data, axes=(0, 4, 1, 5, 2, 3))
data = nnvm.sym.reshape(data, shape=old_shape)
return data
def _pack_weight(data, dshape, cfactor):
"""Pack the weight into packed format.
"""
assert len(dshape) == 4
assert dshape[0] % cfactor == 0
assert dshape[1] % cfactor == 0
data = nnvm.sym.reshape(data,
shape=(dshape[0] // cfactor, cfactor,
dshape[1] // cfactor, cfactor,
dshape[2], dshape[3]))
data = nnvm.sym.transpose(
data, axes=(0, 2, 4, 5, 1, 3))
return data
def _pack_bias(data, dshape, bfactor, cfactor):
"""Pack the bias parameter.
"""
assert len(dshape) == 3
assert dshape[0] % cfactor == 0
data = nnvm.sym.reshape(data,
shape=(dshape[0] // cfactor,
cfactor, dshape[1],
dshape[2], 1))
data = nnvm.sym.transpose(
data, axes=(0, 2, 3, 4, 1))
# broadcast batch dimension to bfactor
data = nnvm.sym.broadcast_to(
data,
shape=(dshape[0] // cfactor, dshape[1], dshape[2], bfactor, cfactor))
return data
def _get_shape(sym, shape_dict):
"""Get the shape of a node.
"""
return graph_util.infer_shape(
nnvm.graph.create(sym), **shape_dict)[1][0]
def clean_conv_fuse(graph):
"""Cleanup the convolution's later fuse stages
Parameters
----------
graph : Graph
Input graph
Returns
-------
graph : Graph
Optimized graph
"""
def _clean_entry(entry):
node, flag = entry
if flag:
node = nnvm.symbol.clip(node, a_max=127, a_min=-127)
node = nnvm.symbol.cast(node, dtype="int8")
# Use copy as a hint to block conv2d schedules
node = nnvm.symbol.copy(node)
flag = False
return node, flag
gidx = graph.index
ref_count = {}
# count reference of each node
for nid, node in enumerate(gidx.nodes):
ref_count[nid] = 0
for elem in node["inputs"]:
ref_count[elem[0]] += 1
# construction remap
# entry_id->(new_node, conv_fuse)
# need_fold: bool indicates if we need fold
node_map = {}
for nid, node in enumerate(gidx.nodes):
children = [node_map[e[0]] for e in node["inputs"]]
attrs = node.get("attrs", {})
node_name = node["name"]
op_name = node["op"]
get_clone = lambda c, o_n, n_n, a: getattr(nnvm.symbol, o_n)(
*c, name=n_n, **a)
new_entry = None
if op_name == "null":
new_entry = (nnvm.symbol.Variable(node_name), False)
elif op_name in ("cast", "clip"):
if children[0][1]:
new_entry = children[0]
else:
new_entry = (
get_clone([children[0][0]], op_name, node_name, attrs),
False)
elif op_name == "conv2d" and attrs["out_dtype"] == "int32":
data, weight = children
data = _clean_entry(data)
new_node = nnvm.sym.conv2d(
data[0], weight[0], name=node_name, **attrs)
new_entry = (new_node, True)
elif op_name in ("__lshift_scalar__", "__rshift_scalar__", "relu"):
new_entry = (
get_clone([children[0][0]], op_name, node_name, attrs),
children[0][1])
elif op_name in ("broadcast_add", "broadcast_mul"):
rhs = children[1][0]
lhs, _ = _clean_entry(children[0])
lhs = nnvm.sym.cast(lhs, dtype="int32")
rhs = nnvm.sym.cast(rhs, dtype="int32")
new_entry = (
get_clone([lhs, rhs], op_name, node_name, attrs),
False)
if new_entry is None:
inputs = [_clean_entry(x) for x in children]
new_entry = (
get_clone([x[0] for x in inputs], op_name, node_name, attrs),
False)
if ref_count[nid] > 1:
new_entry = _clean_entry(new_entry)
node_map[nid] = new_entry
assert len(graph.index.output_entries) == 1
ret = node_map[graph.index.output_entries[0][0]][0]
ret = nnvm.graph.create(ret)
return ret
def clean_cast(graph):
"""
    Move the casts to the early part of the graph and
    remove unnecessary clip operations when possible.
"""
gidx = graph.index
node_map = {}
def _clean_cast(node, target_type):
op_name = node.attr("op_name")
if op_name == "cast":
return _clean_cast(node.get_children(), target_type)
if op_name == "relu":
data, has_clip = _clean_cast(
node.get_children(), target_type)
data = nnvm.sym.relu(data)
return data, has_clip
return nnvm.sym.cast(node, dtype=target_type), False
for nid, node in enumerate(gidx.nodes):
children = [node_map[e[0]] for e in node["inputs"]]
attrs = node.get("attrs", {})
node_name = node["name"]
op_name = node["op"]
get_clone = lambda c, o_n, n_n, a: getattr(nnvm.symbol, o_n)(
*c, name=n_n, **a)
if op_name == "null":
new_node = nnvm.symbol.Variable(node_name)
elif op_name == "cast":
dtype = attrs["dtype"]
new_node, _ = _clean_cast(children[0], dtype)
elif op_name == "conv2d" and attrs["out_dtype"] == "int32":
data, weight = children
data, _ = _clean_cast(data, "int8")
weight, _ = _clean_cast(weight, "int8")
new_node = nnvm.sym.conv2d(
data, weight, name=node_name, **attrs)
elif op_name == "elemwise_add":
lhs, rhs = children
rhs = nnvm.sym.cast(rhs, dtype="int8")
new_node = nnvm.sym.elemwise_add(lhs, rhs)
else:
new_node = get_clone(children, op_name, node_name, attrs)
node_map[nid] = new_node
assert len(graph.index.output_entries) == 1
ret = node_map[graph.index.output_entries[0][0]]
ret = nnvm.graph.create(ret)
return ret
def pack(graph, shape_dict, bfactor, cfactor, start_name=None):
"""Pack the graph into batch&channel packed format.
Parameters
----------
graph : Graph
The input graph.
    shape_dict : dict of str to shape
The input shape.
bfactor : int
The packing factor in batch
cfactor : int
The packing factor in channel
start_name: str, optional
        Name of a known node from which to start packing.
Returns
-------
graph : Graph
The transformed graph.
"""
graph = graph_attr.set_shape_inputs(graph, shape_dict)
graph = graph.apply("InferShape")
shape = graph.json_attr("shape")
gidx = graph.index
node_map = {}
dset = set()
counter = 0
start_pack = False
for nid, node in enumerate(gidx.nodes):
children = [node_map[e[0]] for e in node["inputs"]]
ishape = [shape[gidx.entry_id(e)] for e in node["inputs"]]
oshape = shape[gidx.entry_id(nid, 0)]
attrs = node.get("attrs", {})
node_name = node["name"]
op_name = node["op"]
get_clone = lambda c, o_n, n_n, a: getattr(nnvm.symbol, o_n)(
*c, name=n_n, **a)
if op_name == "null":
new_node = nnvm.symbol.Variable(node_name)
if start_name and node_name == start_name:
start_pack = True
new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
elif op_name == "max_pool2d":
assert not start_pack
start_pack = True
new_node = get_clone(children, op_name, node_name, attrs)
new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
elif op_name == "global_avg_pool2d":
if start_pack:
start_pack = False
children[0] = _unpack_batch_channel(children[0], ishape[0])
new_node = getattr(nnvm.symbol, op_name)(
*children, name=node_name, **attrs)
else:
new_node = get_clone(children, op_name, node_name, attrs)
elif op_name == "conv2d" and attrs["out_dtype"] == "int32":
if start_pack:
attrs["layout"] = "NCHW%dn%dc" % (bfactor, cfactor)
attrs["kernel_layout"] = "OIHW%do%di" % (cfactor, cfactor)
data, weight = children
weight = _pack_weight(weight, ishape[1], cfactor)
new_node = nnvm.sym.conv2d(
data, weight, name=node_name, **attrs)
elif counter == 1:
attrs["layout"] = "NCHW%dn%dc" % (bfactor, cfactor)
attrs["kernel_layout"] = "OIHW%do%di" % (cfactor, cfactor)
data, weight = children
data = _pack_batch_channel(data, ishape[0], bfactor, cfactor)
weight = _pack_weight(weight, ishape[1], cfactor)
new_node = nnvm.sym.conv2d(
data, weight, name=node_name, **attrs)
new_node = _unpack_batch_channel(new_node, oshape)
counter = counter + 1
else:
new_node = get_clone(children, op_name, node_name, attrs)
elif op_name.startswith("broadcast"):
if start_pack:
assert len(ishape[1]) == 3
children[1] = _pack_bias(children[1], ishape[1], bfactor, cfactor)
new_node = getattr(nnvm.symbol, op_name)(
*children, name=node_name, **attrs)
else:
new_node = get_clone(children, op_name, node_name, attrs)
elif op_name.startswith("elementwise_add"):
new_node = get_clone(children, op_name, node_name, attrs)
else:
new_node = get_clone(children, op_name, node_name, attrs)
dset.add(op_name)
node_map[nid] = new_node
assert len(graph.index.output_entries) == 1
ret = node_map[graph.index.output_entries[0][0]]
if start_pack:
oshape = shape[graph.index.output_entries[0][0]]
ret = _unpack_batch_channel(ret, oshape)
graph = nnvm.graph.create(ret)
graph = graph_attr.set_shape_inputs(graph, shape_dict)
graph = graph.apply("InferShape")
return graph
|
the-stack_0_19236 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from model.utils.config import cfg
from model.faster_rcnn.faster_rcnn import _fasterRCNN
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
import torch.utils.model_zoo as model_zoo
import pdb
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth',
'resnet34': 'https://s3.amazonaws.com/pytorch/models/resnet34-333f7ec4.pth',
'resnet50': 'https://s3.amazonaws.com/pytorch/models/resnet50-19c8e357.pth',
'resnet101': 'https://s3.amazonaws.com/pytorch/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://s3.amazonaws.com/pytorch/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True) # change
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# it is slightly better whereas slower to set stride = 1
# self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
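# Illustrative usage of the constructors above (a hedged sketch, not part of the
# original file; assumes `torch` is importable and an ImageNet-style input size):
#
#   model = resnet50(pretrained=False)
#   model.eval()
#   with torch.no_grad():
#       logits = model(torch.randn(1, 3, 224, 224))
#   # logits.shape == (1, 1000) for the default num_classes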
class resnet(_fasterRCNN):
def __init__(self, classes, num_layers=101, pretrained=False, class_agnostic=False):
self.model_path = 'data/imagenet_weights/resnet50.pth'
self.dout_base_model = 1024
self.pretrained = pretrained
self.class_agnostic = class_agnostic
_fasterRCNN.__init__(self, classes, class_agnostic)
def _init_modules(self):
resnet = resnet101()
    if self.pretrained:
      print("Loading pretrained weights from %s" % (self.model_path))
      state_dict = torch.load(self.model_path)
      resnet.load_state_dict({k: v for k, v in state_dict.items() if k in resnet.state_dict()}, strict=False)
# Build resnet.
self.RCNN_base = nn.Sequential(resnet.conv1, resnet.bn1,resnet.relu,
resnet.maxpool,resnet.layer1,resnet.layer2,resnet.layer3)
self.RCNN_top = nn.Sequential(resnet.layer4)
self.RCNN_cls_score = nn.Linear(2048, self.n_classes)
if self.class_agnostic:
self.RCNN_bbox_pred = nn.Linear(2048, 4)
else:
self.RCNN_bbox_pred = nn.Linear(2048, 4 * self.n_classes)
# Fix blocks
for p in self.RCNN_base[0].parameters(): p.requires_grad=False
for p in self.RCNN_base[1].parameters(): p.requires_grad=False
assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
if cfg.RESNET.FIXED_BLOCKS >= 3:
for p in self.RCNN_base[6].parameters(): p.requires_grad=False
if cfg.RESNET.FIXED_BLOCKS >= 2:
for p in self.RCNN_base[5].parameters(): p.requires_grad=False
if cfg.RESNET.FIXED_BLOCKS >= 1:
for p in self.RCNN_base[4].parameters(): p.requires_grad=False
def set_bn_fix(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
for p in m.parameters(): p.requires_grad=False
self.RCNN_base.apply(set_bn_fix)
self.RCNN_top.apply(set_bn_fix)
def train(self, mode=True):
# Override train so that the training mode is set as we want
nn.Module.train(self, mode)
if mode:
# Set fixed blocks to be in eval mode
self.RCNN_base.eval()
self.RCNN_base[5].train()
self.RCNN_base[6].train()
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
self.RCNN_base.apply(set_bn_eval)
self.RCNN_top.apply(set_bn_eval)
def _head_to_tail(self, pool5):
fc7 = self.RCNN_top(pool5).mean(3).mean(2)
return fc7
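# Hedged usage sketch (not from the original repo): building the Faster R-CNN
# ResNet head. The class tuple and the `create_architecture()` call are
# assumptions about the surrounding _fasterRCNN code, which is not shown here.
#
#   classes = ('__background__', 'person', 'car')
#   fasterRCNN = resnet(classes, num_layers=101, pretrained=True, class_agnostic=False)
#   fasterRCNN.create_architecture()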
|
the-stack_0_19237 | from __future__ import annotations
import operator
from operator import (
le,
lt,
)
import textwrap
from typing import (
Optional,
Sequence,
Type,
TypeVar,
cast,
)
import numpy as np
from pandas._config import get_option
from pandas._libs import NaT
from pandas._libs.interval import (
VALID_CLOSED,
Interval,
IntervalMixin,
intervals_to_interval_bounds,
)
from pandas._libs.missing import NA
from pandas._typing import (
ArrayLike,
Dtype,
NpDtype,
)
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender
from pandas.core.dtypes.cast import maybe_convert_platform
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_interval_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCIntervalIndex,
ABCPeriodIndex,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
notna,
)
from pandas.core.algorithms import (
isin,
take,
value_counts,
)
from pandas.core.arrays.base import (
ExtensionArray,
_extension_array_shared_docs,
)
from pandas.core.arrays.categorical import Categorical
import pandas.core.common as com
from pandas.core.construction import (
array as pd_array,
ensure_wrapped_if_datetimelike,
extract_array,
)
from pandas.core.indexers import check_array_indexer
from pandas.core.indexes.base import ensure_index
from pandas.core.ops import (
invalid_comparison,
unpack_zerodim_and_defer,
)
IntervalArrayT = TypeVar("IntervalArrayT", bound="IntervalArray")
_interval_shared_docs = {}
_shared_docs_kwargs = {
"klass": "IntervalArray",
"qualname": "arrays.IntervalArray",
"name": "",
}
_interval_shared_docs[
"class"
] = """
%(summary)s
.. versionadded:: %(versionadded)s
Parameters
----------
data : array-like (1-dimensional)
Array-like containing Interval objects from which to build the
%(klass)s.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both or
neither.
dtype : dtype or None, default None
If None, dtype will be inferred.
copy : bool, default False
Copy the input data.
%(name)s\
verify_integrity : bool, default True
Verify that the %(klass)s is valid.
Attributes
----------
left
right
closed
mid
length
is_empty
is_non_overlapping_monotonic
%(extra_attributes)s\
Methods
-------
from_arrays
from_tuples
from_breaks
contains
overlaps
set_closed
to_tuples
%(extra_methods)s\
See Also
--------
Index : The base pandas Index type.
Interval : A bounded slice-like interval; the elements of an %(klass)s.
interval_range : Function to create a fixed frequency IntervalIndex.
cut : Bin values into discrete Intervals.
qcut : Bin values into equal-sized Intervals based on rank or sample quantiles.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#intervalindex>`_
for more.
%(examples)s\
"""
@Appender(
_interval_shared_docs["class"]
% {
"klass": "IntervalArray",
"summary": "Pandas array for interval data that are closed on the same side.",
"versionadded": "0.24.0",
"name": "",
"extra_attributes": "",
"extra_methods": "",
"examples": textwrap.dedent(
"""\
Examples
--------
A new ``IntervalArray`` can be constructed directly from an array-like of
``Interval`` objects:
>>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
<IntervalArray>
[(0, 1], (1, 5]]
Length: 2, dtype: interval[int64, right]
It may also be constructed using one of the constructor
methods: :meth:`IntervalArray.from_arrays`,
:meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`.
"""
),
}
)
class IntervalArray(IntervalMixin, ExtensionArray):
ndim = 1
can_hold_na = True
_na_value = _fill_value = np.nan
# ---------------------------------------------------------------------
# Constructors
def __new__(
cls: Type[IntervalArrayT],
data,
closed=None,
dtype: Optional[Dtype] = None,
copy: bool = False,
verify_integrity: bool = True,
):
data = extract_array(data, extract_numpy=True)
if isinstance(data, cls):
left = data._left
right = data._right
closed = closed or data.closed
else:
# don't allow scalars
if is_scalar(data):
msg = (
f"{cls.__name__}(...) must be called with a collection "
f"of some kind, {data} was passed"
)
raise TypeError(msg)
# might need to convert empty or purely na data
data = _maybe_convert_platform_interval(data)
left, right, infer_closed = intervals_to_interval_bounds(
data, validate_closed=closed is None
)
closed = closed or infer_closed
return cls._simple_new(
left,
right,
closed,
copy=copy,
dtype=dtype,
verify_integrity=verify_integrity,
)
@classmethod
def _simple_new(
cls: Type[IntervalArrayT],
left,
right,
closed=None,
copy: bool = False,
dtype: Optional[Dtype] = None,
verify_integrity: bool = True,
) -> IntervalArrayT:
result = IntervalMixin.__new__(cls)
if closed is None and isinstance(dtype, IntervalDtype):
closed = dtype.closed
closed = closed or "right"
left = ensure_index(left, copy=copy)
right = ensure_index(right, copy=copy)
if dtype is not None:
# GH 19262: dtype must be an IntervalDtype to override inferred
dtype = pandas_dtype(dtype)
if is_interval_dtype(dtype):
dtype = cast(IntervalDtype, dtype)
if dtype.subtype is not None:
left = left.astype(dtype.subtype)
right = right.astype(dtype.subtype)
else:
msg = f"dtype must be an IntervalDtype, got {dtype}"
raise TypeError(msg)
if dtype.closed is None:
# possibly loading an old pickle
dtype = IntervalDtype(dtype.subtype, closed)
elif closed != dtype.closed:
raise ValueError("closed keyword does not match dtype.closed")
# coerce dtypes to match if needed
if is_float_dtype(left) and is_integer_dtype(right):
right = right.astype(left.dtype)
elif is_float_dtype(right) and is_integer_dtype(left):
left = left.astype(right.dtype)
if type(left) != type(right):
msg = (
f"must not have differing left [{type(left).__name__}] and "
f"right [{type(right).__name__}] types"
)
raise ValueError(msg)
elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype):
# GH 19016
msg = (
"category, object, and string subtypes are not supported "
"for IntervalArray"
)
raise TypeError(msg)
elif isinstance(left, ABCPeriodIndex):
msg = "Period dtypes are not supported, use a PeriodIndex instead"
raise ValueError(msg)
elif isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz):
msg = (
"left and right must have the same time zone, got "
f"'{left.tz}' and '{right.tz}'"
)
raise ValueError(msg)
# For dt64/td64 we want DatetimeArray/TimedeltaArray instead of ndarray
left = ensure_wrapped_if_datetimelike(left)
left = extract_array(left, extract_numpy=True)
right = ensure_wrapped_if_datetimelike(right)
right = extract_array(right, extract_numpy=True)
lbase = getattr(left, "_ndarray", left).base
rbase = getattr(right, "_ndarray", right).base
if lbase is not None and lbase is rbase:
# If these share data, then setitem could corrupt our IA
right = right.copy()
dtype = IntervalDtype(left.dtype, closed=closed)
result._dtype = dtype
result._left = left
result._right = right
if verify_integrity:
result._validate()
return result
@classmethod
def _from_sequence(
cls: Type[IntervalArrayT],
scalars,
*,
dtype: Optional[Dtype] = None,
copy: bool = False,
) -> IntervalArrayT:
return cls(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_factorized(
cls: Type[IntervalArrayT], values: np.ndarray, original: IntervalArrayT
) -> IntervalArrayT:
if len(values) == 0:
# An empty array returns object-dtype here. We can't create
# a new IA from an (empty) object-dtype array, so turn it into the
# correct dtype.
values = values.astype(original.dtype.subtype)
return cls(values, closed=original.closed)
_interval_shared_docs["from_breaks"] = textwrap.dedent(
"""
Construct an %(klass)s from an array of splits.
Parameters
----------
breaks : array-like (1-dimensional)
Left and right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : bool, default False
Copy the data.
dtype : dtype or None, default None
If None, dtype will be inferred.
Returns
-------
%(klass)s
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_arrays : Construct from a left and right array.
%(klass)s.from_tuples : Construct from a sequence of tuples.
%(examples)s\
"""
)
@classmethod
@Appender(
_interval_shared_docs["from_breaks"]
% {
"klass": "IntervalArray",
"examples": textwrap.dedent(
"""\
Examples
--------
>>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, dtype: interval[int64, right]
"""
),
}
)
def from_breaks(
cls: Type[IntervalArrayT],
breaks,
closed="right",
copy: bool = False,
dtype: Optional[Dtype] = None,
) -> IntervalArrayT:
breaks = _maybe_convert_platform_interval(breaks)
return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype)
_interval_shared_docs["from_arrays"] = textwrap.dedent(
"""
Construct from two arrays defining the left and right bounds.
Parameters
----------
left : array-like (1-dimensional)
Left bounds for each interval.
right : array-like (1-dimensional)
Right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : bool, default False
Copy the data.
dtype : dtype, optional
If None, dtype will be inferred.
Returns
-------
%(klass)s
Raises
------
ValueError
When a value is missing in only one of `left` or `right`.
When a value in `left` is greater than the corresponding value
in `right`.
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits.
%(klass)s.from_tuples : Construct an %(klass)s from an
array-like of tuples.
Notes
-----
Each element of `left` must be less than or equal to the `right`
element at the same position. If an element is missing, it must be
missing in both `left` and `right`. A TypeError is raised when
using an unsupported type for `left` or `right`. At the moment,
'category', 'object', and 'string' subtypes are not supported.
%(examples)s\
"""
)
@classmethod
@Appender(
_interval_shared_docs["from_arrays"]
% {
"klass": "IntervalArray",
"examples": textwrap.dedent(
"""\
>>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, dtype: interval[int64, right]
"""
),
}
)
def from_arrays(
cls: Type[IntervalArrayT],
left,
right,
closed="right",
copy: bool = False,
dtype: Optional[Dtype] = None,
) -> IntervalArrayT:
left = _maybe_convert_platform_interval(left)
right = _maybe_convert_platform_interval(right)
return cls._simple_new(
left, right, closed, copy=copy, dtype=dtype, verify_integrity=True
)
_interval_shared_docs["from_tuples"] = textwrap.dedent(
"""
Construct an %(klass)s from an array-like of tuples.
Parameters
----------
data : array-like (1-dimensional)
Array of tuples.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : bool, default False
        By default copy the data; this argument is kept for compatibility only and is ignored.
dtype : dtype or None, default None
If None, dtype will be inferred.
Returns
-------
%(klass)s
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
%(klass)s.from_arrays : Construct an %(klass)s from a left and
right array.
%(klass)s.from_breaks : Construct an %(klass)s from an array of
splits.
%(examples)s\
"""
)
@classmethod
@Appender(
_interval_shared_docs["from_tuples"]
% {
"klass": "IntervalArray",
"examples": textwrap.dedent(
"""\
Examples
--------
>>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
<IntervalArray>
[(0, 1], (1, 2]]
Length: 2, dtype: interval[int64, right]
"""
),
}
)
def from_tuples(
cls: Type[IntervalArrayT],
data,
closed="right",
copy: bool = False,
dtype: Optional[Dtype] = None,
) -> IntervalArrayT:
if len(data):
left, right = [], []
else:
# ensure that empty data keeps input dtype
left = right = data
for d in data:
if isna(d):
lhs = rhs = np.nan
else:
name = cls.__name__
try:
# need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...]
lhs, rhs = d
except ValueError as err:
msg = f"{name}.from_tuples requires tuples of length 2, got {d}"
raise ValueError(msg) from err
except TypeError as err:
msg = f"{name}.from_tuples received an invalid item, {d}"
raise TypeError(msg) from err
left.append(lhs)
right.append(rhs)
return cls.from_arrays(left, right, closed, copy=False, dtype=dtype)
def _validate(self):
"""
Verify that the IntervalArray is valid.
Checks that
* closed is valid
* left and right match lengths
* left and right have the same missing values
* left is always below right
"""
if self.closed not in VALID_CLOSED:
msg = f"invalid option for 'closed': {self.closed}"
raise ValueError(msg)
if len(self._left) != len(self._right):
msg = "left and right must have the same length"
raise ValueError(msg)
left_mask = notna(self._left)
right_mask = notna(self._right)
if not (left_mask == right_mask).all():
msg = (
"missing values must be missing in the same "
"location both left and right sides"
)
raise ValueError(msg)
if not (self._left[left_mask] <= self._right[left_mask]).all():
msg = "left side of interval must be <= right side"
raise ValueError(msg)
def _shallow_copy(self: IntervalArrayT, left, right) -> IntervalArrayT:
"""
Return a new IntervalArray with the replacement attributes
Parameters
----------
left : Index
Values to be used for the left-side of the intervals.
right : Index
Values to be used for the right-side of the intervals.
"""
return self._simple_new(left, right, closed=self.closed, verify_integrity=False)
# ---------------------------------------------------------------------
# Descriptive
@property
def dtype(self) -> IntervalDtype:
return self._dtype
@property
def nbytes(self) -> int:
return self.left.nbytes + self.right.nbytes
@property
def size(self) -> int:
# Avoid materializing self.values
return self.left.size
# ---------------------------------------------------------------------
# EA Interface
def __iter__(self):
return iter(np.asarray(self))
def __len__(self) -> int:
return len(self._left)
def __getitem__(self, key):
key = check_array_indexer(self, key)
left = self._left[key]
right = self._right[key]
if not isinstance(left, (np.ndarray, ExtensionArray)):
# scalar
if is_scalar(left) and isna(left):
return self._fill_value
return Interval(left, right, self.closed)
# error: Argument 1 to "ndim" has incompatible type "Union[ndarray,
# ExtensionArray]"; expected "Union[Union[int, float, complex, str, bytes,
# generic], Sequence[Union[int, float, complex, str, bytes, generic]],
# Sequence[Sequence[Any]], _SupportsArray]"
if np.ndim(left) > 1: # type: ignore[arg-type]
# GH#30588 multi-dimensional indexer disallowed
raise ValueError("multi-dimensional indexing not allowed")
return self._shallow_copy(left, right)
def __setitem__(self, key, value):
value_left, value_right = self._validate_setitem_value(value)
key = check_array_indexer(self, key)
self._left[key] = value_left
self._right[key] = value_right
def _cmp_method(self, other, op):
# ensure pandas array for list-like and eliminate non-interval scalars
if is_list_like(other):
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
other = pd_array(other)
elif not isinstance(other, Interval):
# non-interval scalar -> no matches
return invalid_comparison(self, other, op)
# determine the dtype of the elements we want to compare
if isinstance(other, Interval):
other_dtype = pandas_dtype("interval")
elif not is_categorical_dtype(other.dtype):
other_dtype = other.dtype
else:
# for categorical defer to categories for dtype
other_dtype = other.categories.dtype
# extract intervals if we have interval categories with matching closed
if is_interval_dtype(other_dtype):
if self.closed != other.categories.closed:
return invalid_comparison(self, other, op)
other = other.categories.take(
other.codes, allow_fill=True, fill_value=other.categories._na_value
)
# interval-like -> need same closed and matching endpoints
if is_interval_dtype(other_dtype):
if self.closed != other.closed:
return invalid_comparison(self, other, op)
elif not isinstance(other, Interval):
other = type(self)(other)
if op is operator.eq:
return (self._left == other.left) & (self._right == other.right)
elif op is operator.ne:
return (self._left != other.left) | (self._right != other.right)
elif op is operator.gt:
return (self._left > other.left) | (
(self._left == other.left) & (self._right > other.right)
)
elif op is operator.ge:
return (self == other) | (self > other)
elif op is operator.lt:
return (self._left < other.left) | (
(self._left == other.left) & (self._right < other.right)
)
else:
            # operator.le
return (self == other) | (self < other)
# non-interval/non-object dtype -> no matches
if not is_object_dtype(other_dtype):
return invalid_comparison(self, other, op)
# object dtype -> iteratively check for intervals
result = np.zeros(len(self), dtype=bool)
for i, obj in enumerate(other):
try:
result[i] = op(self[i], obj)
except TypeError:
if obj is NA:
# comparison with np.nan returns NA
# github.com/pandas-dev/pandas/pull/37124#discussion_r509095092
result[i] = op is operator.ne
else:
raise
return result
@unpack_zerodim_and_defer("__eq__")
def __eq__(self, other):
return self._cmp_method(other, operator.eq)
@unpack_zerodim_and_defer("__ne__")
def __ne__(self, other):
return self._cmp_method(other, operator.ne)
@unpack_zerodim_and_defer("__gt__")
def __gt__(self, other):
return self._cmp_method(other, operator.gt)
@unpack_zerodim_and_defer("__ge__")
def __ge__(self, other):
return self._cmp_method(other, operator.ge)
@unpack_zerodim_and_defer("__lt__")
def __lt__(self, other):
return self._cmp_method(other, operator.lt)
@unpack_zerodim_and_defer("__le__")
def __le__(self, other):
return self._cmp_method(other, operator.le)
def argsort(
self,
ascending: bool = True,
kind: str = "quicksort",
na_position: str = "last",
*args,
**kwargs,
) -> np.ndarray:
ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
if ascending and kind == "quicksort" and na_position == "last":
return np.lexsort((self.right, self.left))
# TODO: other cases we can use lexsort for? much more performant.
return super().argsort(
ascending=ascending, kind=kind, na_position=na_position, **kwargs
)
def fillna(
self: IntervalArrayT, value=None, method=None, limit=None
) -> IntervalArrayT:
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should be either Interval objects or NA/NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
(Not implemented yet for IntervalArray)
Method to use for filling holes in reindexed Series
limit : int, default None
(Not implemented yet for IntervalArray)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : IntervalArray with NA/NaN filled
"""
if method is not None:
raise TypeError("Filling by method is not supported for IntervalArray.")
if limit is not None:
raise TypeError("limit is not supported for IntervalArray.")
value_left, value_right = self._validate_fill_value(value)
left = self.left.fillna(value=value_left)
right = self.right.fillna(value=value_right)
return self._shallow_copy(left, right)
def astype(self, dtype, copy: bool = True):
"""
Cast to an ExtensionArray or NumPy array with dtype 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ExtensionArray or ndarray
ExtensionArray or NumPy ndarray with 'dtype' for its dtype.
"""
from pandas import Index
from pandas.core.arrays.string_ import StringDtype
if dtype is not None:
dtype = pandas_dtype(dtype)
if is_interval_dtype(dtype):
if dtype == self.dtype:
return self.copy() if copy else self
# need to cast to different subtype
try:
# We need to use Index rules for astype to prevent casting
# np.nan entries to int subtypes
new_left = Index(self._left, copy=False).astype(dtype.subtype)
new_right = Index(self._right, copy=False).astype(dtype.subtype)
except TypeError as err:
msg = (
f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible"
)
raise TypeError(msg) from err
return self._shallow_copy(new_left, new_right)
elif is_categorical_dtype(dtype):
return Categorical(np.asarray(self), dtype=dtype)
elif isinstance(dtype, StringDtype):
return dtype.construct_array_type()._from_sequence(self, copy=False)
# TODO: This try/except will be repeated.
try:
return np.asarray(self).astype(dtype, copy=copy)
except (TypeError, ValueError) as err:
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg) from err
def equals(self, other) -> bool:
if type(self) != type(other):
return False
return bool(
self.closed == other.closed
and self.left.equals(other.left)
and self.right.equals(other.right)
)
@classmethod
def _concat_same_type(
cls: Type[IntervalArrayT], to_concat: Sequence[IntervalArrayT]
) -> IntervalArrayT:
"""
Concatenate multiple IntervalArray
Parameters
----------
to_concat : sequence of IntervalArray
Returns
-------
IntervalArray
"""
closed = {interval.closed for interval in to_concat}
if len(closed) != 1:
raise ValueError("Intervals must all be closed on the same side.")
closed = closed.pop()
left = np.concatenate([interval.left for interval in to_concat])
right = np.concatenate([interval.right for interval in to_concat])
return cls._simple_new(left, right, closed=closed, copy=False)
def copy(self: IntervalArrayT) -> IntervalArrayT:
"""
Return a copy of the array.
Returns
-------
IntervalArray
"""
left = self._left.copy()
right = self._right.copy()
closed = self.closed
# TODO: Could skip verify_integrity here.
return type(self).from_arrays(left, right, closed=closed)
def isna(self) -> np.ndarray:
return isna(self._left)
def shift(
self: IntervalArrayT, periods: int = 1, fill_value: object = None
) -> IntervalArray:
if not len(self) or periods == 0:
return self.copy()
if isna(fill_value):
fill_value = self.dtype.na_value
# ExtensionArray.shift doesn't work for two reasons
# 1. IntervalArray.dtype.na_value may not be correct for the dtype.
# 2. IntervalArray._from_sequence only accepts NaN for missing values,
# not other values like NaT
empty_len = min(abs(periods), len(self))
if isna(fill_value):
from pandas import Index
fill_value = Index(self._left, copy=False)._na_value
empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1))
else:
empty = self._from_sequence([fill_value] * empty_len)
if periods > 0:
a = empty
b = self[:-periods]
else:
a = self[abs(periods) :]
b = empty
return self._concat_same_type([a, b])
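    # Illustrative behavior of shift (a sketch, not part of pandas):
    #
    #   arr = IntervalArray.from_breaks([0, 1, 2, 3])   # [(0, 1], (1, 2], (2, 3]]
    #   arr.shift(1)                                     # [NaN, (0, 1], (1, 2]]
    #   arr.shift(-1)                                    # [(1, 2], (2, 3], NaN]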
def take(
self: IntervalArrayT,
indices,
*,
allow_fill: bool = False,
fill_value=None,
axis=None,
**kwargs,
) -> IntervalArrayT:
"""
Take elements from the IntervalArray.
Parameters
----------
indices : sequence of integers
Indices to be taken.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
            missing values. These values are set to `fill_value`. Any other
            negative values raise a ``ValueError``.
fill_value : Interval or NA, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type, ``self.dtype.na_value``, is used.
For many ExtensionArrays, there will be two representations of
`fill_value`: a user-facing "boxed" scalar, and a low-level
physical NA value. `fill_value` should be the user-facing version,
and the implementation should handle translating that to the
physical version for processing the take if necessary.
axis : any, default None
Present for compat with IntervalIndex; does nothing.
Returns
-------
IntervalArray
Raises
------
IndexError
When the indices are out of bounds for the array.
ValueError
When `indices` contains negative values other than ``-1``
and `allow_fill` is True.
"""
nv.validate_take((), kwargs)
fill_left = fill_right = fill_value
if allow_fill:
fill_left, fill_right = self._validate_fill_value(fill_value)
left_take = take(
self._left, indices, allow_fill=allow_fill, fill_value=fill_left
)
right_take = take(
self._right, indices, allow_fill=allow_fill, fill_value=fill_right
)
return self._shallow_copy(left_take, right_take)
def _validate_listlike(self, value):
# list-like of intervals
try:
array = IntervalArray(value)
self._check_closed_matches(array, name="value")
value_left, value_right = array.left, array.right
except TypeError as err:
# wrong type: not interval or NA
msg = f"'value' should be an interval type, got {type(value)} instead."
raise TypeError(msg) from err
try:
self.left._validate_fill_value(value_left)
except (ValueError, TypeError) as err:
msg = (
"'value' should be a compatible interval type, "
f"got {type(value)} instead."
)
raise TypeError(msg) from err
return value_left, value_right
def _validate_scalar(self, value):
if isinstance(value, Interval):
self._check_closed_matches(value, name="value")
left, right = value.left, value.right
elif is_valid_na_for_dtype(value, self.left.dtype):
# GH#18295
left = right = value
else:
raise TypeError(
"can only insert Interval objects and NA into an IntervalArray"
)
return left, right
def _validate_fill_value(self, value):
return self._validate_scalar(value)
def _validate_setitem_value(self, value):
needs_float_conversion = False
if is_valid_na_for_dtype(value, self.left.dtype):
# na value: need special casing to set directly on numpy arrays
if is_integer_dtype(self.dtype.subtype):
# can't set NaN on a numpy integer array
needs_float_conversion = True
elif is_datetime64_dtype(self.dtype.subtype):
# need proper NaT to set directly on the numpy array
value = np.datetime64("NaT")
elif is_datetime64tz_dtype(self.dtype.subtype):
# need proper NaT to set directly on the DatetimeArray array
value = NaT
elif is_timedelta64_dtype(self.dtype.subtype):
# need proper NaT to set directly on the numpy array
value = np.timedelta64("NaT")
value_left, value_right = value, value
elif isinstance(value, Interval):
# scalar interval
self._check_closed_matches(value, name="value")
value_left, value_right = value.left, value.right
self.left._validate_fill_value(value_left)
self.left._validate_fill_value(value_right)
else:
return self._validate_listlike(value)
if needs_float_conversion:
raise ValueError("Cannot set float NaN to integer-backed IntervalArray")
return value_left, value_right
def value_counts(self, dropna: bool = True):
"""
Returns a Series containing counts of each interval.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
        # TODO: implement this in a non-naive way!
return value_counts(np.asarray(self), dropna=dropna)
# ---------------------------------------------------------------------
# Rendering Methods
def _format_data(self) -> str:
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option("display.max_seq_items") or n) // 10, 10)
formatter = str
if n == 0:
summary = "[]"
elif n == 1:
first = formatter(self[0])
summary = f"[{first}]"
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = f"[{first}, {last}]"
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
head_str = ", ".join(head)
tail_str = ", ".join(tail)
summary = f"[{head_str} ... {tail_str}]"
else:
tail = [formatter(x) for x in self]
tail_str = ", ".join(tail)
summary = f"[{tail_str}]"
return summary
def __repr__(self) -> str:
# the short repr has no trailing newline, while the truncated
# repr does. So we include a newline in our template, and strip
# any trailing newlines from format_object_summary
data = self._format_data()
class_name = f"<{type(self).__name__}>\n"
template = f"{class_name}{data}\nLength: {len(self)}, dtype: {self.dtype}"
return template
def _format_space(self) -> str:
space = " " * (len(type(self).__name__) + 1)
return f"\n{space}"
# ---------------------------------------------------------------------
# Vectorized Interval Properties/Attributes
@property
def left(self):
"""
Return the left endpoints of each Interval in the IntervalArray as
an Index.
"""
from pandas import Index
return Index(self._left, copy=False)
@property
def right(self):
"""
Return the right endpoints of each Interval in the IntervalArray as
an Index.
"""
from pandas import Index
return Index(self._right, copy=False)
@property
def length(self):
"""
Return an Index with entries denoting the length of each Interval in
the IntervalArray.
"""
try:
return self.right - self.left
except TypeError as err:
# length not defined for some types, e.g. string
msg = (
"IntervalArray contains Intervals without defined length, "
"e.g. Intervals with string endpoints"
)
raise TypeError(msg) from err
@property
def mid(self):
"""
Return the midpoint of each Interval in the IntervalArray as an Index.
"""
try:
return 0.5 * (self.left + self.right)
except TypeError:
# datetime safe version
return self.left + 0.5 * self.length
_interval_shared_docs["overlaps"] = textwrap.dedent(
"""
Check elementwise if an Interval overlaps the values in the %(klass)s.
Two intervals overlap if they share a common point, including closed
endpoints. Intervals that only have an open endpoint in common do not
overlap.
.. versionadded:: 0.24.0
Parameters
----------
other : %(klass)s
Interval to check against for an overlap.
Returns
-------
ndarray
Boolean array positionally indicating where an overlap occurs.
See Also
--------
Interval.overlaps : Check whether two Interval objects overlap.
Examples
--------
%(examples)s
>>> intervals.overlaps(pd.Interval(0.5, 1.5))
array([ True, True, False])
Intervals that share closed endpoints overlap:
>>> intervals.overlaps(pd.Interval(1, 3, closed='left'))
array([ True, True, True])
Intervals that only have an open endpoint in common do not overlap:
>>> intervals.overlaps(pd.Interval(1, 2, closed='right'))
array([False, True, False])
"""
)
@Appender(
_interval_shared_docs["overlaps"]
% {
"klass": "IntervalArray",
"examples": textwrap.dedent(
"""\
>>> data = [(0, 1), (1, 3), (2, 4)]
>>> intervals = pd.arrays.IntervalArray.from_tuples(data)
>>> intervals
<IntervalArray>
[(0, 1], (1, 3], (2, 4]]
Length: 3, dtype: interval[int64, right]
"""
),
}
)
def overlaps(self, other):
if isinstance(other, (IntervalArray, ABCIntervalIndex)):
raise NotImplementedError
elif not isinstance(other, Interval):
msg = f"`other` must be Interval-like, got {type(other).__name__}"
raise TypeError(msg)
# equality is okay if both endpoints are closed (overlap at a point)
op1 = le if (self.closed_left and other.closed_right) else lt
op2 = le if (other.closed_left and self.closed_right) else lt
        # overlaps is equivalent to the negation of two intervals being disjoint:
        # disjoint = (A.left > B.right) or (B.left > A.right)
        # (simplifying the negation allows this to be done in fewer operations)
return op1(self.left, other.right) & op2(other.left, self.right)
# ---------------------------------------------------------------------
@property
def closed(self):
"""
Whether the intervals are closed on the left-side, right-side, both or
neither.
"""
return self.dtype.closed
_interval_shared_docs["set_closed"] = textwrap.dedent(
"""
Return an %(klass)s identical to the current one, but closed on the
specified side.
.. versionadded:: 0.24.0
Parameters
----------
closed : {'left', 'right', 'both', 'neither'}
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
new_index : %(klass)s
%(examples)s\
"""
)
@Appender(
_interval_shared_docs["set_closed"]
% {
"klass": "IntervalArray",
"examples": textwrap.dedent(
"""\
Examples
--------
>>> index = pd.arrays.IntervalArray.from_breaks(range(4))
>>> index
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, dtype: interval[int64, right]
>>> index.set_closed('both')
<IntervalArray>
[[0, 1], [1, 2], [2, 3]]
Length: 3, dtype: interval[int64, both]
"""
),
}
)
def set_closed(self: IntervalArrayT, closed) -> IntervalArrayT:
if closed not in VALID_CLOSED:
msg = f"invalid option for 'closed': {closed}"
raise ValueError(msg)
return type(self)._simple_new(
left=self._left, right=self._right, closed=closed, verify_integrity=False
)
_interval_shared_docs[
"is_non_overlapping_monotonic"
] = """
Return True if the %(klass)s is non-overlapping (no Intervals share
points) and is either monotonic increasing or monotonic decreasing,
else False.
"""
# https://github.com/python/mypy/issues/1362
# Mypy does not support decorated properties
@property # type: ignore[misc]
@Appender(
_interval_shared_docs["is_non_overlapping_monotonic"] % _shared_docs_kwargs
)
def is_non_overlapping_monotonic(self) -> bool:
# must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... )
# or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)
# we already require left <= right
# strict inequality for closed == 'both'; equality implies overlapping
# at a point when both sides of intervals are included
if self.closed == "both":
return bool(
(self._right[:-1] < self._left[1:]).all()
or (self._left[:-1] > self._right[1:]).all()
)
# non-strict inequality when closed != 'both'; at least one side is
# not included in the intervals, so equality does not imply overlapping
return bool(
(self._right[:-1] <= self._left[1:]).all()
or (self._left[:-1] >= self._right[1:]).all()
)
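    # Worked example (illustrative, not part of pandas): for breaks [0, 1, 2] with
    # closed='right', right[:-1] <= left[1:] is 1 <= 1, so the array is
    # non-overlapping monotonic; with closed='both' the strict check 1 < 1 fails,
    # because the two intervals would share the point 1.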
# ---------------------------------------------------------------------
# Conversion
def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
"""
Return the IntervalArray's data as a numpy array of Interval
objects (with dtype='object')
"""
left = self._left
right = self._right
mask = self.isna()
closed = self.closed
result = np.empty(len(left), dtype=object)
for i in range(len(left)):
if mask[i]:
result[i] = np.nan
else:
result[i] = Interval(left[i], right[i], closed)
return result
def __arrow_array__(self, type=None):
"""
Convert myself into a pyarrow Array.
"""
import pyarrow
from pandas.core.arrays._arrow_utils import ArrowIntervalType
try:
subtype = pyarrow.from_numpy_dtype(self.dtype.subtype)
except TypeError as err:
raise TypeError(
f"Conversion to arrow with subtype '{self.dtype.subtype}' "
"is not supported"
) from err
interval_type = ArrowIntervalType(subtype, self.closed)
storage_array = pyarrow.StructArray.from_arrays(
[
pyarrow.array(self._left, type=subtype, from_pandas=True),
pyarrow.array(self._right, type=subtype, from_pandas=True),
],
names=["left", "right"],
)
mask = self.isna()
if mask.any():
# if there are missing values, set validity bitmap also on the array level
null_bitmap = pyarrow.array(~mask).buffers()[1]
storage_array = pyarrow.StructArray.from_buffers(
storage_array.type,
len(storage_array),
[null_bitmap],
children=[storage_array.field(0), storage_array.field(1)],
)
if type is not None:
if type.equals(interval_type.storage_type):
return storage_array
elif isinstance(type, ArrowIntervalType):
# ensure we have the same subtype and closed attributes
if not type.equals(interval_type):
raise TypeError(
"Not supported to convert IntervalArray to type with "
f"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) "
f"and 'closed' ({self.closed} vs {type.closed}) attributes"
)
else:
raise TypeError(
f"Not supported to convert IntervalArray to '{type}' type"
)
return pyarrow.ExtensionArray.from_storage(interval_type, storage_array)
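    # Hedged round-trip sketch (assumes pyarrow is installed; not part of pandas):
    #
    #   import pyarrow as pa
    #   arr = IntervalArray.from_breaks([0, 1, 2])
    #   pa_arr = pa.array(arr)   # dispatches to __arrow_array__
    #   pa_arr.type              # ArrowIntervalType with int64 subtype, closed='right'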
_interval_shared_docs[
"to_tuples"
] = """
Return an %(return_type)s of tuples of the form (left, right).
Parameters
----------
na_tuple : bool, default True
Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA
value itself if False, ``nan``.
Returns
-------
tuples: %(return_type)s
%(examples)s\
"""
@Appender(
_interval_shared_docs["to_tuples"] % {"return_type": "ndarray", "examples": ""}
)
def to_tuples(self, na_tuple=True) -> np.ndarray:
tuples = com.asarray_tuplesafe(zip(self._left, self._right))
if not na_tuple:
# GH 18756
tuples = np.where(~self.isna(), tuples, np.nan)
return tuples
# ---------------------------------------------------------------------
def putmask(self, mask: np.ndarray, value) -> None:
value_left, value_right = self._validate_setitem_value(value)
if isinstance(self._left, np.ndarray):
np.putmask(self._left, mask, value_left)
np.putmask(self._right, mask, value_right)
else:
self._left.putmask(mask, value_left)
self._right.putmask(mask, value_right)
def delete(self: IntervalArrayT, loc) -> IntervalArrayT:
if isinstance(self._left, np.ndarray):
new_left = np.delete(self._left, loc)
new_right = np.delete(self._right, loc)
else:
new_left = self._left.delete(loc)
new_right = self._right.delete(loc)
return self._shallow_copy(left=new_left, right=new_right)
@Appender(_extension_array_shared_docs["repeat"] % _shared_docs_kwargs)
def repeat(self: IntervalArrayT, repeats: int, axis=None) -> IntervalArrayT:
nv.validate_repeat((), {"axis": axis})
left_repeat = self.left.repeat(repeats)
right_repeat = self.right.repeat(repeats)
return self._shallow_copy(left=left_repeat, right=right_repeat)
_interval_shared_docs["contains"] = textwrap.dedent(
"""
Check elementwise if the Intervals contain the value.
Return a boolean mask whether the value is contained in the Intervals
of the %(klass)s.
.. versionadded:: 0.25.0
Parameters
----------
other : scalar
The value to check whether it is contained in the Intervals.
Returns
-------
boolean array
See Also
--------
Interval.contains : Check whether Interval object contains value.
%(klass)s.overlaps : Check if an Interval overlaps the values in the
%(klass)s.
Examples
--------
%(examples)s
>>> intervals.contains(0.5)
array([ True, False, False])
"""
)
@Appender(
_interval_shared_docs["contains"]
% {
"klass": "IntervalArray",
"examples": textwrap.dedent(
"""\
>>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)])
>>> intervals
<IntervalArray>
[(0, 1], (1, 3], (2, 4]]
Length: 3, dtype: interval[int64, right]
"""
),
}
)
def contains(self, other):
if isinstance(other, Interval):
raise NotImplementedError("contains not implemented for two intervals")
return (self._left < other if self.open_left else self._left <= other) & (
other < self._right if self.open_right else other <= self._right
)
def isin(self, values) -> np.ndarray:
if not hasattr(values, "dtype"):
values = np.array(values)
values = extract_array(values, extract_numpy=True)
if is_interval_dtype(values.dtype):
if self.closed != values.closed:
# not comparable -> no overlap
return np.zeros(self.shape, dtype=bool)
if is_dtype_equal(self.dtype, values.dtype):
# GH#38353 instead of casting to object, operating on a
# complex128 ndarray is much more performant.
left = self._combined.view("complex128")
right = values._combined.view("complex128")
return np.in1d(left, right)
elif needs_i8_conversion(self.left.dtype) ^ needs_i8_conversion(
values.left.dtype
):
# not comparable -> no overlap
return np.zeros(self.shape, dtype=bool)
return isin(self.astype(object), values.astype(object))
@property
def _combined(self) -> ArrayLike:
left = self.left._values.reshape(-1, 1)
right = self.right._values.reshape(-1, 1)
if needs_i8_conversion(left.dtype):
comb = left._concat_same_type([left, right], axis=1)
else:
comb = np.concatenate([left, right], axis=1)
return comb
def _maybe_convert_platform_interval(values) -> ArrayLike:
"""
Try to do platform conversion, with special casing for IntervalArray.
Wrapper around maybe_convert_platform that alters the default return
dtype in certain cases to be compatible with IntervalArray. For example,
empty lists return with integer dtype instead of object dtype, which is
prohibited for IntervalArray.
Parameters
----------
values : array-like
Returns
-------
array
"""
if isinstance(values, (list, tuple)) and len(values) == 0:
# GH 19016
# empty lists/tuples get object dtype by default, but this is
# prohibited for IntervalArray, so coerce to integer instead
return np.array([], dtype=np.int64)
elif not is_list_like(values) or isinstance(values, ABCDataFrame):
# This will raise later, but we avoid passing to maybe_convert_platform
return values
elif is_categorical_dtype(values):
values = np.asarray(values)
elif not hasattr(values, "dtype") and not isinstance(values, (list, tuple, range)):
# TODO: should we just cast these to list?
return values
else:
values = extract_array(values, extract_numpy=True)
return maybe_convert_platform(values)
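# Illustrative behavior (a sketch, not part of pandas): empty python containers are
# coerced to int64 so IntervalArray does not end up with the prohibited object subtype.
#
#   _maybe_convert_platform_interval([])         # array([], dtype=int64)
#   _maybe_convert_platform_interval([1, 2, 3])  # platform-converted integer ndarray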
|
the-stack_0_19239 | from typing import Dict, Iterable, List
from explainaboard.constants import Source, FileType
from explainaboard.tasks import TaskType
from .loader import register_loader
from .loader import Loader
@register_loader(TaskType.qa_multiple_choice)
class QAMultipleChoiceLoader(Loader):
"""
    Validate and reformat a system output file in JSON format: a list of samples,
    each a dict with "context", "question", "answers" and "predicted_answers" fields.
usage:
please refer to `test_loaders.py`
"""
def __init__(self, source: Source, file_type: FileType, data: str = None):
if source is None:
source = Source.local_filesystem
if file_type is None:
file_type = FileType.json
self._source = source
self._file_type = file_type
self._data = data
def load(self) -> Iterable[Dict]:
"""
        Load the system output previously read by the parent ``Loader`` into
        ``self._raw_data``: a JSON list of dicts with "context", "question",
        "answers" and "predicted_answers" fields.
        :return: an iterable of dicts, one per sample
"""
super().load()
data: List[Dict] = []
if self._file_type == FileType.json:
for id, data_info in enumerate(self._raw_data):
data.append(
{
"id": str(id), # should be string type
"context": data_info["context"],
"question": data_info["question"],
"answers": data_info["answers"],
"predicted_answers": data_info["predicted_answers"],
}
)
else:
raise NotImplementedError
return data
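# Hedged usage sketch (the file path and constructor arguments are assumptions,
# not taken from the original repo or its tests):
#
#   loader = QAMultipleChoiceLoader(Source.local_filesystem, FileType.json,
#                                   data="path/to/system_output.json")
#   for sample in loader.load():
#       print(sample["id"], sample["answers"], sample["predicted_answers"])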
|
the-stack_0_19240 | import pytest
import requests
@pytest.fixture(scope='session')
def auth0_access_token():
creds = {
'connection': 'Username-Password-Authentication',
'scope': 'openid',
'client_id': 'fucNqQ1x5rSFOjXNqtm0NWzzxG1g1xVs', # AUTH0: CLIENT ID
'grant_type': 'password',
'username': '[email protected]', # AUTH0: AUTOMATED TEST CREDENTIALS
'password': 'curateme'
}
url = 'https://clingen.auth0.com/oauth/ro' # AUTH0: LOGIN DOMAIN
try:
res = requests.post(url, data=creds)
res.raise_for_status()
except Exception as e:
pytest.skip("Error retrieving auth0 test user access token: %r" % e)
data = res.json()
if 'access_token' not in data:
pytest.skip("Missing 'access_token' in auth0 test user access token: %r" % data)
return data['access_token']
@pytest.fixture(scope='session')
def auth0_encode_user_token(auth0_access_token):
return {'accessToken': auth0_access_token}
@pytest.fixture(scope='session')
def auth0_encode_user_profile(auth0_access_token):
user_url = "https://{domain}/userinfo?access_token={access_token}" \
.format(domain='clingen.auth0.com', access_token=auth0_access_token) # AUTH0: LOGIN DOMAIN
user_info = requests.get(user_url).json()
return user_info
def test_login_no_csrf(anontestapp, auth0_encode_user_token):
res = anontestapp.post_json('/login', auth0_encode_user_token, status=400)
assert 'Set-Cookie' in res.headers
def test_login_unknown_user(anontestapp, auth0_encode_user_token):
res = anontestapp.get('/session')
csrf_token = str(res.json['_csrft_'])
headers = {'X-CSRF-Token': csrf_token}
res = anontestapp.post_json('/login', auth0_encode_user_token, headers=headers, status=503)
assert 'Set-Cookie' in res.headers
def test_login_logout(testapp, anontestapp, auth0_encode_user_token, auth0_encode_user_profile):
# Create a user with the persona email
url = '/users/'
email = auth0_encode_user_profile['email']
item = {
'email': email,
'first_name': 'Auth0',
'last_name': 'Test User',
'user_status': 'active'
}
testapp.post_json(url, item, status=201)
# Log in
res = anontestapp.get('/session')
csrf_token = str(res.json['_csrft_'])
headers = {'X-CSRF-Token': csrf_token}
res = anontestapp.post_json('/login', auth0_encode_user_token, headers=headers, status=200)
assert 'Set-Cookie' in res.headers
res = anontestapp.get('/session')
assert res.json['auth.userid'] == email
# Log out
res = anontestapp.get('/logout?redirect=false', headers=headers, status=200)
assert 'Set-Cookie' in res.headers
res = anontestapp.get('/session')
assert 'auth.userid' not in res.json
|
the-stack_0_19241 | import midi
from midiutil import MIDIFile
degrees = [60, 62, 64, 65, 67, 69, 71, 72] # MIDI note number
track = 0
channel = 0
time = 0 # In beats
duration = 1 # In beats
tempo = 60 # In BPM
volume = 100 # 0-127, as per the MIDI standard
MyMIDI = MIDIFile(1) # One track, defaults to format 1 (tempo track is created
# automatically)
MyMIDI.addTempo(track, time, tempo)
for i, pitch in enumerate(degrees):
MyMIDI.addNote(track, channel, pitch, time + i, duration, volume)
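# Illustrative variation (a sketch, not part of the original script): writing a
# C-major chord instead of a scale by giving every note the same start time.
#
#   for pitch in (60, 64, 67):  # C, E, G
#       MyMIDI.addNote(track, channel, pitch, time, 4, volume)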
with open("major-scale.mid", "wb") as output_file:
MyMIDI.writeFile(output_file) |
the-stack_0_19242 | import logging
import os
from torch import nn
from farm.modeling.language_model import LanguageModel
from farm.modeling.prediction_head import PredictionHead, BertLMHead
from farm.utils import MLFlowLogger as MlLogger
logger = logging.getLogger(__name__)
class AdaptiveModel(nn.Module):
""" Contains all the modelling needed for your NLP task. Combines a language model and a prediction head.
Allows for gradient flow back to the language model component."""
def __init__(
self,
language_model,
prediction_heads,
embeds_dropout_prob,
lm_output_types,
device,
):
"""
:param language_model: Any model that turns token ids into vector representations
:type language_model: LanguageModel
:param prediction_heads: A list of models that take embeddings and return logits for a given task
:type prediction_heads: list
:param embeds_dropout_prob: The probability that a value in the embeddings returned by the
language model will be zeroed.
:param embeds_dropout_prob: float
:param lm_output_types: How to extract the embeddings from the final layer of the language model. When set
to "per_token", one embedding will be extracted per input token. If set to
"per_sequence", a single embedding will be extracted to represent the full
input sequence. Can either be a single string, or a list of strings,
one for each prediction head.
:type lm_output_types: list or str
:param device: The device on which this model will operate. Either "cpu" or "cuda".
"""
super(AdaptiveModel, self).__init__()
self.language_model = language_model.to(device)
self.prediction_heads = nn.ModuleList([ph.to(device) for ph in prediction_heads])
# set shared weights for LM finetuning
for head in self.prediction_heads:
if head.model_type == "language_modelling":
head.set_shared_weights(language_model.model.embeddings.word_embeddings.weight)
self.num_labels = [head.num_labels for head in prediction_heads]
self.dropout = nn.Dropout(embeds_dropout_prob)
self.lm_output_types = (
[lm_output_types] if isinstance(lm_output_types, str) else lm_output_types
)
self.log_params()
def save(self, save_dir):
"""
Saves the language model and prediction heads. This will generate a config file
and model weights for each.
:param save_dir: path to save to
:type save_dir: str
"""
os.makedirs(save_dir, exist_ok=True)
self.language_model.save(save_dir)
for i, ph in enumerate(self.prediction_heads):
ph.save(save_dir, i)
# Need to save config and pipeline
@classmethod
def load(cls, load_dir, device):
"""
Loads an AdaptiveModel from a directory. The directory must contain:
* language_model.bin
* language_model_config.json
* prediction_head_X.bin multiple PH possible
* prediction_head_X_config.json
* processor_config.json config for transforming input
* vocab.txt vocab file for language model, turning text to Wordpiece Tokens
:param load_dir: location where adaptive model is stored
:type load_dir: str
:param device: to which device we want to sent the model, either cpu or cuda
:type device: torch.device
"""
# Language Model
language_model = LanguageModel.load(load_dir)
# Prediction heads
_, ph_config_files = cls._get_prediction_head_files(load_dir)
prediction_heads = []
ph_output_type = []
for config_file in ph_config_files:
head = PredictionHead.load(config_file)
# # set shared weights between LM and PH
# if type(head) == BertLMHead:
# head.set_shared_weights(language_model)
prediction_heads.append(head)
ph_output_type.append(head.ph_output_type)
return cls(language_model, prediction_heads, 0.1, ph_output_type, device)
def logits_to_loss_per_head(self, logits, **kwargs):
"""
Collect losses from each prediction head.
:param logits: logits, can vary in shape and type, depending on task.
:type logits: object
        :return: The per sample, per prediction head loss, whose first two dimensions have length n_pred_heads, batch_size
"""
all_losses = []
for head, logits_for_one_head in zip(self.prediction_heads, logits):
all_losses.append(head.logits_to_loss(logits=logits_for_one_head, **kwargs))
return all_losses
def logits_to_loss(self, logits, **kwargs):
"""
Get losses from all prediction heads & reduce to single loss *per sample*.
:param logits: logits, can vary in shape and type, depending on task
:type logits: object
:param kwargs: placeholder for passing generic parameters
:type kwargs: object
:return loss: torch.tensor that is the per sample loss (len: batch_size)
"""
all_losses = self.logits_to_loss_per_head(logits, **kwargs)
loss = sum(all_losses)
return loss
def logits_to_preds(self, logits, label_maps, **kwargs):
"""
Get predictions from all prediction heads.
:param logits: logits, can vary in shape and type, depending on task
:type logits: object
:param label_maps: Maps from label encoding to label string
:param label_maps: dict
:return: A list of all predictions from all prediction heads
"""
all_preds = []
# collect preds from all heads
for head, logits_for_head, label_map_for_head in zip(
self.prediction_heads, logits, label_maps
):
preds = head.logits_to_preds(
logits=logits_for_head, label_map=label_map_for_head, **kwargs
)
all_preds.append(preds)
return all_preds
def prepare_labels(self, label_maps, **kwargs):
"""
Label conversion to original label space, per prediction head.
:param label_maps: dictionary for mapping ids to label strings
:type label_maps: dict[int:str]
:return: labels in the right format
"""
all_labels = []
for head, label_map_one_head in zip(self.prediction_heads, label_maps):
labels = head.prepare_labels(label_map=label_map_one_head, **kwargs)
all_labels.append(labels)
return all_labels
def formatted_preds(self, logits, label_maps, **kwargs):
"""
Format predictions for inference.
:param logits: model logits
:type logits: torch.tensor
:param label_maps: dictionary for mapping ids to label strings
:type label_maps: dict[int:str]
:param kwargs: placeholder for passing generic parameters
:type kwargs: object
:return: predictions in the right format
"""
all_preds = []
# collect preds from all heads
for head, logits_for_head, label_map_for_head in zip(
self.prediction_heads, logits, label_maps
):
preds = head.formatted_preds(
logits=logits_for_head, label_map=label_map_for_head, **kwargs
)
all_preds.append(preds)
return all_preds
def forward(self, **kwargs):
"""
Push data through the whole model and returns logits. The data will propagate through the language
model and each of the attached prediction heads.
:param kwargs: Holds all arguments that need to be passed to the language model and prediction head(s).
:return: all logits as torch.tensor or multiple tensors.
"""
# Run language model
sequence_output, pooled_output = self.language_model(
**kwargs, output_all_encoded_layers=False
)
# Run (multiple) prediction heads
all_logits = []
for head, lm_out in zip(self.prediction_heads, self.lm_output_types):
# Choose relevant vectors from LM as output and perform dropout
if lm_out == "per_token":
output = self.dropout(sequence_output)
elif lm_out == "per_sequence":
output = self.dropout(pooled_output)
elif (
lm_out == "per_token_squad"
): # we need a per_token_squad because of variable metric computation later on...
output = self.dropout(sequence_output)
else:
raise ValueError(
"Unknown extraction strategy from language model: {}".format(lm_out)
)
# Do the actual forward pass of a single head
all_logits.append(head(output))
return all_logits
@classmethod
def _get_prediction_head_files(cls, load_dir):
files = os.listdir(load_dir)
model_files = [
os.path.join(load_dir, f)
for f in files
if ".bin" in f and "prediction_head" in f
]
config_files = [
os.path.join(load_dir, f)
for f in files
if "config.json" in f and "prediction_head" in f
]
# sort them to get correct order in case of multiple prediction heads
model_files.sort()
config_files.sort()
error_str = (
"There is a mismatch in number of model files and config files. "
"This might be because the Language Model Prediction Head "
"does not currently support saving and loading"
)
assert len(model_files) == len(config_files), error_str
logger.info(f"Found files for loading {len(model_files)} prediction heads")
return model_files, config_files
def log_params(self):
"""
        Logs parameters to the generic logger MlLogger
"""
params = {
"lm_type": self.language_model.__class__.__name__,
"lm_name": self.language_model.name,
"prediction_heads": ",".join(
[head.__class__.__name__ for head in self.prediction_heads]
),
"lm_output_types": ",".join(self.lm_output_types),
}
try:
MlLogger.log_params(params)
except Exception as e:
logger.warning(f"ML logging didn't work: {e}")
|
the-stack_0_19243 | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import logging
import uuid
import threading
import time
from contextlib import contextmanager
from collections import OrderedDict
from collections import namedtuple
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from guppy import hpy
import tensorflow_io # pylint: disable=unused-import
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from tensorflow.compat.v1 import gfile
import psutil
from fedlearner.common import common_pb2 as common_pb
from fedlearner.common import data_join_service_pb2 as dj_pb
DataBlockSuffix = '.data'
DataBlockMetaSuffix = '.meta'
RawDataMetaPrefix = 'raw_data_'
RawDataPubSuffix = '.pub'
InvalidExampleId = ''.encode()
TmpFileSuffix = '.tmp'
DoneFileSuffix = '.done'
RawDataFileSuffix = '.rd'
InvalidEventTime = -9223372036854775808
InvalidRawId = ''.encode() # deprecated in V2
InvalidBytes = ''.encode()
InvalidInt = -1
SYNC_ALLOWED_OPTIONAL_FIELDS = [
'example_id', 'event_time', 'id_type', 'event_time_deep', 'type',
'click_id']
# must: both old and new version of raw data should have this field
ALLOWED_FIELD = namedtuple('ALLOW_FIELD', ['default_value', 'type', 'must'])
ALLOWED_FIELDS = dict({
'example_id': ALLOWED_FIELD(InvalidExampleId, bytes, True),
'event_time': ALLOWED_FIELD(InvalidEventTime, int, False),
'index': ALLOWED_FIELD(InvalidInt, int, False),
'event_time_deep': ALLOWED_FIELD(InvalidEventTime, int, False),
'raw_id': ALLOWED_FIELD(InvalidRawId, bytes, False),
'type': ALLOWED_FIELD(InvalidBytes, bytes, False),
'id_type': ALLOWED_FIELD(InvalidBytes, bytes, False),
'joined': ALLOWED_FIELD(InvalidInt, int, False),
'click_id': ALLOWED_FIELD(InvalidBytes, bytes, False),
'req_id': ALLOWED_FIELD(InvalidBytes, bytes, False),
'label': ALLOWED_FIELD(InvalidInt, int, False),
'cid': ALLOWED_FIELD(InvalidBytes, bytes, False)
})
@contextmanager
def make_tf_record_iter(fpath, options=None):
record_iter = None
expt = None
try:
record_iter = tf.io.tf_record_iterator(fpath, options)
yield record_iter
except Exception as e: # pylint: disable=broad-except
        logging.warning("Failed to make tf_record_iterator for "\
"%s, reason %s", fpath, e)
expt = e
if record_iter is not None:
del record_iter
if expt is not None:
raise expt
def partition_repr(partition_id):
return 'partition_{:04}'.format(partition_id)
def encode_data_block_meta_fname(data_source_name,
partition_id,
data_block_index):
return '{}.{}.{:08}{}'.format(
data_source_name, partition_repr(partition_id),
data_block_index, DataBlockMetaSuffix
)
def encode_block_id(data_source_name, meta):
return '{}.{}.{:08}.{}-{}'.format(
data_source_name, partition_repr(meta.partition_id),
meta.data_block_index, meta.start_time, meta.end_time
)
def decode_block_id(block_id):
segs = block_id.split('.')
if len(segs) != 4:
        raise ValueError("{} invalid. block_id split by . should "\
                         "have 4 segments".format(block_id))
data_source_name = segs[0]
partition_id = int(segs[1][len('partition_'):])
data_block_index = int(segs[2])
time_frame_segs = segs[3].split('-')
if len(time_frame_segs) != 2:
        raise ValueError("{} invalid. time frame split by - should "
                         "have 2 segments".format(block_id))
start_time, end_time = int(time_frame_segs[0]), int(time_frame_segs[1])
return {"data_source_name": data_source_name,
"partition_id": partition_id,
"data_block_index": data_block_index,
"time_frame": (start_time, end_time)}
def encode_data_block_fname(data_source_name, meta):
block_id = encode_block_id(data_source_name, meta)
return '{}{}'.format(block_id, DataBlockSuffix)
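def _example_decode_block_id():
    # Illustrative sketch added for readability; not part of the original API.
    # A block id of the form produced by encode_block_id, e.g.
    # 'ds.partition_0003.00000007.100-200', decodes back into its components.
    parsed = decode_block_id('ds.partition_0003.00000007.100-200')
    assert parsed['data_source_name'] == 'ds'
    assert parsed['partition_id'] == 3
    assert parsed['data_block_index'] == 7
    assert parsed['time_frame'] == (100, 200)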
def load_data_block_meta(meta_fpath):
assert meta_fpath.endswith(DataBlockMetaSuffix)
if not gfile.Exists(meta_fpath):
return None
with make_tf_record_iter(meta_fpath) as fitr:
return text_format.Parse(next(fitr).decode(), dj_pb.DataBlockMeta())
def data_source_kvstore_base_dir(data_source_name):
return os.path.join('data_source', data_source_name)
def retrieve_data_source(kvstore, data_source_name):
kvstore_key = data_source_kvstore_base_dir(data_source_name)
raw_data = kvstore.get_data(kvstore_key)
if raw_data is None:
raise ValueError("kvstore master key is None for {}".format(
data_source_name)
)
return text_format.Parse(raw_data, common_pb.DataSource())
def commit_data_source(kvstore, data_source):
kvstore_key = \
data_source_kvstore_base_dir(data_source.data_source_meta.name)
kvstore.set_data(kvstore_key, text_format.MessageToString(data_source))
def partition_manifest_kvstore_key(data_source_name, partition_id):
return os.path.join(data_source_kvstore_base_dir(data_source_name),
'raw_data_dir', partition_repr(partition_id))
def raw_data_meta_kvstore_key(data_source_name, partition_id, process_index):
manifest_kvstore_key = partition_manifest_kvstore_key(data_source_name,
partition_id)
return os.path.join(manifest_kvstore_key,
'{}{:08}'.format(RawDataMetaPrefix, process_index))
def example_id_anchor_kvstore_key(data_source_name, partition_id):
db_base_dir = data_source_kvstore_base_dir(data_source_name)
return os.path.join(db_base_dir, 'dumped_example_id_anchor',
partition_repr(partition_id))
def raw_data_pub_kvstore_key(pub_base_dir, partition_id, process_index):
return os.path.join(pub_base_dir, partition_repr(partition_id),
'{:08}{}'.format(process_index, RawDataPubSuffix))
_valid_basic_feature_type = (int, str, float)
def convert_dict_to_tf_example(src_dict):
assert isinstance(src_dict, dict)
tf_feature = {}
for key, feature in src_dict.items():
if not isinstance(key, str):
            raise RuntimeError('the key {}({}) of dict must be a '\
                               'string'.format(key, type(key)))
basic_type = type(feature)
# Due to all fields' value are type of str in csv format,
# we try best to convert the digital string into numerical value.
if basic_type == str and (
(key in ALLOWED_FIELDS and ALLOWED_FIELDS[key].type != bytes) or
key not in ALLOWED_FIELDS):
if feature.lstrip('-').isdigit():
feature = int(feature)
basic_type = int
else:
try:
feature = float(feature)
basic_type = float
except ValueError as e:
if key in ALLOWED_FIELDS:
raise ValueError(
'%s should be numerical instead of str'%key)
        if isinstance(feature, list):
            if len(feature) == 0:
                logging.debug('skip %s since feature is empty list', key)
                continue
            basic_type = type(feature[0])
            if not all(isinstance(x, basic_type) for x in feature):
                raise RuntimeError('type of elements in feature of key {} '\
                                   'is not the same'.format(key))
        elif not isinstance(feature, _valid_basic_feature_type):
            raise RuntimeError("feature type({}) of key {} is not supported "\
                               "for tf Example".format(basic_type, key))
if basic_type == int:
value = feature if isinstance(feature, list) else [feature]
tf_feature[key] = tf.train.Feature(
int64_list=tf.train.Int64List(value=value))
elif basic_type == str:
value = [feat.encode() for feat in feature] if \
isinstance(feature, list) else [feature.encode()]
tf_feature[key] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=value))
else:
assert basic_type == float
value = feature if isinstance(feature, list) else [feature]
tf_feature[key] = tf.train.Feature(
float_list=tf.train.FloatList(value=value))
return tf.train.Example(features=tf.train.Features(feature=tf_feature))
def convert_tf_example_to_dict(src_tf_example):
assert isinstance(src_tf_example, tf.train.Example)
dst_dict = OrderedDict()
tf_feature = src_tf_example.features.feature
for key, feat in tf_feature.items():
if feat.HasField('int64_list'):
csv_val = [item for item in feat.int64_list.value] # pylint: disable=unnecessary-comprehension
elif feat.HasField('bytes_list'):
csv_val = [item for item in feat.bytes_list.value] # pylint: disable=unnecessary-comprehension
elif feat.HasField('float_list'):
csv_val = [item for item in feat.float_list.value] #pylint: disable=unnecessary-comprehension
else:
assert False, "feat type must in int64, byte, float"
assert isinstance(csv_val, list)
dst_dict[key] = csv_val[0] if len(csv_val) == 1 else csv_val
return dst_dict
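def _example_dict_tf_example_roundtrip():
    # Illustrative sketch added for readability; not part of the original API.
    # Digit strings of allowed non-bytes fields are promoted to numbers on the
    # way in, and bytes fields come back as bytes on the way out.
    example = convert_dict_to_tf_example({'example_id': 'abc',
                                          'event_time': '20200102'})
    restored = convert_tf_example_to_dict(example)
    assert restored['example_id'] == b'abc'
    assert restored['event_time'] == 20200102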
def int2bytes(digit, byte_len, byteorder='little'):
return int(digit).to_bytes(byte_len, byteorder)
def bytes2int(byte, byteorder='little'):
return int.from_bytes(byte, byteorder)
def gen_tmp_fpath(fdir):
return os.path.join(fdir, str(uuid.uuid1())+TmpFileSuffix)
def portal_kvstore_base_dir(portal_name):
return os.path.join('portal', portal_name)
def portal_job_kvstore_key(portal_name, job_id):
return os.path.join(portal_kvstore_base_dir(portal_name), 'job_dir',
'{:08}.pj'.format(job_id))
def portal_job_part_kvstore_key(portal_name, job_id, partition_id):
return os.path.join(portal_job_kvstore_key(portal_name, job_id),
partition_repr(partition_id))
def portal_map_output_dir(map_base_dir, job_id):
return os.path.join(map_base_dir, 'map_{:08}'.format(job_id))
def portal_reduce_output_dir(reduce_base_dir, job_id):
return os.path.join(reduce_base_dir, 'reduce_{:08}'.format(job_id))
def data_source_data_block_dir(data_source):
return os.path.join(data_source.output_base_dir, 'data_block')
def data_source_example_dumped_dir(data_source):
return os.path.join(data_source.output_base_dir, 'example_dump')
class Singleton(type):
_instances = {}
_lck = threading.Lock()
def __call__(cls, *args, **kwargs):
with cls._lck:
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args,
**kwargs)
return cls._instances[cls]
class _MemUsageProxy(object, metaclass=Singleton):
def __init__(self):
self._lock = threading.Lock()
self._mem_limit = int(os.environ.get('MEM_LIMIT', '17179869184'))
self._reserved_mem = int(self._mem_limit * 0.5)
if self._reserved_mem >= 2 << 30:
self._reserved_mem = 2 << 30
self._rss_mem_usage = 0
self._rss_updated_tm = 0
def check_heap_mem_water_level(self, heap_mem_usage, water_level_percent):
with self._lock:
avail_mem = self._mem_limit - self._reserved_mem
return heap_mem_usage >= avail_mem * water_level_percent
def check_rss_mem_water_level(self, water_level_percent):
avail_mem = self._mem_limit - self._reserved_mem
return self._update_rss_mem_usage() >= avail_mem * water_level_percent
def get_heap_mem_usage(self):
return hpy().heap().size
def _update_rss_mem_usage(self):
with self._lock:
if time.time() - self._rss_updated_tm >= 0.25:
self._rss_mem_usage = psutil.Process().memory_info().rss
self._rss_updated_tm = time.time()
return self._rss_mem_usage
def _get_mem_usage_proxy():
return _MemUsageProxy()
class _HeapMemStats(object, metaclass=Singleton):
class StatsRecord(object):
def __init__(self, potential_mem_incr, stats_expiration_time):
self._lock = threading.Lock()
self._potential_mem_incr = potential_mem_incr
self._stats_expiration_time = stats_expiration_time
self._stats_ts = 0
self._heap_mem_usage = 0
def stats_expiration(self):
return self._stats_ts <= 0 or \
(self._stats_expiration_time is not None and
time.time() - self._stats_ts >= \
self._stats_expiration_time)
def update_stats(self):
with self._lock:
if self.stats_expiration():
self._heap_mem_usage = \
_get_mem_usage_proxy().get_heap_mem_usage()
self._stats_ts = time.time()
def get_heap_mem_usage(self):
return self._heap_mem_usage + self._potential_mem_incr
def __init__(self, stats_expiration_time):
self._lock = threading.Lock()
self._stats_granular = 0
self._stats_start_key = None
self._stats_expiration_time = stats_expiration_time
self._stats_map = {}
def CheckOomRisk(self, stats_key,
water_level_percent,
potential_mem_incr=0):
if not self._need_heap_stats(stats_key):
return False
inner_key = self._gen_inner_stats_key(stats_key)
sr = None
with self._lock:
            if inner_key not in self._stats_map:
self._stats_map[inner_key] = \
_HeapMemStats.StatsRecord(potential_mem_incr,
self._stats_expiration_time)
sr = self._stats_map[inner_key]
if not sr.stats_expiration():
return _get_mem_usage_proxy().check_heap_mem_water_level(
sr.get_heap_mem_usage(),
water_level_percent
)
assert sr is not None
sr.update_stats()
return _get_mem_usage_proxy().check_heap_mem_water_level(
sr.get_heap_mem_usage(), water_level_percent
)
def _gen_inner_stats_key(self, stats_key):
return int(stats_key // self._stats_granular * self._stats_granular)
def _need_heap_stats(self, stats_key):
with self._lock:
if self._stats_granular <= 0 and \
_get_mem_usage_proxy().check_rss_mem_water_level(0.5):
self._stats_granular = stats_key // 16
self._stats_start_key = stats_key // 2
if self._stats_granular <= 0:
self._stats_granular = 1
                logging.warning('auto tuning the heap stats granularity to %d',
                                self._stats_granular)
return self._stats_granular > 0 and \
stats_key >= self._stats_start_key
def get_heap_mem_stats(stats_expiration_time):
return _HeapMemStats(stats_expiration_time)
def interval_to_timestamp(itv):
unit = ["Y", "M", "D", "H", "N", "S"]
multiple = [3600*24*30*12, 3600*24*30, 3600*24, 3600, 60, 1]
unit_order, unit_no = {}, {}
for i, item in enumerate(unit):
unit_order[item] = len(unit) - i
s_no = ""
prv_order = len(unit) + 1
for c in itv:
if c.isdigit():
s_no += c
else:
c = c.upper()
if c not in unit_order or prv_order <= unit_order[c]:
return None
unit_no[c] = s_no
prv_order = unit_order[c]
s_no = ""
tmstmp = 0
if len(s_no) > 0 and "S" not in unit_no:
unit_no["S"] = s_no
for i, item in enumerate(unit):
if item in unit_no:
tmstmp += int(unit_no[item]) * multiple[i]
return tmstmp
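def _example_interval_to_timestamp():
    # Illustrative sketch added for readability; not part of the original API.
    # Units are Y/M/D/H/N/S where N means minutes; a trailing bare number is
    # treated as seconds.
    assert interval_to_timestamp('1D') == 24 * 3600
    assert interval_to_timestamp('2H30N') == 2 * 3600 + 30 * 60
    assert interval_to_timestamp('90') == 90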
def timestamp_check_valid(iso_dt):
if iso_dt.year > 3000:
return False
return True
def convert_to_iso_format(value):
"""
Args:
value: bytes | str | int | float. Value to be converted. Expected to
be a numeric in the format of yyyymmdd or yyyymmddhhnnss.
Returns: str.
Try to convert a datetime str or numeric to iso format datetime str.
First try to convert based on the length of str. If it does not
match any datetime format supported, convert the value assuming it
is a timestamp. If the value is not a timestamp, return iso format
of timestamp=0.
"""
assert isinstance(value, (bytes, str, int, float))
if isinstance(value, bytes):
value = value.decode()
elif isinstance(value, (int, float)):
value = str(value)
# first try to parse datetime from value
try:
if len(value) == 8:
iso = datetime.strptime(value, '%Y%m%d').isoformat()
elif len(value) == 14:
iso = datetime.strptime(value, '%Y%m%d%H%M%S').isoformat()
else:
raise ValueError
return iso
except ValueError: # Not fitting any of above patterns
# then try to convert directly
try:
iso = datetime.fromtimestamp(float(value))
if not timestamp_check_valid(iso):
raise ValueError
except ValueError as e: # might be a non-number str
logging.warning('OPTIONAL_STATS: unable to parse event time %s, '
'defaults to 0. error: %s', value, repr(e))
iso = datetime.fromtimestamp(0)
return iso.isoformat()
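def _example_convert_to_iso_format():
    # Illustrative sketch added for readability; not part of the original API.
    # yyyymmdd and yyyymmddhhmmss values are parsed directly; anything else is
    # treated as a unix timestamp (local time) and falls back to timestamp 0.
    assert convert_to_iso_format(b'20200102') == '2020-01-02T00:00:00'
    assert convert_to_iso_format('20200102030405') == '2020-01-02T03:04:05'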
def convert_to_str(value):
if isinstance(value, bytes):
value = value.decode()
return str(value)
def _parse_hh_mm_ss_ff(tstr):
# Parses things of the form HH[:MM[:SS[.fff[fff]]]]
len_str = len(tstr)
time_comps = [0, 0, 0, 0]
pos = 0
for comp in range(0, 3):
if (len_str - pos) < 2:
raise ValueError('Incomplete time component')
time_comps[comp] = int(tstr[pos:pos+2])
pos += 2
next_char = tstr[pos:pos+1]
if not next_char or comp >= 2:
break
if next_char != ':':
raise ValueError('Invalid time separator: %c' % next_char)
pos += 1
if pos < len_str:
#pylint: disable=no-else-raise
if tstr[pos] != '.':
raise ValueError('Invalid microsecond component')
else:
pos += 1
len_remainder = len_str - pos
if len_remainder not in (3, 6):
raise ValueError('Invalid microsecond component')
time_comps[3] = int(tstr[pos:])
if len_remainder == 3:
time_comps[3] *= 1000
return time_comps
def _parse_isoformat_time(tstr):
# Format supported is HH[:MM[:SS[.fff[fff]]]][+HH:MM[:SS[.ffffff]]]
len_str = len(tstr)
if len_str < 2:
raise ValueError('Isoformat time too short')
# This is equivalent to re.search('[+-]', tstr), but faster
tz_pos = (tstr.find('-') + 1 or tstr.find('+') + 1)
timestr = tstr[:tz_pos-1] if tz_pos > 0 else tstr
time_comps = _parse_hh_mm_ss_ff(timestr)
tzi = None
if tz_pos > 0:
tzstr = tstr[tz_pos:]
# Valid time zone strings are:
# HH:MM len: 5
# HH:MM:SS len: 8
# HH:MM:SS.ffffff len: 15
if len(tzstr) not in (5, 8, 15):
raise ValueError('Malformed time zone string')
tz_comps = _parse_hh_mm_ss_ff(tzstr)
if all(x == 0 for x in tz_comps):
tzi = timezone.utc
else:
tzsign = -1 if tstr[tz_pos - 1] == '-' else 1
            td = timedelta(hours=tz_comps[0], minutes=tz_comps[1],
                           seconds=tz_comps[2], microseconds=tz_comps[3])
tzi = timezone(tzsign * td)
time_comps.append(tzi)
return time_comps
# Helpers for parsing the result of isoformat()
def _parse_isoformat_date(dtstr):
# It is assumed that this function will only be called with a
# string of length exactly 10, and (though this is not used) ASCII-only
year = int(dtstr[0:4])
if dtstr[4] != '-':
raise ValueError('Invalid date separator: %s' % dtstr[4])
month = int(dtstr[5:7])
if dtstr[7] != '-':
raise ValueError('Invalid date separator')
day = int(dtstr[8:10])
return [year, month, day]
def datetime_from_isformat(date_string):
"""Construct a datetime from the output of datetime.isoformat().
backported from Python3.7
"""
if not isinstance(date_string, str):
raise TypeError('fromisoformat: argument must be str')
# Split this at the separator
dstr = date_string[0:10]
tstr = date_string[11:]
try:
date_components = _parse_isoformat_date(dstr)
except ValueError:
raise ValueError(f'Invalid isoformat string: {date_string!r}')
if tstr:
try:
time_components = _parse_isoformat_time(tstr)
except ValueError:
raise ValueError(f'Invalid isoformat string: {date_string!r}')
else:
time_components = [0, 0, 0, 0, None]
return datetime(*(date_components + time_components))
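def _example_datetime_from_isformat():
    # Illustrative sketch added for readability; not part of the original API.
    # Mirrors datetime.fromisoformat() from Python 3.7+.
    assert datetime_from_isformat('2020-01-02T03:04:05') == \
        datetime(2020, 1, 2, 3, 4, 5)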
def time_diff(minuend, sub):
"""minuend and sub should be same time format and must be legal numeric.
"""
ts_minuend = datetime_from_isformat(
convert_to_iso_format(minuend)).timestamp()
ts_sub = datetime_from_isformat(convert_to_iso_format(sub)).timestamp()
return ts_minuend - ts_sub
|
the-stack_0_19246 | import argparse
import numpy as np
import os
import sys
import tabulate
import time
import torch
import torch.nn.functional as F
import data
import models
import utils
parser = argparse.ArgumentParser(description='FGE training')
parser.add_argument('--dir', type=str, default='/tmp/fge/', metavar='DIR',
help='training directory (default: /tmp/fge)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true',
help='switches between validation and test set (default: validation)')
parser.add_argument('--transform', type=str, default='VGG', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--data_path', type=str, default=None, metavar='PATH',
help='path to datasets location (default: None)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num-workers', type=int, default=4, metavar='N',
help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default=None, metavar='MODEL',
help='model name (default: None)')
parser.add_argument('--ckpt', type=str, default=None, metavar='CKPT',
help='checkpoint to eval (default: None)')
parser.add_argument('--epochs', type=int, default=20, metavar='N',
help='number of epochs to train (default: 20)')
parser.add_argument('--cycle', type=int, default=4, metavar='N',
                    help='cycle length in epochs (default: 4)')
parser.add_argument('--lr_1', type=float, default=0.05, metavar='LR1',
help='initial learning rate (default: 0.05)')
parser.add_argument('--lr_2', type=float, default=0.0001, metavar='LR2',
help='initial learning rate (default: 0.0001)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--wd', type=float, default=1e-4, metavar='WD',
help='weight decay (default: 1e-4)')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
args = parser.parse_args()
assert args.cycle % 2 == 0, 'Cycle length should be even'
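# Note (added comment): the cycle length is halved below (checkpoints every
# cycle/2 epochs, ensemble members harvested mid-cycle), so it must divide
# evenly into two halves.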
os.makedirs(args.dir, exist_ok=True)
with open(os.path.join(args.dir, 'fge.sh'), 'w') as f:
f.write(' '.join(sys.argv))
f.write('\n')
torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test
)
architecture = getattr(models, args.model)
model = architecture.base(num_classes=num_classes, **architecture.kwargs)
criterion = F.cross_entropy
checkpoint = torch.load(args.ckpt)
start_epoch = checkpoint['epoch'] + 1
model.load_state_dict(checkpoint['model_state'])
model.cuda()
optimizer = torch.optim.SGD(
model.parameters(),
lr=args.lr_1,
momentum=args.momentum,
weight_decay=args.wd
)
optimizer.load_state_dict(checkpoint['optimizer_state'])
ensemble_size = 0
predictions_sum = np.zeros((len(loaders['test'].dataset), num_classes))
columns = ['ep', 'lr', 'tr_loss', 'tr_acc', 'te_nll', 'te_acc', 'ens_acc', 'time']
for epoch in range(args.epochs):
time_ep = time.time()
lr_schedule = utils.cyclic_learning_rate(epoch, args.cycle, args.lr_1, args.lr_2)
train_res = utils.train(loaders['train'], model, optimizer, criterion, lr_schedule=lr_schedule)
test_res = utils.test(loaders['test'], model, criterion)
time_ep = time.time() - time_ep
predictions, targets = utils.predictions(loaders['test'], model)
ens_acc = None
if (epoch % args.cycle + 1) == args.cycle // 2:
ensemble_size += 1
predictions_sum += predictions
ens_acc = 100.0 * np.mean(np.argmax(predictions_sum, axis=1) == targets)
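    # Note (added comment): predictions are accumulated at the low-LR midpoint
    # of each cycle, and ens_acc reports the accuracy of the running
    # averaged-prediction ensemble rather than of the current snapshot alone.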
if (epoch + 1) % (args.cycle // 2) == 0:
utils.save_checkpoint(
args.dir,
start_epoch + epoch,
name='fge',
model_state=model.state_dict(),
optimizer_state=optimizer.state_dict()
)
values = [epoch, lr_schedule(1.0), train_res['loss'], train_res['accuracy'], test_res['nll'],
test_res['accuracy'], ens_acc, time_ep]
table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='9.4f')
if epoch % 40 == 0:
table = table.split('\n')
table = '\n'.join([table[1]] + table)
else:
table = table.split('\n')[2]
print(table)
|
the-stack_0_19247 | from pyspark.sql import SparkSession
from dask.distributed import Client
from DataBase import createSchema as createSchema
from Configuration import Settings as Settings
from Runner import runTest
from Utils import Execution
from EndToEndTests import aggregationsWithoutGroupByTest,\
unifyTablesTest, useLimitTest
from EndToEndTests import commonTableExpressionsTest
from EndToEndTests import fullOuterJoinsTest as fullOuterJoinsTest
from EndToEndTests import groupByTest as groupByTest
from EndToEndTests import orderbyTest as orderbyTest
from EndToEndTests import whereClauseTest as whereClauseTest
from EndToEndTests import innerJoinsTest as innerJoinsTest
from EndToEndTests import unionTest as unionTest
from EndToEndTests import leftOuterJoinsTest as leftOuterJoinsTest
from EndToEndTests import nonEquiJoinsTest
from EndToEndTests import unaryOpsTest as unaryOpsTest
from EndToEndTests import GroupByWitoutAggregations
from EndToEndTests import countWithoutGroupByTest
from EndToEndTests import coalesceTest as coalesceTest
from EndToEndTests import columnBasisTest as columnBasisTest
from EndToEndTests import nestedQueriesTest
from EndToEndTests import stringTests
from EndToEndTests import dateTest
from EndToEndTests import timestampTest
from EndToEndTests import predicatesWithNulls
#from EndToEndTests import countDistincTest
from EndToEndTests import tablesFromPandasTest
from EndToEndTests import loadDataTest
from EndToEndTests import booleanTest
from EndToEndTests import dirTest
from EndToEndTests import fileSystemLocalTest
from EndToEndTests import fileSystemS3Test
from EndToEndTests import fileSystemGSTest
from EndToEndTests import simpleDistributionTest
from EndToEndTests import wildCardTest
from EndToEndTests import caseTest
from EndToEndTests import bindableAliasTest
from EndToEndTests import castTest
from EndToEndTests import concatTest
from EndToEndTests import likeTest
from EndToEndTests import literalTest
from EndToEndTests import substringTest
from EndToEndTests import tpchQueriesTest
from EndToEndTests import timestampdiffTest
from EndToEndTests import concatTest
from EndToEndTests import roundTest
from Utils import gpuMemory, init_context
from pynvml import *
from blazingsql import BlazingContext
from Configuration import ExecutionMode
def main():
print('**init end2end**')
Execution.getArgs()
nvmlInit()
dir_data_file = Settings.data['TestSettings']['dataDirectory']
nRals = Settings.data['RunSettings']['nRals']
drill = "drill"
spark = "spark"
compareResults = True
if 'compare_results' in Settings.data['RunSettings']:
compareResults = Settings.data['RunSettings']['compare_results']
if (Settings.execution_mode == ExecutionMode.FULL and compareResults == "true") or Settings.execution_mode == ExecutionMode.GENERATOR:
# Create Table Drill ------------------------------------------------------------------------------------------------------
from pydrill.client import PyDrill
drill = PyDrill(host = 'localhost', port = 8047)
createSchema.init_drill_schema(drill, Settings.data['TestSettings']['dataDirectory'], bool_test=True)
# Create Table Spark ------------------------------------------------------------------------------------------------------
spark = SparkSession.builder.appName("allE2ETest").getOrCreate()
createSchema.init_spark_schema(spark, Settings.data['TestSettings']['dataDirectory'])
#Create Context For BlazingSQL
bc, dask_client = init_context()
targetTestGroups = Settings.data['RunSettings']['targetTestGroups']
runAllTests = (len(targetTestGroups) == 0) # if targetTestGroups was empty the user wants to run all the tests
if runAllTests or ("aggregationsWithoutGroupByTest" in targetTestGroups):
aggregationsWithoutGroupByTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("coalesceTest" in targetTestGroups):
coalesceTest.main(dask_client, drill, dir_data_file, bc, nRals) #we are not supporting coalesce yet
if runAllTests or ("columnBasisTest" in targetTestGroups):
columnBasisTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("commonTableExpressionsTest" in targetTestGroups):
commonTableExpressionsTest.main(dask_client, drill, dir_data_file, bc, nRals)
#countDistincTest.main(dask_client, drill, dir_data_file, bc) #we are not supporting count distinct yet
if runAllTests or ("countWithoutGroupByTest" in targetTestGroups):
countWithoutGroupByTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("dateTest" in targetTestGroups):
dateTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("timestampTest" in targetTestGroups):
timestampTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("fullOuterJoinsTest" in targetTestGroups):
fullOuterJoinsTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("groupByTest" in targetTestGroups):
groupByTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("GroupByWitoutAggregations" in targetTestGroups):
GroupByWitoutAggregations.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("innerJoinsTest" in targetTestGroups):
innerJoinsTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("leftOuterJoinsTest" in targetTestGroups):
leftOuterJoinsTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("nonEquiJoinsTest" in targetTestGroups):
nonEquiJoinsTest.main(dask_client, drill, dir_data_file, bc, nRals)
#loadDataTest.main(dask_client, bc) #check this
if runAllTests or ("nestedQueriesTest" in targetTestGroups):
nestedQueriesTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("orderbyTest" in targetTestGroups):
orderbyTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("predicatesWithNulls" in targetTestGroups):
predicatesWithNulls.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("stringTests" in targetTestGroups):
stringTests.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("tablesFromPandasTest" in targetTestGroups):
tablesFromPandasTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("unaryOpsTest" in targetTestGroups):
unaryOpsTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("unifyTablesTest" in targetTestGroups):
unifyTablesTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("unionTest" in targetTestGroups):
unionTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("useLimitTest" in targetTestGroups):
useLimitTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("whereClauseTest" in targetTestGroups):
whereClauseTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("bindableAliasTest" in targetTestGroups):
bindableAliasTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("booleanTest" in targetTestGroups):
booleanTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("caseTest" in targetTestGroups):
caseTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("castTest" in targetTestGroups):
castTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("concatTest" in targetTestGroups):
concatTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("literalTest" in targetTestGroups):
literalTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("dirTest" in targetTestGroups):
dirTest.main(dask_client, drill, dir_data_file, bc, nRals)
#fileSystemHdfsTest.main(dask_client, drill, dir_data_file, bc) #HDFS is not working yet
#mixedFileSystemTest.main(dask_client, drill, dir_data_file, bc) #HDFS is not working yet
if runAllTests or ("likeTest" in targetTestGroups):
likeTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("simpleDistributionTest" in targetTestGroups):
simpleDistributionTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("substringTest" in targetTestGroups):
substringTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("wildCardTest" in targetTestGroups):
wildCardTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("tpchQueriesTest" in targetTestGroups):
tpchQueriesTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
if runAllTests or ("roundTest" in targetTestGroups):
roundTest.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("fileSystemLocalTest" in targetTestGroups):
fileSystemLocalTest.main(dask_client, drill, dir_data_file, bc, nRals)
if Settings.execution_mode != ExecutionMode.GPUCI:
if runAllTests or ("fileSystemS3Test" in targetTestGroups):
fileSystemS3Test.main(dask_client, drill, dir_data_file, bc, nRals)
if runAllTests or ("fileSystemGSTest" in targetTestGroups):
fileSystemGSTest.main(dask_client, drill, dir_data_file, bc, nRals)
#timestampdiffTest.main(dask_client, spark, dir_data_file, bc, nRals)
if Settings.execution_mode != ExecutionMode.GENERATOR:
result, error_msgs = runTest.save_log(Settings.execution_mode == ExecutionMode.GPUCI)
max = 0
for i in range(0, len(Settings.memory_list)):
if (Settings.memory_list[i].delta) > max:
max = Settings.memory_list[i].delta
print("MAX DELTA: " + str(max))
print('*******************************************************************************')
for i in range(0, len(Settings.memory_list)):
print(Settings.memory_list[i].name + ":" +
" Start Mem: " + str(Settings.memory_list[i].start_mem) +
" End Mem: " + str(Settings.memory_list[i].end_mem) +
" Diff: " + str(Settings.memory_list[i].delta))
return result, error_msgs
return True, []
if __name__ == '__main__':
import time
start = time.time() # in seconds
result, error_msgs = main()
if Settings.execution_mode != ExecutionMode.GENERATOR:
# NOTE kahro william percy mario : here we tell to gpuci there was an error comparing with historic results
# TODO william kharoly felipe we should try to enable and use this function in the future
result = True
if result == False:
for error_msg in error_msgs:
print(error_msg)
import sys
end = time.time() # in seconds
elapsed = end - start # in seconds
time_delta_desc = str(elapsed/60) + " minutes and " + str(int(elapsed) % 60) + " seconds"
print("==>> E2E FAILED against previous run, total time was: " + time_delta_desc)
# TODO percy kharo willian: uncomment this line when gpuci has all the env vars set
#sys.exit(1) # return error exit status to the command prompt (shell)
|
the-stack_0_19248 | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import subprocess
from ote import REID_TOOLS
def collect_accuracy(path):
""" Collects accuracy values in log file. """
r1 = None
r5 = None
mAP = None
r1_content = 'Rank-1 '
r5_content = 'Rank-5 '
map_content = 'mAP:'
with open(path) as input_stream:
for line in input_stream:
candidate = line.strip()
if r1_content in candidate:
r1 = float(candidate.split(':')[-1].replace('%', ''))
elif r5_content in candidate:
r5 = float(candidate.split(':')[-1].replace('%', ''))
elif map_content in candidate:
mAP = float(candidate.split(':')[-1].replace('%', ''))
return r1, r5, mAP
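def _example_collect_accuracy(log_path='/tmp/example_test_py_stdout'):
    # Illustrative sketch; the log path and line format below are assumptions
    # based only on how collect_accuracy parses 'Rank-1', 'Rank-5' and 'mAP:'.
    with open(log_path, 'w') as log_file:
        log_file.write('Rank-1  : 95.3%\nRank-5  : 99.1%\nmAP: 87.0%\n')
    rank1, rank5, mean_ap = collect_accuracy(log_path)
    assert (rank1, rank5, mean_ap) == (95.3, 99.1, 87.0)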
def mean_accuracy_eval(config_path, work_dir, snapshot, update_config, **kwargs):
""" Computes mean accuracy. """
def get_topk_dict(value, k=1):
return {
'key': 'accuracy', 'value': value, 'unit': '%', 'display_name': f'Top-{k} accuracy'
}
outputs = []
if 'data.root' not in update_config:
logging.warning('Passed empty path to data root folder. '
'Skipping accuracy calculation.')
outputs.append(get_topk_dict(None, 1))
outputs.append(get_topk_dict(None, 5))
else:
test_py_stdout = os.path.join(work_dir, 'test_py_stdout')
subprocess.run(
f'python {REID_TOOLS}/main.py'
f' --config-file {config_path}'
f' test.evaluate True'
f' {update_config}'
f' | tee {test_py_stdout}',
check=True, shell=True
)
r1, r5, _ = collect_accuracy(os.path.join(work_dir, 'test_py_stdout'))
outputs.append(get_topk_dict(r1, 1))
outputs.append(get_topk_dict(r5, 5))
return outputs
|
the-stack_0_19252 | import torch
import numpy as np
import cv2
import os
from torchvision import transforms
from PIL import Image
# from .base_dataset import BaseDataset#, NoriBaseDataset
from torch.utils.data import Dataset, DataLoader
import pickle as pkl
from scipy import ndimage
import torchvision.transforms.functional as TF
import torch.nn.functional as F
import random
import PIL.ImageCms
ALLMASKTYPES = ['bbox', 'seg', 'random_bbox', 'random_free_form', 'val']
def transform_train(image, type):
if random.random() > 0.5:
image = TF.hflip(image)
angle = random.choice([0,90,180,270])
image = TF.rotate(image, angle, expand=True)
# Random crop 3/4 of the images
# if type == 'img':
if type == 'img' and random.random() <= 0.75:
w,h = image.size
width = random.randint(256,w)
height = random.randint(256,h)
# width = 256
# height = 256
left = random.randint(0,w-width)
top = random.randint(0,h-height)
image = TF.crop(image, top, left, height, width)
image = TF.to_tensor(image)
return image
class InpaintDataset(Dataset):
"""
Dataset for Inpainting task
Params:
img_flist_path(str): The file which contains img file path list (e.g. test.flist)
mask_flist_paths_dict(dict): The dict contain the files which contains the pkl or xml file path for
generate mask. And the key represent the mask type (e.g. {"bbox":"bbox_flist.txt", "seg":..., "random":None})
resize_shape(tuple): The shape of the final image (default:(256,256))
        transforms_oprs(list) : Determine which transformations are used on the image (default:['random_crop', 'to_tensor'])
random_bbox_shape(tuple): if use random bbox mask, it define the shape of the mask (default:(32,32))
random_bbox_margin(tuple): if use random bbox, it define the margin of the bbox which means the distance between the mask and the margin of the image
(default:(64,64))
Return:
img, *mask
"""
def __init__(self, img_flist_path, mode='val', img_size=256,
resize_shape=(256, 256),
transforms_oprs=['to_tensor'], #transforms_oprs=['random_crop', 'to_tensor'],
# random_bbox_shape=(32, 32), random_bbox_margin=(64, 64),
random_bbox_shape=(64, 64), random_bbox_margin=(1, 1),
random_ff_setting={'img_shape':[256,256],'mv':5, 'ma':4.0, 'ml':40, 'mbw':10}, random_bbox_number=6):
test_dir = sorted(os.listdir(img_flist_path))
self.img_paths = []
self.mask_paths = []
for file_name in test_dir:
if file_name.endswith('masked.jpg'):
self.img_paths.append(os.path.join(img_flist_path, file_name))
else:
self.mask_paths.append(os.path.join(img_flist_path, file_name))
# with open(img_flist_path, 'r') as f:
# self.img_paths = f.read().splitlines()
# self.mask_paths = {}
# for mask_type in mask_flist_paths_dict:
# assert mask_type in ALLMASKTYPES
# if 'random' in mask_type:
# self.mask_paths[mask_type] = ['' for i in self.img_paths]
# else:
# with open(mask_flist_paths_dict[mask_type]) as f:
# self.mask_paths[mask_type] = f.read().splitlines()
self.mode = mode
self.img_size = img_size
self.resize_shape = resize_shape
self.random_bbox_shape = random_bbox_shape
self.random_bbox_margin = random_bbox_margin
self.random_ff_setting = random_ff_setting
self.random_bbox_number = random_bbox_number
self.transform_val = transforms.Compose([
# transforms.ColorJitter(brightness=0, contrast=0, saturation=0.1, hue=0),
transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB
])
def loader(self, **args):
return DataLoader(dataset=self, **args)
def __len__(self):
return len(self.img_paths)
def __getitem__(self, index):
# create the paths for images and masks
img_path = self.img_paths[index]
if self.mode=='val':
mask_index = index
else: # Pick a random mask during training
mask_index = random.randint(0, len(self.img_paths)-1)
image_name = os.path.basename(img_path)
mask_paths = self.mask_paths[mask_index]
# mask_paths = {}
# for mask_type in self.mask_paths:
# mask_paths[mask_type] = self.mask_paths[mask_index]
if self.mode=='val':
img = Image.open(img_path)
img = self.transform_val(img)
# input_img = Image.open('color.jpg')
# img.save('recolor.jpg',
# format = 'JPEG', quality = 100, icc_profile = input_img.info.get('icc_profile',''))
# img = self.transform_val(Image.open('recolor.jpg'))
img = (img/0.5 - 1) # Make image in range [-1,1]
# mask, original_mask = self.read_mask(mask_paths['val'], 'val')
mask, original_mask = self.read_mask(mask_paths, 'val')
mask = self.transform_val(mask).type(torch.FloatTensor)[:1, :,:]
original_mask = self.transform_val(original_mask).type(torch.FloatTensor)[:1, :,:]
else:
img = transform_train(Image.open(img_path), 'img')
mask_type = random.choice(['val','random_bbox','random_free_form','val'])
# mask = self.read_mask('TrainImgs/train_masks/001.jpg', 'val')
# mask = self.read_mask(mask_paths['val'], 'random_free_form')
# mask.save('expanded_masks/'+image_name)
# mask = transform_train(self.read_mask('TrainImgs/train_masks/001.jpg', 'random_bbox'),'mask')
# mask = {mask_type:(transform_train(self.read_mask(mask_paths[mask_type], mask_type), 'mask')).type(torch.FloatTensor)[:1, :,:] for mask_type in mask_paths}
if(mask_type=='val'):
# mask, original_mask = self.read_mask(mask_paths[mask_type], mask_type)
mask, _ = self.read_mask(mask_paths[mask_type], mask_type)
mask = (transform_train(mask, 'mask')).type(torch.FloatTensor)[:1, :,:]
# original_mask = (transform_train(original_mask, 'mask')).type(torch.FloatTensor)[:1, :,:]
else:
mask = (transform_train(self.read_mask('', mask_type), 'mask')).type(torch.FloatTensor)[:1, :,:]
# original_mask = mask.copy()
# Rescale the images
img = torch.unsqueeze(img,0)
mask = torch.unsqueeze(mask,0)
# original_mask = torch.unsqueeze(original_mask,0)
align_corners = True
img = (img/0.5 - 1) # Make image in range [-1,1]
img = F.interpolate(img, self.img_size, mode='bicubic', align_corners=align_corners)
img = img.clamp(min=-1, max=1)
mask = F.interpolate(mask, self.img_size, mode='bicubic', align_corners=align_corners)
mask = (mask > 0).type(torch.FloatTensor)
# original_mask = F.interpolate(original_mask, self.img_size, mode='bicubic', align_corners=align_corners)
# original_mask = (original_mask > 0).type(torch.FloatTensor)
# original_mask = torch.squeeze(original_mask,0)
img = torch.squeeze(img, 0)
mask = torch.squeeze(mask,0)
# These parameters are not used during training
original_mask = 1
return img, mask, original_mask, image_name, img_path
def read_mask(self, path, mask_type):
"""
Read Masks now only support bbox
"""
if mask_type == 'random_bbox':
bboxs = []
for i in range(self.random_bbox_number):
bbox = InpaintDataset.random_bbox(self.resize_shape, self.random_bbox_margin, self.random_bbox_shape)
bboxs.append(bbox)
elif mask_type == 'random_free_form':
mask = InpaintDataset.random_ff_mask(self.random_ff_setting)
return Image.fromarray(np.tile(mask,(1,1,3)).astype(np.uint8))
elif 'val' in mask_type:
mask, original_mask = InpaintDataset.read_val_mask(path)
return Image.fromarray(np.tile(mask,(1,1,3)).astype(np.uint8)), Image.fromarray(np.tile(original_mask,(1,1,3)).astype(np.uint8))
mask = InpaintDataset.bbox2mask(bboxs, self.resize_shape)
return Image.fromarray(np.tile(mask,(1,1,3)).astype(np.uint8))
@staticmethod
def read_val_mask(path):
"""
Read masks from val mask data
"""
if path.endswith("pkl"):
mask = pkl.load(open(path, 'rb'))
else:
mask = Image.open(path)
mask = np.array(mask).astype(np.uint8)
# Keep it in range of [0,255], required for when using ToTensor()
mask[mask > 128] = 255
mask = np.invert(mask)
mask[mask > 0] = 1
original_mask = mask.copy()
# Remove small holes in mask
mask = ndimage.binary_dilation(mask).astype(mask.dtype)
mask = ndimage.binary_dilation(mask).astype(mask.dtype)
mask = ndimage.binary_erosion(mask).astype(mask.dtype)
mask = ndimage.binary_erosion(mask).astype(mask.dtype)
# Add images to keep information on he corner of the images.
mask = original_mask + mask
mask *= 255
original_mask *= 255
mask = np.expand_dims(mask, axis=2)
original_mask = np.expand_dims(original_mask, axis=2)
return mask, original_mask
@staticmethod
def read_bbox(path):
"""
        The general method for reading a bbox file by judging the file type
Return:
bbox:[y, x, height, width], shape: (height, width)
"""
        if path[-3:] == 'pkl' and 'Human' in path:
            return InpaintDataset.read_bbox_ch(path)
        elif path[-3:] == 'pkl' and 'COCO' in path:
            return InpaintDataset.read_bbox_pkl(path)
        else:
            return InpaintDataset.read_bbox_xml(path)
@staticmethod
def read_bbox_xml(path):
"""
Read bbox for voc xml
Return:
bbox:[y,x,height, width], shape: (height, width)
"""
        from bs4 import BeautifulSoup  # local import; assumes beautifulsoup4 is available
        with open(path, 'r') as reader:
            xml = reader.read()
        soup = BeautifulSoup(xml, 'xml')
size = {}
for tag in soup.size:
if tag.string != "\n":
size[tag.name] = int(tag.string)
objects = soup.find_all('object')
bndboxs = []
for obj in objects:
bndbox = {}
for tag in obj.bndbox:
if tag.string != '\n':
bndbox[tag.name] = int(tag.string)
bbox = [bndbox['ymin'], bndbox['xmin'], bndbox['ymax']-bndbox['ymin'], bndbox['xmax']-bndbox['xmin']]
bndboxs.append(bbox)
return bndboxs, (size['height'], size['width'])
@staticmethod
def read_bbox_pkl(path):
"""
Read bbox from coco pkl
Return:
bbox:[y,x,height, width], shape: (height, width)
"""
aux_dict = pkl.load(open(path, 'rb'))
bbox = aux_dict["bbox"]
shape = aux_dict["shape"]
#bbox = random.choice(bbox)
#fbox = bbox['fbox']
return [[int(bbox[1]), int(bbox[0]), int(bbox[3]), int(bbox[2])]], (shape[1], shape[0])
@staticmethod
def read_bbox_ch(path):
"""
Read bbox from crowd human pkl
Return:
bbox:[y,x,height, width], shape: (height, width)
"""
aux_dict = pkl.load(open(path, 'rb'))
bboxs = aux_dict["bbox"]
bbox = random.choice(bboxs)
extra = bbox['extra']
shape = aux_dict["shape"]
while 'ignore' in extra and extra['ignore'] == 1 and bbox['fbox'][0] < 0 and bbox['fbox'][1] < 0:
bbox = random.choice(bboxs)
extra = bbox['extra']
fbox = bbox['fbox']
return [[fbox[1],fbox[0],fbox[3],fbox[2]]], (shape[1], shape[0])
@staticmethod
def read_seg_img(path):
pass
@staticmethod
def random_bbox(shape, margin, bbox_shape):
"""Generate a random tlhw with configuration.
Args:
config: Config should have configuration including IMG_SHAPES,
VERTICAL_MARGIN, HEIGHT, HORIZONTAL_MARGIN, WIDTH.
Returns:
tuple: (top, left, height, width)
"""
img_height = shape[0]
img_width = shape[1]
height, width = bbox_shape
ver_margin, hor_margin = margin
maxt = img_height - ver_margin - height
maxl = img_width - hor_margin - width
t = np.random.randint(low=ver_margin, high=maxt)
l = np.random.randint(low=hor_margin, high=maxl)
h = height
w = width
return (t, l, h, w)
@staticmethod
def random_ff_mask(config):
"""Generate a random free form mask with configuration.
Args:
config: Config should have configuration including IMG_SHAPES,
VERTICAL_MARGIN, HEIGHT, HORIZONTAL_MARGIN, WIDTH.
Returns:
tuple: (top, left, height, width)
"""
h,w = config['img_shape']
mask = np.zeros((h,w))
num_v = 12+np.random.randint(config['mv'])#tf.random_uniform([], minval=0, maxval=config.MAXVERTEX, dtype=tf.int32)
for i in range(num_v):
start_x = np.random.randint(w)
start_y = np.random.randint(h)
for j in range(1+np.random.randint(5)):
angle = 0.01+np.random.randint(config['ma'])
if i % 2 == 0:
angle = 2 * 3.1415926 - angle
length = 10+np.random.randint(config['ml'])
brush_w = 10+np.random.randint(config['mbw'])
end_x = (start_x + length * np.sin(angle)).astype(np.int32)
end_y = (start_y + length * np.cos(angle)).astype(np.int32)
cv2.line(mask, (start_y, start_x), (end_y, end_x), 1.0, brush_w)
start_x, start_y = end_x, end_y
mask*=255
return mask.reshape(mask.shape+(1,)).astype(np.float32)
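    # Note (added comment): random_ff_mask draws a random number of brush
    # strokes, each made of a few segments with random angle, length (ml) and
    # brush width (mbw), producing a free-form hole mask with values in {0, 255}.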
@staticmethod
def bbox2mask(bboxs, shape):
"""Generate mask tensor from bbox.
Args:
bbox: configuration tuple, (top, left, height, width)
config: Config should have configuration including IMG_SHAPES,
MAX_DELTA_HEIGHT, MAX_DELTA_WIDTH.
Returns:
tf.Tensor: output with shape [1, H, W, 1]
"""
height, width = shape
mask = np.zeros(( height, width), np.float32)
for bbox in bboxs:
h = int(0.1*bbox[2])+np.random.randint(int(bbox[2]*0.2+1))
w = int(0.1*bbox[3])+np.random.randint(int(bbox[3]*0.2)+1)
mask[bbox[0]+h:bbox[0]+bbox[2]-h,
bbox[1]+w:bbox[1]+bbox[3]-w] = 1.
mask*=255
return mask.reshape(mask.shape+(1,)).astype(np.float32)
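# Illustrative usage sketch (added; the directory layout is an assumption): in
# 'val' mode the dataset pairs every '<name>_masked.jpg' image in the folder
# with its mask file and yields (img, mask, original_mask, image_name,
# img_path), with images scaled to [-1, 1] and masks binarized to {0, 1}.
#
#     dataset = InpaintDataset('TestImgs/', mode='val', img_size=256)
#     loader = dataset.loader(batch_size=1, shuffle=False)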
|
the-stack_0_19254 | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
from collections import OrderedDict
from os import path as osp
from mmdet3d.core import show_multi_modality_result, show_result
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet.core import eval_map
from mmdet.datasets import DATASETS
from .custom_3d import Custom3DDataset
from .pipelines import Compose
@DATASETS.register_module()
class SUNRGBDDataset(Custom3DDataset):
r"""SUNRGBD Dataset.
This class serves as the API for experiments on the SUNRGBD Dataset.
See the `download page <http://rgbd.cs.princeton.edu/challenge.html>`_
for data downloading.
Args:
data_root (str): Path of dataset root.
ann_file (str): Path of annotation file.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
box_type_3d (str, optional): Type of 3D box of this dataset.
Based on the `box_type_3d`, the dataset will encapsulate the box
to its original format then converted them to `box_type_3d`.
Defaults to 'Depth' in this dataset. Available options includes
- 'LiDAR': Box in LiDAR coordinates.
- 'Depth': Box in depth coordinates, usually for indoor dataset.
- 'Camera': Box in camera coordinates.
filter_empty_gt (bool, optional): Whether to filter empty GT.
Defaults to True.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
"""
CLASSES = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser',
'night_stand', 'bookshelf', 'bathtub')
def __init__(self,
data_root,
ann_file,
pipeline=None,
classes=None,
modality=dict(use_camera=True, use_lidar=True),
box_type_3d='Depth',
filter_empty_gt=True,
test_mode=False):
super().__init__(
data_root=data_root,
ann_file=ann_file,
pipeline=pipeline,
classes=classes,
modality=modality,
box_type_3d=box_type_3d,
filter_empty_gt=filter_empty_gt,
test_mode=test_mode)
assert 'use_camera' in self.modality and \
'use_lidar' in self.modality
assert self.modality['use_camera'] or self.modality['use_lidar']
def get_data_info(self, index):
"""Get data info according to the given index.
Args:
index (int): Index of the sample data to get.
Returns:
dict: Data information that will be passed to the data \
preprocessing pipelines. It includes the following keys:
- sample_idx (str): Sample index.
- pts_filename (str, optional): Filename of point clouds.
- file_name (str, optional): Filename of point clouds.
- img_prefix (str | None, optional): Prefix of image files.
- img_info (dict, optional): Image info.
- calib (dict, optional): Camera calibration info.
- ann_info (dict): Annotation info.
"""
info = self.data_infos[index]
sample_idx = info['point_cloud']['lidar_idx']
assert info['point_cloud']['lidar_idx'] == info['image']['image_idx']
input_dict = dict(sample_idx=sample_idx)
if self.modality['use_lidar']:
pts_filename = osp.join(self.data_root, info['pts_path'])
input_dict['pts_filename'] = pts_filename
input_dict['file_name'] = pts_filename
if self.modality['use_camera']:
img_filename = osp.join(
osp.join(self.data_root, 'sunrgbd_trainval'),
info['image']['image_path'])
input_dict['img_prefix'] = None
input_dict['img_info'] = dict(filename=img_filename)
# calib = info['calib']
# rt_mat = calib['Rt']
# # follow Coord3DMode.convert_point
# rt_mat = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]
# ]) @ rt_mat.transpose(1, 0)
# depth2img = calib['K'] @ rt_mat
# input_dict['depth2img'] = depth2img
if not self.test_mode:
annos = self.get_ann_info(index)
input_dict['ann_info'] = annos
if self.filter_empty_gt and len(annos['gt_bboxes_3d']) == 0:
return None
return input_dict
def get_ann_info(self, index):
"""Get annotation info according to the given index.
Args:
index (int): Index of the annotation data to get.
Returns:
dict: annotation information consists of the following keys:
- gt_bboxes_3d (:obj:`DepthInstance3DBoxes`): \
3D ground truth bboxes
- gt_labels_3d (np.ndarray): Labels of ground truths.
- pts_instance_mask_path (str): Path of instance masks.
- pts_semantic_mask_path (str): Path of semantic masks.
"""
# Use index to get the annos, thus the evalhook could also use this api
info = self.data_infos[index]
if info['annos']['gt_num'] != 0:
gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
np.float32) # k, 6
gt_labels_3d = info['annos']['class'].astype(np.long)
else:
gt_bboxes_3d = np.zeros((0, 7), dtype=np.float32)
gt_labels_3d = np.zeros((0, ), dtype=np.long)
# to target box structure
gt_bboxes_3d = DepthInstance3DBoxes(
gt_bboxes_3d, origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)
anns_results = dict(
gt_bboxes_3d=gt_bboxes_3d, gt_labels_3d=gt_labels_3d)
if self.modality['use_camera']:
if info['annos']['gt_num'] != 0:
gt_bboxes_2d = info['annos']['bbox'].astype(np.float32)
else:
gt_bboxes_2d = np.zeros((0, 4), dtype=np.float32)
anns_results['bboxes'] = gt_bboxes_2d
anns_results['labels'] = gt_labels_3d
return anns_results
def _build_default_pipeline(self):
"""Build the default pipeline for this dataset."""
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
load_dim=6,
use_dim=[0, 1, 2]),
dict(
type='DefaultFormatBundle3D',
class_names=self.CLASSES,
with_label=False),
dict(type='Collect3D', keys=['points'])
]
if self.modality['use_camera']:
pipeline.insert(0, dict(type='LoadImageFromFile'))
return Compose(pipeline)
def show(self, results, out_dir, show=True, pipeline=None):
"""Results visualization.
Args:
results (list[dict]): List of bounding boxes results.
out_dir (str): Output directory of visualization result.
show (bool): Visualize the results online.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
"""
assert out_dir is not None, 'Expect out_dir, got none.'
pipeline = self._get_pipeline(pipeline)
for i, result in enumerate(results):
data_info = self.data_infos[i]
pts_path = data_info['pts_path']
file_name = osp.split(pts_path)[-1].split('.')[0]
points, img_metas, img = self._extract_data(
i, pipeline, ['points', 'img_metas', 'img'])
# scale colors to [0, 255]
points = points.numpy()
points[:, 3:] *= 255
gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor.numpy()
pred_bboxes = result['boxes_3d'].tensor.numpy()
show_result(points, gt_bboxes.copy(), pred_bboxes.copy(), out_dir,
file_name, show)
# multi-modality visualization
if self.modality['use_camera']:
img = img.numpy()
# need to transpose channel to first dim
img = img.transpose(1, 2, 0)
pred_bboxes = DepthInstance3DBoxes(
pred_bboxes, origin=(0.5, 0.5, 0))
gt_bboxes = DepthInstance3DBoxes(
gt_bboxes, origin=(0.5, 0.5, 0))
show_multi_modality_result(
img,
gt_bboxes,
pred_bboxes,
None,
out_dir,
file_name,
box_mode='depth',
img_metas=img_metas,
show=show)
def evaluate(self,
results,
metric=None,
iou_thr=(0.25, 0.5),
iou_thr_2d=(0.5, ),
logger=None,
show=False,
out_dir=None,
pipeline=None):
"""Evaluate.
Evaluation in indoor protocol.
Args:
results (list[dict]): List of results.
metric (str | list[str]): Metrics to be evaluated.
iou_thr (list[float]): AP IoU thresholds.
iou_thr_2d (list[float]): AP IoU thresholds for 2d evaluation.
show (bool): Whether to visualize.
Default: False.
out_dir (str): Path to save the visualization results.
Default: None.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
Returns:
dict: Evaluation results.
"""
# evaluate 3D detection performance
if isinstance(results[0], dict):
return super().evaluate(results, metric, iou_thr, logger, show,
out_dir, pipeline)
# evaluate 2D detection performance
else:
eval_results = OrderedDict()
annotations = [self.get_ann_info(i) for i in range(len(self))]
iou_thr_2d = (iou_thr_2d) if isinstance(iou_thr_2d,
float) else iou_thr_2d
for iou_thr_2d_single in iou_thr_2d:
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr_2d_single,
dataset=self.CLASSES,
logger=logger)
eval_results['mAP_' + str(iou_thr_2d_single)] = mean_ap
return eval_results
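    # Editor-added illustrative sketch (constructor arguments below are
    # assumptions, not part of the original class). `evaluate` dispatches on
    # the result format: dict-style results are scored with the indoor 3D
    # protocol of the parent class, while plain lists of 2D boxes fall through
    # to `eval_map` and produce keys such as 'mAP_0.5'.
    #
    #   dataset = <this dataset class>(ann_file=..., pipeline=..., modality=...)
    #   metrics = dataset.evaluate(results, iou_thr=(0.25, 0.5), iou_thr_2d=(0.5, ))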
|
the-stack_0_19255 | from cryptotrade.settings import (
BITCOIN_CODE,
BLOCKIO_BITCOIN_API_KEY,
BLOCKIO_LITECOIN_API_KEY,
BLOCKIO_DOGECOIN_API_KEY,
BLOCKIO_SECRET_PIN,
)
from block_io import BlockIo
from finance.models import BlockIOWallet
import decimal
import requests
class BlockIOChecker:
def __init__(self, api_key, secret_pin):
self._api_key = api_key
self._secret_pin = secret_pin
self._version = 2
self._block_io = BlockIo(self._api_key, self._secret_pin, self._version)
self._set_wallets()
def _set_wallets(self):
self._wallets = BlockIOWallet.objects.filter(balance=0)
def _get_addresses(self):
return ','.join([i.wallet for i in self._wallets])
def get_wallets_data(self):
response = self._block_io.get_address_balance(addresses=self._get_addresses())
wallets_data_list = list()
if response['status'] == 'success':
balances = response['data']['balances']
for balance in balances:
wallet_info = balance['label'].split('_')
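                # The address label is assumed to follow the pattern
                # "<user_unique_number>_<wallet_id>", so index 0 is the user's
                # unique number and index 1 is the BlockIOWallet primary key.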
wallets_data_list.append({
'wallet_id': wallet_info[1],
'user_unique_number': wallet_info[0],
'available_balance': decimal.Decimal(balance['available_balance']),
'pending_received_balance': decimal.Decimal(balance['pending_received_balance']),
})
return wallets_data_list
def delete_wallets():
pass
def start_block_io_checker():
path = 'https://blockchain.info/ticker'
raw_response = requests.get(path)
if raw_response.status_code == 200:
response = raw_response.json()
course = decimal.Decimal(response['USD']['last'])
for api_key in [BLOCKIO_BITCOIN_API_KEY]:
            checker = BlockIOChecker(api_key, BLOCKIO_SECRET_PIN)
data = checker.get_wallets_data()
for i in data:
if i['pending_received_balance'] > 0:
continue
elif i['available_balance'] > 0:
wallet = BlockIOWallet.objects.get(
id=i['wallet_id'],
user__unique_number=i['user_unique_number'],
)
wallet.balance = i['available_balance']
wallet.balance_usd = i['available_balance'] * course
wallet.save(update_fields=('balance', 'balance_usd',))
else:
continue
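
# Editor-added illustrative sketch (the helper name is an assumption and it is
# not used by the checker above): the USD balance stored on each wallet is the
# BTC balance multiplied by the spot rate fetched from
# https://blockchain.info/ticker, whose JSON payload contains
# {'USD': {'last': <rate>}}.
def _example_btc_to_usd(balance_btc, ticker_json):
    """Convert a BTC balance to USD using a blockchain.info ticker payload."""
    rate = decimal.Decimal(str(ticker_json['USD']['last']))
    return decimal.Decimal(str(balance_btc)) * rate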
|
the-stack_0_19256 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request
import time, wiringpi
# -- Constant declarations -- #
SPK_PIN = 5  # GPIO number of the piezo speaker
# -- Functions -- #
def init():
""" wiringpiとソフトウェアトーンの初期化 """
wiringpi.wiringPiSetupGpio() # wiringpi初期化
wiringpi.softToneCreate(SPK_PIN) # ソフトウェアトーン初期化
def play(index):
""" wiringpiによるソフトウェアトーン再生"""
melody_list = [
((262, 0.5), (294, 0.5), (330, 0.5), (349, 0.5), (392, 0.5), (440, 0.5), (494, 0.5), (525, 0.5)),
((525, 0.5), (494, 0.5), (440, 0.5), (392, 0.5), (349, 0.5), (330, 0.5), (294, 0.5), (262, 0.5)),
((262, 1), (294, 1), (330, 1), (349, 1), (392, 1), (440, 1), (494, 1), (525, 1)),
]
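    # Each entry is a (frequency_hz, duration_s) pair; 262 Hz is roughly middle
    # C, so melody 0 plays an ascending C-major scale and melody 1 the reverse.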
    for v, play_time in melody_list[index]:  # play the selected melody
        wiringpi.softToneWrite(SPK_PIN, v)  # generate the tone
        time.sleep(play_time)  # hold the note for its duration
play_stop()
def play_stop():
""" 再生終了 """
wiringpi.softToneWrite(SPK_PIN, 0) # 再生終了
def shutdown_server():
""" サーバー停止 """
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
# -- Initialization and Flask app setup -- #
init()
app = Flask(__name__)  # main application instance
# -- Routing -- #
@app.route('/speaker/', methods=['GET'])
def speaker():
"""リモート圧電スピーカー用メニュー表示"""
return render_template('app_07_speaker.html')
@app.route('/speaker/<int:id>', methods=['POST'])
def speaker_play(id):
"""リモート圧電スピーカー再生"""
play(id)
return render_template('app_07_speaker.html')
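# Editor-added usage note (the host below is a placeholder; the port is Flask's
# default 5000, since app.run(host='0.0.0.0') does not override it):
#   import requests
#   requests.post('http://<raspberry-pi-address>:5000/speaker/1')  # play melody 1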
@app.route('/shutdown', methods=['POST'])
def shutdown():
play_stop()
shutdown_server()
return 'Server shutting down...'
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
the-stack_0_19257 | import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
from imutils import *
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
'''
BEST WORKING CODE
'''
# define values boundaries for color
lower_yellow = np.array([15,50,100],np.uint8)
upper_yellow = np.array([40,255,255],np.uint8)
# lower_white_hsv = np.array([0, 0, 150], np.uint8)
lower_white_hsv = np.array([0,0,120], np.uint8)
upper_white_hsv = np.array([255,30,255], np.uint8)
lower_white_rgb = np.array([190,190,190], np.uint8)
upper_white_rgb = np.array([255,255,255], np.uint8)
# hls_lower = np.array([0, 200, 0], np.uint8)
# hls_upper = np.array([255,255, 150], np.uint8)
def process_image():
count = 1
coeff_buffer = []
outlier_count = 0
while True:
######
# READ IMAGE AND BASIC PROCESSES
######
path = '../collected_images/5/mosaic/' + str(count) + '.jpg'
# path = '../collected_images/' + str(count) + '.jpg'
img = cv2.imread(path)
init_time = time.time()
img = GaussianBlur(img, 5)
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
yellow_mask = findColor(hsv_img, lower_yellow, upper_yellow)
kernel = np.ones((9,9),np.uint8)
yellow_mask = cv2.morphologyEx(yellow_mask, cv2.MORPH_OPEN, kernel)
# Split into two to obtain max contour
yellow_mask_1 = yellow_mask[:, 0:300]
yellow_mask_2 = yellow_mask[:,300:600]
edges = CannyEdge(img, 100, 200)
lines = Hough(edges)
# Get CONTOUR of masked image to filter out small blobs
_,contours1, _ = cv2.findContours(yellow_mask_1, 1,2)
_,contours2, _ = cv2.findContours(yellow_mask_2, 1,2)
# create new mask images
new_mask = np.zeros((600,600), dtype=np.uint8)
new_mask_temp = np.zeros((600,300),dtype=np.uint8)
if(len(contours1)!=0):
max_cont_1 = contours1[0]
max_area1 = 0
max_index1=0
for i in range(0,len(contours1)):
area = cv2.contourArea(contours1[i])
if (area > max_area1):
max_area1 = area
max_index1 = i
# fill in
cv2.drawContours(new_mask, contours1, max_index1, (255),cv2.FILLED)
if(len(contours2)!=0):
max_cont_2 = contours2[0]
max_area2 = 0
max_index2=0
for i in range(0,len(contours2)):
area = cv2.contourArea(contours2[i])
if (area > max_area2):
max_area2 = area
max_index2 = i
cv2.drawContours(new_mask_temp, contours2, max_index2, 255, cv2.FILLED)
new_mask[:, 300:600]=new_mask_temp
'''
LINE FITTING
        Fit a 2nd-order polynomial to the masked lane points by default;
        fall back to a linear fit when the quadratic coefficient is too large,
        and reuse the previous fit when the new one jumps too far from recent frames.
'''
points_mask = np.where(new_mask>0)
x_vals = points_mask[1]
y_vals = points_mask[0]
if (x_vals.size!=0):
coefficients = np.polyfit(x_vals, y_vals,2)
poly_order = 2
else:
coefficients = np.array([0,0])
poly_order = 1
coeff_thresh = 0.0001 # Tune value?
if (abs(coefficients[0])>coeff_thresh ):
coefficients = np.polyfit(x_vals, y_vals,1)
poly_order = 1
# FILTERING BASED ON PREVIOUS VALUES
if(len(coeff_buffer)<3):
coeff_buffer.append(coefficients)
else:
y_prev_avg = (coeff_buffer[0][-1] + coeff_buffer[1][-1]+coeff_buffer[2][-1])/3
if(abs(y_prev_avg - coefficients[-1]) >100):
coefficients = coeff_buffer[2]
outlier_count +=1
poly_order = len(coefficients)-1
if(outlier_count >10):
outlier_count=0
coeff_buffer = []
else:
coeff_buffer[0:-1] = coeff_buffer[1:3]
coeff_buffer[2]=coefficients
poly_order = len(coefficients)-1
# CREATE FITTING, DRAW ON IMAGE
polypoints = np.zeros((600,2))
polypoints_right = np.zeros((600,2))
t = np.arange(0,600,1)
f = np.poly1d(coefficients)
if(coefficients.size ==3): # if 2nd order
slopes = t*coefficients[0]*2 + coefficients[1]
else:
slopes = coefficients[0]
# Create fake points for right lane
polypoints[:,0]=t
polypoints[:,1]=f(t)
theta = np.arctan2((slopes),1.0)
polypoints_right[:,0] = np.cos(theta-np.pi/2)*340+t
polypoints_right[:,1] = polypoints[:,1] + np.sin(theta - np.pi/2)*340
coeff_right = np.polyfit(polypoints_right[:,0], polypoints_right[:,1], poly_order)
f_right = np.poly1d(coeff_right)
# find gradient at each point and shift by lane width
polypoints_right[:,0] = t
polypoints_right[:,1] = f_right(t)
cv2.polylines(img, np.int32([polypoints]), False, (255,0,0),2)
cv2.polylines(img, np.int32([polypoints_right]), False, (0,0,255),2)
# cv2.circle(img, (np.int32(polypoints_right[:,0]), np.int32(polypoints_right[:,1])),2,(255,0,0),2)
'''
CREATE NEW MASK FOR PUBLISH, OUTSIDE LANE = WHITE
        For efficiency, an explicit per-pixel loop is avoided:
        build one array of row indices and one of fitted y = f(x) values,
        then apply numpy boolean masking, which is far faster than looping.
'''
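        # e.g. masked_img[mask < y_vals] = 255 whitens, in a single vectorized
        # step, every pixel whose row index lies below the fitted yellow-lane
        # curve.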
mask = np.arange(0,600,1) # create x values
mask1 = np.arange(0,600,1)
if (len(coefficients)==3):
mask = coefficients[0] * mask*mask + coefficients[1]*mask + coefficients[2]
mask = np.zeros((600,600)) + mask #broadcast into 2d
mask1 = coeff_right[0]*mask1*mask1 + coeff_right[1]*mask1 + coeff_right[2]
mask1 = np.zeros((600,600)) + mask1
else:
mask = coefficients[0]*mask + coefficients[1]
mask = np.zeros((600,600)) + mask #broadcast into 2d 600 by 600
mask1 = coeff_right[0]*mask1+ coeff_right[1]
mask1 = np.zeros((600,600)) + mask1
# create 2d array with y values
y_vals = np.arange(0,600,1)
y_vals = np.broadcast_to(y_vals, (600,600)).T
# boolean masking
masked_img = np.zeros((600,600),dtype='uint8')
masked_img[mask<y_vals] = 255
masked_img[mask1>y_vals]=255
print("Time: ", time.time()-init_time)
cv2.imshow('masked lane', masked_img)
#cv2.imshow('original', img)
count +=1 # iterate count to go through data
if cv2.waitKey(10) & 0xFF==ord('q'):
break
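
# Editor-added illustrative sketch (a standalone helper that process_image does
# not call): the fitting logic above drops from a quadratic to a linear fit
# whenever the quadratic coefficient exceeds a small threshold.
def _example_fit_lane(x_vals, y_vals, coeff_thresh=0.0001):
    """Fit y = f(x) with a 2nd-order polynomial, falling back to 1st order."""
    coefficients = np.polyfit(x_vals, y_vals, 2)
    if abs(coefficients[0]) > coeff_thresh:
        coefficients = np.polyfit(x_vals, y_vals, 1)
    return coefficients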
if __name__=="__main__":
process_image()
|
the-stack_0_19258 | from codecs import open
from os import path
import re
from setuptools import setup, find_packages
# Constants
NAME = "markyp-html"
ROOT = NAME.replace("-", "_")
# Get the long description from the README file
with open(path.join(path.abspath(path.dirname(__file__)), "README.md"), encoding="utf-8") as f:
readme = f.read()
# Get the version from the root __init__.py file.
with open(path.join(path.abspath(path.dirname(__file__)), ROOT, "__init__.py"), encoding="utf-8") as f:
content = f.read()
_author = re.search("__author__ = \"(.*?)\"", content).group(1)
_email = re.search("__email__ = \"(.*?)\"", content).group(1)
_license = re.search("__license__ = \"(.*?)\"", content).group(1)
_url = re.search("__url__ = \"(.*?)\"", content).group(1)
_version = re.search("__version__ = \"(.*?)\"", content).group(1)
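# Note: this assumes markyp_html/__init__.py defines __author__, __email__,
# __license__, __url__ and __version__ as plain double-quoted string literals,
# which is what the regular expressions above require.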
# Get the requirements from requirements.txt.
req_filename = "requirements.txt"
exp = re.compile("(?P<req>[-_\\w]+)\\s*(?P<op>[<>=!~]+)\\s*(?P<ver>[\\w.]+)")
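# e.g. a pinned line such as "markyp>=0.5" is captured as req="markyp",
# op=">=", ver="0.5"; "#" comments are stripped from each line before matching.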
requirements = []
with open(path.join(path.dirname(path.abspath(__file__)), req_filename)) as req_file:
for line in req_file:
line = line.split("#", maxsplit=1)[0].strip()
match = exp.match(line) if line else None
if match is not None:
requirements.append(("".join((match["req"], match["op"], match["ver"]))))
requirements.sort(key=lambda s: s.casefold())
setup(
name=NAME,
version=_version,
description="HTML element implementations based on markyp.",
long_description=readme,
long_description_content_type="text/markdown",
url=_url,
author=_author,
author_email=_email,
license=_license,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup :: HTML",
"Topic :: Utilities",
"Typing :: Typed"
],
keywords="html markup generator utility",
packages=find_packages(exclude=["test"]),
python_requires=">=3.6",
install_requires=requirements
)
|
the-stack_0_19259 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copyright (C) 2008 Evan Martin <[email protected]>
"""A git-command for integrating reviews on Rietveld and Gerrit."""
from __future__ import print_function
from distutils.version import LooseVersion
from multiprocessing.pool import ThreadPool
import base64
import collections
import fnmatch
import httplib
import json
import logging
import multiprocessing
import optparse
import os
import re
import stat
import sys
import textwrap
import traceback
import urllib
import urllib2
import urlparse
import uuid
import webbrowser
import zlib
try:
import readline # pylint: disable=import-error,W0611
except ImportError:
pass
from third_party import colorama
from third_party import httplib2
from third_party import upload
import auth
import checkout
import clang_format
import commit_queue
import dart_format
import setup_color
import fix_encoding
import gclient_utils
import gerrit_util
import git_cache
import git_common
import git_footers
import owners
import owners_finder
import presubmit_support
import rietveld
import scm
import subcommand
import subprocess2
import watchlists
__version__ = '2.0'
COMMIT_BOT_EMAIL = '[email protected]'
DEFAULT_SERVER = 'https://codereview.chromium.org'
POSTUPSTREAM_HOOK = '.git/hooks/post-cl-land'
DESCRIPTION_BACKUP_FILE = '~/.git_cl_description_backup'
REFS_THAT_ALIAS_TO_OTHER_REFS = {
'refs/remotes/origin/lkgr': 'refs/remotes/origin/master',
'refs/remotes/origin/lkcr': 'refs/remotes/origin/master',
}
# Valid extensions for files we want to lint.
DEFAULT_LINT_REGEX = r"(.*\.cpp|.*\.cc|.*\.h)"
DEFAULT_LINT_IGNORE_REGEX = r"$^"
# Buildbucket master name prefix.
MASTER_PREFIX = 'master.'
# Shortcut since it quickly becomes redundant.
Fore = colorama.Fore
# Initialized in main()
settings = None
# Used by tests/git_cl_test.py to add extra logging.
# Inside the weirdly failing test, add this:
# >>> self.mock(git_cl, '_IS_BEING_TESTED', True)
# And scroll up to see the stack trace printed.
_IS_BEING_TESTED = False
def DieWithError(message):
print(message, file=sys.stderr)
sys.exit(1)
def GetNoGitPagerEnv():
env = os.environ.copy()
# 'cat' is a magical git string that disables pagers on all platforms.
env['GIT_PAGER'] = 'cat'
return env
def RunCommand(args, error_ok=False, error_message=None, shell=False, **kwargs):
try:
return subprocess2.check_output(args, shell=shell, **kwargs)
except subprocess2.CalledProcessError as e:
logging.debug('Failed running %s', args)
if not error_ok:
DieWithError(
'Command "%s" failed.\n%s' % (
' '.join(args), error_message or e.stdout or ''))
return e.stdout
def RunGit(args, **kwargs):
"""Returns stdout."""
return RunCommand(['git'] + args, **kwargs)
def RunGitWithCode(args, suppress_stderr=False):
"""Returns return code and stdout."""
if suppress_stderr:
stderr = subprocess2.VOID
else:
stderr = sys.stderr
try:
(out, _), code = subprocess2.communicate(['git'] + args,
env=GetNoGitPagerEnv(),
stdout=subprocess2.PIPE,
stderr=stderr)
return code, out
except subprocess2.CalledProcessError as e:
logging.debug('Failed running %s', args)
return e.returncode, e.stdout
def RunGitSilent(args):
"""Returns stdout, suppresses stderr and ignores the return code."""
return RunGitWithCode(args, suppress_stderr=True)[1]
def IsGitVersionAtLeast(min_version):
prefix = 'git version '
version = RunGit(['--version']).strip()
return (version.startswith(prefix) and
LooseVersion(version[len(prefix):]) >= LooseVersion(min_version))
def BranchExists(branch):
"""Return True if specified branch exists."""
code, _ = RunGitWithCode(['rev-parse', '--verify', branch],
suppress_stderr=True)
return not code
def time_sleep(seconds):
# Use this so that it can be mocked in tests without interfering with python
# system machinery.
import time # Local import to discourage others from importing time globally.
return time.sleep(seconds)
def ask_for_data(prompt):
try:
return raw_input(prompt)
except KeyboardInterrupt:
# Hide the exception.
sys.exit(1)
def _git_branch_config_key(branch, key):
"""Helper method to return Git config key for a branch."""
assert branch, 'branch name is required to set git config for it'
return 'branch.%s.%s' % (branch, key)
def _git_get_branch_config_value(key, default=None, value_type=str,
branch=False):
"""Returns git config value of given or current branch if any.
Returns default in all other cases.
"""
assert value_type in (int, str, bool)
if branch is False: # Distinguishing default arg value from None.
branch = GetCurrentBranch()
if not branch:
return default
args = ['config']
if value_type == bool:
args.append('--bool')
# git config also has --int, but apparently git config suffers from integer
# overflows (http://crbug.com/640115), so don't use it.
args.append(_git_branch_config_key(branch, key))
code, out = RunGitWithCode(args)
if code == 0:
value = out.strip()
if value_type == int:
return int(value)
if value_type == bool:
return bool(value.lower() == 'true')
return value
return default
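
# Illustrative usage of the branch-config helpers (branch name and value below
# are hypothetical; 'last-upload-hash' is one of the keys used elsewhere in
# this file):
#   h = _git_get_branch_config_value('last-upload-hash', branch='feature-x')
#   _git_set_branch_config_value('last-upload-hash', 'deadbeef', branch='feature-x')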
def _git_set_branch_config_value(key, value, branch=None, **kwargs):
"""Sets the value or unsets if it's None of a git branch config.
Valid, though not necessarily existing, branch must be provided,
otherwise currently checked out branch is used.
"""
if not branch:
branch = GetCurrentBranch()
assert branch, 'a branch name OR currently checked out branch is required'
args = ['config']
# Check for boolean first, because bool is int, but int is not bool.
if value is None:
args.append('--unset')
elif isinstance(value, bool):
args.append('--bool')
value = str(value).lower()
else:
# git config also has --int, but apparently git config suffers from integer
# overflows (http://crbug.com/640115), so don't use it.
value = str(value)
args.append(_git_branch_config_key(branch, key))
if value is not None:
args.append(value)
RunGit(args, **kwargs)
def _get_committer_timestamp(commit):
"""Returns unix timestamp as integer of a committer in a commit.
Commit can be whatever git show would recognize, such as HEAD, sha1 or ref.
"""
# Git also stores timezone offset, but it only affects visual display,
# actual point in time is defined by this timestamp only.
return int(RunGit(['show', '-s', '--format=%ct', commit]).strip())
def _git_amend_head(message, committer_timestamp):
"""Amends commit with new message and desired committer_timestamp.
Sets committer timezone to UTC.
"""
env = os.environ.copy()
env['GIT_COMMITTER_DATE'] = '%d+0000' % committer_timestamp
return RunGit(['commit', '--amend', '-m', message], env=env)
def add_git_similarity(parser):
parser.add_option(
'--similarity', metavar='SIM', type=int, action='store',
help='Sets the percentage that a pair of files need to match in order to'
' be considered copies (default 50)')
parser.add_option(
'--find-copies', action='store_true',
help='Allows git to look for copies.')
parser.add_option(
'--no-find-copies', action='store_false', dest='find_copies',
help='Disallows git from looking for copies.')
old_parser_args = parser.parse_args
def Parse(args):
options, args = old_parser_args(args)
if options.similarity is None:
options.similarity = _git_get_branch_config_value(
'git-cl-similarity', default=50, value_type=int)
else:
print('Note: Saving similarity of %d%% in git config.'
% options.similarity)
_git_set_branch_config_value('git-cl-similarity', options.similarity)
options.similarity = max(0, min(options.similarity, 100))
if options.find_copies is None:
options.find_copies = _git_get_branch_config_value(
'git-find-copies', default=True, value_type=bool)
else:
_git_set_branch_config_value('git-find-copies', bool(options.find_copies))
print('Using %d%% similarity for rename/copy detection. '
'Override with --similarity.' % options.similarity)
return options, args
parser.parse_args = Parse
def _get_properties_from_options(options):
properties = dict(x.split('=', 1) for x in options.properties)
for key, val in properties.iteritems():
try:
properties[key] = json.loads(val)
except ValueError:
pass # If a value couldn't be evaluated, treat it as a string.
return properties
def _prefix_master(master):
"""Convert user-specified master name to full master name.
Buildbucket uses full master name(master.tryserver.chromium.linux) as bucket
name, while the developers always use shortened master name
(tryserver.chromium.linux) by stripping off the prefix 'master.'. This
function does the conversion for buildbucket migration.
"""
if master.startswith(MASTER_PREFIX):
return master
return '%s%s' % (MASTER_PREFIX, master)
def _unprefix_master(bucket):
"""Convert bucket name to shortened master name.
Buildbucket uses full master name(master.tryserver.chromium.linux) as bucket
name, while the developers always use shortened master name
(tryserver.chromium.linux) by stripping off the prefix 'master.'. This
function does the conversion for buildbucket migration.
"""
if bucket.startswith(MASTER_PREFIX):
return bucket[len(MASTER_PREFIX):]
return bucket
def _buildbucket_retry(operation_name, http, *args, **kwargs):
"""Retries requests to buildbucket service and returns parsed json content."""
try_count = 0
while True:
response, content = http.request(*args, **kwargs)
try:
content_json = json.loads(content)
except ValueError:
content_json = None
# Buildbucket could return an error even if status==200.
if content_json and content_json.get('error'):
error = content_json.get('error')
if error.get('code') == 403:
raise BuildbucketResponseException(
'Access denied: %s' % error.get('message', ''))
msg = 'Error in response. Reason: %s. Message: %s.' % (
error.get('reason', ''), error.get('message', ''))
raise BuildbucketResponseException(msg)
if response.status == 200:
if not content_json:
raise BuildbucketResponseException(
'Buildbucket returns invalid json content: %s.\n'
'Please file bugs at http://crbug.com, label "Infra-BuildBucket".' %
content)
return content_json
if response.status < 500 or try_count >= 2:
raise httplib2.HttpLib2Error(content)
# status >= 500 means transient failures.
logging.debug('Transient errors when %s. Will retry.', operation_name)
time_sleep(0.5 + 1.5*try_count)
try_count += 1
assert False, 'unreachable'
def _get_bucket_map(changelist, options, option_parser):
"""Returns a dict mapping bucket names to builders and tests,
for triggering try jobs.
"""
# If no bots are listed, we try to get a set of builders and tests based
# on GetPreferredTryMasters functions in PRESUBMIT.py files.
if not options.bot:
change = changelist.GetChange(
changelist.GetCommonAncestorWithUpstream(), None)
# Get try masters from PRESUBMIT.py files.
masters = presubmit_support.DoGetTryMasters(
change=change,
changed_files=change.LocalPaths(),
repository_root=settings.GetRoot(),
default_presubmit=None,
project=None,
verbose=options.verbose,
output_stream=sys.stdout)
if masters is None:
return None
return {_prefix_master(m): b for m, b in masters.iteritems()}
if options.bucket:
return {options.bucket: {b: [] for b in options.bot}}
if options.master:
return {_prefix_master(options.master): {b: [] for b in options.bot}}
# If bots are listed but no master or bucket, then we need to find out
# the corresponding master for each bot.
bucket_map, error_message = _get_bucket_map_for_builders(options.bot)
if error_message:
option_parser.error(
'Tryserver master cannot be found because: %s\n'
'Please manually specify the tryserver master, e.g. '
'"-m tryserver.chromium.linux".' % error_message)
return bucket_map
def _get_bucket_map_for_builders(builders):
"""Returns a map of buckets to builders for the given builders."""
map_url = 'https://builders-map.appspot.com/'
try:
builders_map = json.load(urllib2.urlopen(map_url))
except urllib2.URLError as e:
return None, ('Failed to fetch builder-to-master map from %s. Error: %s.' %
(map_url, e))
except ValueError as e:
return None, ('Invalid json string from %s. Error: %s.' % (map_url, e))
if not builders_map:
return None, 'Failed to build master map.'
bucket_map = {}
for builder in builders:
masters = builders_map.get(builder, [])
if not masters:
return None, ('No matching master for builder %s.' % builder)
if len(masters) > 1:
return None, ('The builder name %s exists in multiple masters %s.' %
(builder, masters))
bucket = _prefix_master(masters[0])
bucket_map.setdefault(bucket, {})[builder] = []
return bucket_map, None
def _trigger_try_jobs(auth_config, changelist, buckets, options,
category='git_cl_try', patchset=None):
"""Sends a request to Buildbucket to trigger try jobs for a changelist.
Args:
auth_config: AuthConfig for Rietveld.
changelist: Changelist that the try jobs are associated with.
buckets: A nested dict mapping bucket names to builders to tests.
options: Command-line options.
"""
assert changelist.GetIssue(), 'CL must be uploaded first'
codereview_url = changelist.GetCodereviewServer()
assert codereview_url, 'CL must be uploaded first'
patchset = patchset or changelist.GetMostRecentPatchset()
assert patchset, 'CL must be uploaded first'
codereview_host = urlparse.urlparse(codereview_url).hostname
authenticator = auth.get_authenticator_for_host(codereview_host, auth_config)
http = authenticator.authorize(httplib2.Http())
http.force_exception_to_status_code = True
# TODO(tandrii): consider caching Gerrit CL details just like
# _RietveldChangelistImpl does, then caching values in these two variables
# won't be necessary.
owner_email = changelist.GetIssueOwner()
buildbucket_put_url = (
'https://{hostname}/_ah/api/buildbucket/v1/builds/batch'.format(
hostname=options.buildbucket_host))
buildset = 'patch/{codereview}/{hostname}/{issue}/{patch}'.format(
codereview='gerrit' if changelist.IsGerrit() else 'rietveld',
hostname=codereview_host,
issue=changelist.GetIssue(),
patch=patchset)
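  # e.g. for a hypothetical Gerrit CL 123456 at patchset 7 on
  # chromium-review.googlesource.com this yields
  # 'patch/gerrit/chromium-review.googlesource.com/123456/7'.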
shared_parameters_properties = changelist.GetTryjobProperties(patchset)
shared_parameters_properties['category'] = category
if options.clobber:
shared_parameters_properties['clobber'] = True
extra_properties = _get_properties_from_options(options)
if extra_properties:
shared_parameters_properties.update(extra_properties)
batch_req_body = {'builds': []}
print_text = []
print_text.append('Tried jobs on:')
for bucket, builders_and_tests in sorted(buckets.iteritems()):
print_text.append('Bucket: %s' % bucket)
master = None
if bucket.startswith(MASTER_PREFIX):
master = _unprefix_master(bucket)
for builder, tests in sorted(builders_and_tests.iteritems()):
print_text.append(' %s: %s' % (builder, tests))
parameters = {
'builder_name': builder,
'changes': [{
'author': {'email': owner_email},
'revision': options.revision,
}],
'properties': shared_parameters_properties.copy(),
}
if 'presubmit' in builder.lower():
parameters['properties']['dry_run'] = 'true'
if tests:
parameters['properties']['testfilter'] = tests
tags = [
'builder:%s' % builder,
'buildset:%s' % buildset,
'user_agent:git_cl_try',
]
if master:
parameters['properties']['master'] = master
tags.append('master:%s' % master)
batch_req_body['builds'].append(
{
'bucket': bucket,
'parameters_json': json.dumps(parameters),
'client_operation_id': str(uuid.uuid4()),
'tags': tags,
}
)
_buildbucket_retry(
'triggering try jobs',
http,
buildbucket_put_url,
'PUT',
body=json.dumps(batch_req_body),
headers={'Content-Type': 'application/json'}
)
print_text.append('To see results here, run: git cl try-results')
print_text.append('To see results in browser, run: git cl web')
print('\n'.join(print_text))
def fetch_try_jobs(auth_config, changelist, buildbucket_host,
patchset=None):
"""Fetches try jobs from buildbucket.
Returns a map from build id to build info as a dictionary.
"""
assert buildbucket_host
assert changelist.GetIssue(), 'CL must be uploaded first'
assert changelist.GetCodereviewServer(), 'CL must be uploaded first'
patchset = patchset or changelist.GetMostRecentPatchset()
assert patchset, 'CL must be uploaded first'
codereview_url = changelist.GetCodereviewServer()
codereview_host = urlparse.urlparse(codereview_url).hostname
authenticator = auth.get_authenticator_for_host(codereview_host, auth_config)
if authenticator.has_cached_credentials():
http = authenticator.authorize(httplib2.Http())
else:
print('Warning: Some results might be missing because %s' %
# Get the message on how to login.
(auth.LoginRequiredError(codereview_host).message,))
http = httplib2.Http()
http.force_exception_to_status_code = True
buildset = 'patch/{codereview}/{hostname}/{issue}/{patch}'.format(
codereview='gerrit' if changelist.IsGerrit() else 'rietveld',
hostname=codereview_host,
issue=changelist.GetIssue(),
patch=patchset)
params = {'tag': 'buildset:%s' % buildset}
builds = {}
while True:
url = 'https://{hostname}/_ah/api/buildbucket/v1/search?{params}'.format(
hostname=buildbucket_host,
params=urllib.urlencode(params))
content = _buildbucket_retry('fetching try jobs', http, url, 'GET')
for build in content.get('builds', []):
builds[build['id']] = build
if 'next_cursor' in content:
params['start_cursor'] = content['next_cursor']
else:
break
return builds
def print_try_jobs(options, builds):
"""Prints nicely result of fetch_try_jobs."""
if not builds:
print('No try jobs scheduled')
return
# Make a copy, because we'll be modifying builds dictionary.
builds = builds.copy()
builder_names_cache = {}
def get_builder(b):
try:
return builder_names_cache[b['id']]
except KeyError:
try:
parameters = json.loads(b['parameters_json'])
name = parameters['builder_name']
except (ValueError, KeyError) as error:
print('WARNING: failed to get builder name for build %s: %s' % (
b['id'], error))
name = None
builder_names_cache[b['id']] = name
return name
def get_bucket(b):
bucket = b['bucket']
if bucket.startswith('master.'):
return bucket[len('master.'):]
return bucket
if options.print_master:
name_fmt = '%%-%ds %%-%ds' % (
max(len(str(get_bucket(b))) for b in builds.itervalues()),
max(len(str(get_builder(b))) for b in builds.itervalues()))
def get_name(b):
return name_fmt % (get_bucket(b), get_builder(b))
else:
name_fmt = '%%-%ds' % (
max(len(str(get_builder(b))) for b in builds.itervalues()))
def get_name(b):
return name_fmt % get_builder(b)
def sort_key(b):
return b['status'], b.get('result'), get_name(b), b.get('url')
def pop(title, f, color=None, **kwargs):
"""Pop matching builds from `builds` dict and print them."""
if not options.color or color is None:
colorize = str
else:
colorize = lambda x: '%s%s%s' % (color, x, Fore.RESET)
result = []
for b in builds.values():
if all(b.get(k) == v for k, v in kwargs.iteritems()):
builds.pop(b['id'])
result.append(b)
if result:
print(colorize(title))
for b in sorted(result, key=sort_key):
print(' ', colorize('\t'.join(map(str, f(b)))))
total = len(builds)
pop(status='COMPLETED', result='SUCCESS',
title='Successes:', color=Fore.GREEN,
f=lambda b: (get_name(b), b.get('url')))
pop(status='COMPLETED', result='FAILURE', failure_reason='INFRA_FAILURE',
title='Infra Failures:', color=Fore.MAGENTA,
f=lambda b: (get_name(b), b.get('url')))
pop(status='COMPLETED', result='FAILURE', failure_reason='BUILD_FAILURE',
title='Failures:', color=Fore.RED,
f=lambda b: (get_name(b), b.get('url')))
pop(status='COMPLETED', result='CANCELED',
title='Canceled:', color=Fore.MAGENTA,
f=lambda b: (get_name(b),))
pop(status='COMPLETED', result='FAILURE',
failure_reason='INVALID_BUILD_DEFINITION',
title='Wrong master/builder name:', color=Fore.MAGENTA,
f=lambda b: (get_name(b),))
pop(status='COMPLETED', result='FAILURE',
title='Other failures:',
f=lambda b: (get_name(b), b.get('failure_reason'), b.get('url')))
pop(status='COMPLETED',
title='Other finished:',
f=lambda b: (get_name(b), b.get('result'), b.get('url')))
pop(status='STARTED',
title='Started:', color=Fore.YELLOW,
f=lambda b: (get_name(b), b.get('url')))
pop(status='SCHEDULED',
title='Scheduled:',
f=lambda b: (get_name(b), 'id=%s' % b['id']))
# The last section is just in case buildbucket API changes OR there is a bug.
pop(title='Other:',
f=lambda b: (get_name(b), 'id=%s' % b['id']))
assert len(builds) == 0
print('Total: %d try jobs' % total)
def write_try_results_json(output_file, builds):
"""Writes a subset of the data from fetch_try_jobs to a file as JSON.
The input |builds| dict is assumed to be generated by Buildbucket.
Buildbucket documentation: http://goo.gl/G0s101
"""
def convert_build_dict(build):
return {
'buildbucket_id': build.get('id'),
'status': build.get('status'),
'result': build.get('result'),
'bucket': build.get('bucket'),
'builder_name': json.loads(
build.get('parameters_json', '{}')).get('builder_name'),
'failure_reason': build.get('failure_reason'),
'url': build.get('url'),
}
converted = []
for _, build in sorted(builds.items()):
converted.append(convert_build_dict(build))
write_json(output_file, converted)
def print_stats(similarity, find_copies, args):
"""Prints statistics about the change to the user."""
# --no-ext-diff is broken in some versions of Git, so try to work around
# this by overriding the environment (but there is still a problem if the
# git config key "diff.external" is used).
env = GetNoGitPagerEnv()
if 'GIT_EXTERNAL_DIFF' in env:
del env['GIT_EXTERNAL_DIFF']
if find_copies:
similarity_options = ['-l100000', '-C%s' % similarity]
else:
similarity_options = ['-M%s' % similarity]
try:
stdout = sys.stdout.fileno()
except AttributeError:
stdout = None
return subprocess2.call(
['git',
'diff', '--no-ext-diff', '--stat'] + similarity_options + args,
stdout=stdout, env=env)
class BuildbucketResponseException(Exception):
pass
class Settings(object):
def __init__(self):
self.default_server = None
self.cc = None
self.root = None
self.tree_status_url = None
self.viewvc_url = None
self.updated = False
self.is_gerrit = None
self.squash_gerrit_uploads = None
self.gerrit_skip_ensure_authenticated = None
self.git_editor = None
self.project = None
self.force_https_commit_url = None
self.pending_ref_prefix = None
def LazyUpdateIfNeeded(self):
"""Updates the settings from a codereview.settings file, if available."""
if not self.updated:
# The only value that actually changes the behavior is
# autoupdate = "false". Everything else means "true".
autoupdate = RunGit(['config', 'rietveld.autoupdate'],
error_ok=True
).strip().lower()
cr_settings_file = FindCodereviewSettingsFile()
if autoupdate != 'false' and cr_settings_file:
LoadCodereviewSettingsFromFile(cr_settings_file)
self.updated = True
def GetDefaultServerUrl(self, error_ok=False):
if not self.default_server:
self.LazyUpdateIfNeeded()
self.default_server = gclient_utils.UpgradeToHttps(
self._GetRietveldConfig('server', error_ok=True))
if error_ok:
return self.default_server
if not self.default_server:
error_message = ('Could not find settings file. You must configure '
'your review setup by running "git cl config".')
self.default_server = gclient_utils.UpgradeToHttps(
self._GetRietveldConfig('server', error_message=error_message))
return self.default_server
@staticmethod
def GetRelativeRoot():
return RunGit(['rev-parse', '--show-cdup']).strip()
def GetRoot(self):
if self.root is None:
self.root = os.path.abspath(self.GetRelativeRoot())
return self.root
def GetGitMirror(self, remote='origin'):
"""If this checkout is from a local git mirror, return a Mirror object."""
local_url = RunGit(['config', '--get', 'remote.%s.url' % remote]).strip()
if not os.path.isdir(local_url):
return None
git_cache.Mirror.SetCachePath(os.path.dirname(local_url))
remote_url = git_cache.Mirror.CacheDirToUrl(local_url)
# Use the /dev/null print_func to avoid terminal spew in WaitForRealCommit.
mirror = git_cache.Mirror(remote_url, print_func = lambda *args: None)
if mirror.exists():
return mirror
return None
def GetTreeStatusUrl(self, error_ok=False):
if not self.tree_status_url:
error_message = ('You must configure your tree status URL by running '
'"git cl config".')
self.tree_status_url = self._GetRietveldConfig(
'tree-status-url', error_ok=error_ok, error_message=error_message)
return self.tree_status_url
def GetViewVCUrl(self):
if not self.viewvc_url:
self.viewvc_url = self._GetRietveldConfig('viewvc-url', error_ok=True)
return self.viewvc_url
def GetBugPrefix(self):
return self._GetRietveldConfig('bug-prefix', error_ok=True)
def GetIsSkipDependencyUpload(self, branch_name):
"""Returns true if specified branch should skip dep uploads."""
return self._GetBranchConfig(branch_name, 'skip-deps-uploads',
error_ok=True)
def GetRunPostUploadHook(self):
run_post_upload_hook = self._GetRietveldConfig(
'run-post-upload-hook', error_ok=True)
return run_post_upload_hook == "True"
def GetDefaultCCList(self):
return self._GetRietveldConfig('cc', error_ok=True)
def GetDefaultPrivateFlag(self):
return self._GetRietveldConfig('private', error_ok=True)
def GetIsGerrit(self):
"""Return true if this repo is assosiated with gerrit code review system."""
if self.is_gerrit is None:
self.is_gerrit = self._GetConfig('gerrit.host', error_ok=True)
return self.is_gerrit
def GetSquashGerritUploads(self):
"""Return true if uploads to Gerrit should be squashed by default."""
if self.squash_gerrit_uploads is None:
self.squash_gerrit_uploads = self.GetSquashGerritUploadsOverride()
if self.squash_gerrit_uploads is None:
# Default is squash now (http://crbug.com/611892#c23).
self.squash_gerrit_uploads = not (
RunGit(['config', '--bool', 'gerrit.squash-uploads'],
error_ok=True).strip() == 'false')
return self.squash_gerrit_uploads
def GetSquashGerritUploadsOverride(self):
"""Return True or False if codereview.settings should be overridden.
Returns None if no override has been defined.
"""
# See also http://crbug.com/611892#c23
result = RunGit(['config', '--bool', 'gerrit.override-squash-uploads'],
error_ok=True).strip()
if result == 'true':
return True
if result == 'false':
return False
return None
def GetGerritSkipEnsureAuthenticated(self):
"""Return True if EnsureAuthenticated should not be done for Gerrit
uploads."""
if self.gerrit_skip_ensure_authenticated is None:
self.gerrit_skip_ensure_authenticated = (
RunGit(['config', '--bool', 'gerrit.skip-ensure-authenticated'],
error_ok=True).strip() == 'true')
return self.gerrit_skip_ensure_authenticated
def GetGitEditor(self):
"""Return the editor specified in the git config, or None if none is."""
if self.git_editor is None:
self.git_editor = self._GetConfig('core.editor', error_ok=True)
return self.git_editor or None
def GetLintRegex(self):
return (self._GetRietveldConfig('cpplint-regex', error_ok=True) or
DEFAULT_LINT_REGEX)
def GetLintIgnoreRegex(self):
return (self._GetRietveldConfig('cpplint-ignore-regex', error_ok=True) or
DEFAULT_LINT_IGNORE_REGEX)
def GetProject(self):
if not self.project:
self.project = self._GetRietveldConfig('project', error_ok=True)
return self.project
def GetPendingRefPrefix(self):
if not self.pending_ref_prefix:
self.pending_ref_prefix = self._GetRietveldConfig(
'pending-ref-prefix', error_ok=True)
return self.pending_ref_prefix
def _GetRietveldConfig(self, param, **kwargs):
return self._GetConfig('rietveld.' + param, **kwargs)
def _GetBranchConfig(self, branch_name, param, **kwargs):
return self._GetConfig('branch.' + branch_name + '.' + param, **kwargs)
def _GetConfig(self, param, **kwargs):
self.LazyUpdateIfNeeded()
return RunGit(['config', param], **kwargs).strip()
class _GitNumbererState(object):
KNOWN_PROJECTS_WHITELIST = [
'chromium/src',
'external/webrtc',
'v8/v8',
]
@classmethod
def load(cls, remote_url, remote_ref):
"""Figures out the state by fetching special refs from remote repo.
"""
assert remote_ref and remote_ref.startswith('refs/'), remote_ref
url_parts = urlparse.urlparse(remote_url)
project_name = url_parts.path.lstrip('/').rstrip('git./')
for known in cls.KNOWN_PROJECTS_WHITELIST:
if project_name.endswith(known):
break
else:
# Early exit to avoid extra fetches for repos that aren't using gnumbd.
return cls(cls._get_pending_prefix_fallback(), None)
# This pollutes local ref space, but the amount of objects is negligible.
error, _ = cls._run_git_with_code([
'fetch', remote_url,
'+refs/meta/config:refs/git_cl/meta/config',
'+refs/gnumbd-config/main:refs/git_cl/gnumbd-config/main'])
if error:
# Some ref doesn't exist or isn't accessible to current user.
# This shouldn't happen on production KNOWN_PROJECTS_WHITELIST
# with git-numberer.
cls._warn('failed to fetch gnumbd and project config for %s: %s',
remote_url, error)
return cls(cls._get_pending_prefix_fallback(), None)
return cls(cls._get_pending_prefix(remote_ref),
cls._is_validator_enabled(remote_ref))
@classmethod
def _get_pending_prefix(cls, ref):
error, gnumbd_config_data = cls._run_git_with_code(
['show', 'refs/git_cl/gnumbd-config/main:config.json'])
if error:
cls._warn('gnumbd config file not found')
return cls._get_pending_prefix_fallback()
try:
config = json.loads(gnumbd_config_data)
if cls.match_refglobs(ref, config['enabled_refglobs']):
return config['pending_ref_prefix']
return None
except KeyboardInterrupt:
raise
except Exception as e:
cls._warn('failed to parse gnumbd config: %s', e)
return cls._get_pending_prefix_fallback()
@staticmethod
def _get_pending_prefix_fallback():
global settings
if not settings:
settings = Settings()
return settings.GetPendingRefPrefix()
@classmethod
def _is_validator_enabled(cls, ref):
error, project_config_data = cls._run_git_with_code(
['show', 'refs/git_cl/meta/config:project.config'])
if error:
cls._warn('project.config file not found')
return False
# Gerrit's project.config is really a git config file.
# So, parse it as such.
with gclient_utils.temporary_directory() as tempdir:
project_config_file = os.path.join(tempdir, 'project.config')
gclient_utils.FileWrite(project_config_file, project_config_data)
def get_opts(x):
code, out = cls._run_git_with_code(
['config', '-f', project_config_file, '--get-all',
'plugin.git-numberer.validate-%s-refglob' % x])
if code == 0:
return out.strip().splitlines()
return []
enabled, disabled = map(get_opts, ['enabled', 'disabled'])
logging.info('validator config enabled %s disabled %s refglobs for '
'(this ref: %s)', enabled, disabled, ref)
if cls.match_refglobs(ref, disabled):
return False
return cls.match_refglobs(ref, enabled)
@staticmethod
def match_refglobs(ref, refglobs):
for refglob in refglobs:
if ref == refglob or fnmatch.fnmatch(ref, refglob):
return True
return False
@staticmethod
def _run_git_with_code(*args, **kwargs):
# The only reason for this wrapper is easy porting of this code to CQ
# codebase, which forked git_cl.py and checkouts.py long time ago.
return RunGitWithCode(*args, **kwargs)
@staticmethod
def _warn(msg, *args):
if args:
msg = msg % args
print('WARNING: %s' % msg)
def __init__(self, pending_prefix, validator_enabled):
# TODO(tandrii): remove pending_prefix after gnumbd is no more.
if pending_prefix:
if not pending_prefix.endswith('/'):
pending_prefix += '/'
self._pending_prefix = pending_prefix or None
self._validator_enabled = validator_enabled or False
logging.debug('_GitNumbererState(pending: %s, validator: %s)',
self._pending_prefix, self._validator_enabled)
@property
def pending_prefix(self):
return self._pending_prefix
@property
def should_add_git_number(self):
return self._validator_enabled and self._pending_prefix is None
def ShortBranchName(branch):
"""Convert a name like 'refs/heads/foo' to just 'foo'."""
return branch.replace('refs/heads/', '', 1)
def GetCurrentBranchRef():
"""Returns branch ref (e.g., refs/heads/master) or None."""
return RunGit(['symbolic-ref', 'HEAD'],
stderr=subprocess2.VOID, error_ok=True).strip() or None
def GetCurrentBranch():
"""Returns current branch or None.
For refs/heads/* branches, returns just last part. For others, full ref.
"""
branchref = GetCurrentBranchRef()
if branchref:
return ShortBranchName(branchref)
return None
class _CQState(object):
"""Enum for states of CL with respect to Commit Queue."""
NONE = 'none'
DRY_RUN = 'dry_run'
COMMIT = 'commit'
ALL_STATES = [NONE, DRY_RUN, COMMIT]
class _ParsedIssueNumberArgument(object):
def __init__(self, issue=None, patchset=None, hostname=None):
self.issue = issue
self.patchset = patchset
self.hostname = hostname
@property
def valid(self):
return self.issue is not None
def ParseIssueNumberArgument(arg):
"""Parses the issue argument and returns _ParsedIssueNumberArgument."""
fail_result = _ParsedIssueNumberArgument()
if arg.isdigit():
return _ParsedIssueNumberArgument(issue=int(arg))
if not arg.startswith('http'):
return fail_result
url = gclient_utils.UpgradeToHttps(arg)
try:
parsed_url = urlparse.urlparse(url)
except ValueError:
return fail_result
for cls in _CODEREVIEW_IMPLEMENTATIONS.itervalues():
tmp = cls.ParseIssueURL(parsed_url)
if tmp is not None:
return tmp
return fail_result
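
# Illustrative behaviour (argument values are hypothetical):
#   ParseIssueNumberArgument('12345').issue  -> 12345
#   ParseIssueNumberArgument('not-a-url').valid  -> False (neither digits nor a URL)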
class GerritChangeNotExists(Exception):
def __init__(self, issue, url):
self.issue = issue
self.url = url
super(GerritChangeNotExists, self).__init__()
def __str__(self):
return 'change %s at %s does not exist or you have no access to it' % (
self.issue, self.url)
class Changelist(object):
"""Changelist works with one changelist in local branch.
Supports two codereview backends: Rietveld or Gerrit, selected at object
creation.
Notes:
* Not safe for concurrent multi-{thread,process} use.
* Caches values from current branch. Therefore, re-use after branch change
with great care.
"""
def __init__(self, branchref=None, issue=None, codereview=None, **kwargs):
"""Create a new ChangeList instance.
If issue is given, the codereview must be given too.
If `codereview` is given, it must be 'rietveld' or 'gerrit'.
Otherwise, it's decided based on current configuration of the local branch,
with default being 'rietveld' for backwards compatibility.
See _load_codereview_impl for more details.
**kwargs will be passed directly to codereview implementation.
"""
# Poke settings so we get the "configure your server" message if necessary.
global settings
if not settings:
# Happens when git_cl.py is used as a utility library.
settings = Settings()
if issue:
assert codereview, 'codereview must be known, if issue is known'
self.branchref = branchref
if self.branchref:
assert branchref.startswith('refs/heads/')
self.branch = ShortBranchName(self.branchref)
else:
self.branch = None
self.upstream_branch = None
self.lookedup_issue = False
self.issue = issue or None
self.has_description = False
self.description = None
self.lookedup_patchset = False
self.patchset = None
self.cc = None
self.watchers = ()
self._remote = None
self._codereview_impl = None
self._codereview = None
self._load_codereview_impl(codereview, **kwargs)
assert self._codereview_impl
assert self._codereview in _CODEREVIEW_IMPLEMENTATIONS
def _load_codereview_impl(self, codereview=None, **kwargs):
if codereview:
assert codereview in _CODEREVIEW_IMPLEMENTATIONS
cls = _CODEREVIEW_IMPLEMENTATIONS[codereview]
self._codereview = codereview
self._codereview_impl = cls(self, **kwargs)
return
# Automatic selection based on issue number set for a current branch.
# Rietveld takes precedence over Gerrit.
assert not self.issue
# Whether we find issue or not, we are doing the lookup.
self.lookedup_issue = True
if self.GetBranch():
for codereview, cls in _CODEREVIEW_IMPLEMENTATIONS.iteritems():
issue = _git_get_branch_config_value(
cls.IssueConfigKey(), value_type=int, branch=self.GetBranch())
if issue:
self._codereview = codereview
self._codereview_impl = cls(self, **kwargs)
self.issue = int(issue)
return
# No issue is set for this branch, so decide based on repo-wide settings.
return self._load_codereview_impl(
codereview='gerrit' if settings.GetIsGerrit() else 'rietveld',
**kwargs)
def IsGerrit(self):
return self._codereview == 'gerrit'
def GetCCList(self):
"""Return the users cc'd on this CL.
Return is a string suitable for passing to git cl with the --cc flag.
"""
if self.cc is None:
base_cc = settings.GetDefaultCCList()
more_cc = ','.join(self.watchers)
self.cc = ','.join(filter(None, (base_cc, more_cc))) or ''
return self.cc
def GetCCListWithoutDefault(self):
"""Return the users cc'd on this CL excluding default ones."""
if self.cc is None:
self.cc = ','.join(self.watchers)
return self.cc
def SetWatchers(self, watchers):
"""Set the list of email addresses that should be cc'd based on the changed
files in this CL.
"""
self.watchers = watchers
def GetBranch(self):
"""Returns the short branch name, e.g. 'master'."""
if not self.branch:
branchref = GetCurrentBranchRef()
if not branchref:
return None
self.branchref = branchref
self.branch = ShortBranchName(self.branchref)
return self.branch
def GetBranchRef(self):
"""Returns the full branch name, e.g. 'refs/heads/master'."""
self.GetBranch() # Poke the lazy loader.
return self.branchref
def ClearBranch(self):
"""Clears cached branch data of this object."""
self.branch = self.branchref = None
def _GitGetBranchConfigValue(self, key, default=None, **kwargs):
assert 'branch' not in kwargs, 'this CL branch is used automatically'
kwargs['branch'] = self.GetBranch()
return _git_get_branch_config_value(key, default, **kwargs)
def _GitSetBranchConfigValue(self, key, value, **kwargs):
assert 'branch' not in kwargs, 'this CL branch is used automatically'
assert self.GetBranch(), (
'this CL must have an associated branch to %sset %s%s' %
('un' if value is None else '',
key,
'' if value is None else ' to %r' % value))
kwargs['branch'] = self.GetBranch()
return _git_set_branch_config_value(key, value, **kwargs)
@staticmethod
def FetchUpstreamTuple(branch):
"""Returns a tuple containing remote and remote ref,
e.g. 'origin', 'refs/heads/master'
"""
remote = '.'
upstream_branch = _git_get_branch_config_value('merge', branch=branch)
if upstream_branch:
remote = _git_get_branch_config_value('remote', branch=branch)
else:
upstream_branch = RunGit(['config', 'rietveld.upstream-branch'],
error_ok=True).strip()
if upstream_branch:
remote = RunGit(['config', 'rietveld.upstream-remote']).strip()
else:
# Else, try to guess the origin remote.
remote_branches = RunGit(['branch', '-r']).split()
if 'origin/master' in remote_branches:
          # Fall back on origin/master if it exists.
remote = 'origin'
upstream_branch = 'refs/heads/master'
else:
DieWithError(
'Unable to determine default branch to diff against.\n'
'Either pass complete "git diff"-style arguments, like\n'
' git cl upload origin/master\n'
'or verify this branch is set up to track another \n'
'(via the --track argument to "git checkout -b ...").')
return remote, upstream_branch
def GetCommonAncestorWithUpstream(self):
upstream_branch = self.GetUpstreamBranch()
if not BranchExists(upstream_branch):
DieWithError('The upstream for the current branch (%s) does not exist '
'anymore.\nPlease fix it and try again.' % self.GetBranch())
return git_common.get_or_create_merge_base(self.GetBranch(),
upstream_branch)
def GetUpstreamBranch(self):
if self.upstream_branch is None:
remote, upstream_branch = self.FetchUpstreamTuple(self.GetBranch())
      if remote != '.':
upstream_branch = upstream_branch.replace('refs/heads/',
'refs/remotes/%s/' % remote)
upstream_branch = upstream_branch.replace('refs/branch-heads/',
'refs/remotes/branch-heads/')
self.upstream_branch = upstream_branch
return self.upstream_branch
def GetRemoteBranch(self):
if not self._remote:
remote, branch = None, self.GetBranch()
seen_branches = set()
while branch not in seen_branches:
seen_branches.add(branch)
remote, branch = self.FetchUpstreamTuple(branch)
branch = ShortBranchName(branch)
if remote != '.' or branch.startswith('refs/remotes'):
break
else:
remotes = RunGit(['remote'], error_ok=True).split()
if len(remotes) == 1:
remote, = remotes
elif 'origin' in remotes:
remote = 'origin'
logging.warn('Could not determine which remote this change is '
'associated with, so defaulting to "%s".' % self._remote)
else:
logging.warn('Could not determine which remote this change is '
'associated with.')
branch = 'HEAD'
if branch.startswith('refs/remotes'):
self._remote = (remote, branch)
elif branch.startswith('refs/branch-heads/'):
self._remote = (remote, branch.replace('refs/', 'refs/remotes/'))
else:
self._remote = (remote, 'refs/remotes/%s/%s' % (remote, branch))
return self._remote
def GitSanityChecks(self, upstream_git_obj):
"""Checks git repo status and ensures diff is from local commits."""
if upstream_git_obj is None:
if self.GetBranch() is None:
print('ERROR: unable to determine current branch (detached HEAD?)',
file=sys.stderr)
else:
print('ERROR: no upstream branch', file=sys.stderr)
return False
# Verify the commit we're diffing against is in our current branch.
upstream_sha = RunGit(['rev-parse', '--verify', upstream_git_obj]).strip()
common_ancestor = RunGit(['merge-base', upstream_sha, 'HEAD']).strip()
if upstream_sha != common_ancestor:
print('ERROR: %s is not in the current branch. You may need to rebase '
'your tracking branch' % upstream_sha, file=sys.stderr)
return False
# List the commits inside the diff, and verify they are all local.
commits_in_diff = RunGit(
['rev-list', '^%s' % upstream_sha, 'HEAD']).splitlines()
code, remote_branch = RunGitWithCode(['config', 'gitcl.remotebranch'])
remote_branch = remote_branch.strip()
if code != 0:
_, remote_branch = self.GetRemoteBranch()
commits_in_remote = RunGit(
['rev-list', '^%s' % upstream_sha, remote_branch]).splitlines()
common_commits = set(commits_in_diff) & set(commits_in_remote)
if common_commits:
print('ERROR: Your diff contains %d commits already in %s.\n'
'Run "git log --oneline %s..HEAD" to get a list of commits in '
'the diff. If you are using a custom git flow, you can override'
' the reference used for this check with "git config '
'gitcl.remotebranch <git-ref>".' % (
len(common_commits), remote_branch, upstream_git_obj),
file=sys.stderr)
return False
return True
def GetGitBaseUrlFromConfig(self):
"""Return the configured base URL from branch.<branchname>.baseurl.
Returns None if it is not set.
"""
return self._GitGetBranchConfigValue('base-url')
def GetRemoteUrl(self):
"""Return the configured remote URL, e.g. 'git://example.org/foo.git/'.
Returns None if there is no remote.
"""
remote, _ = self.GetRemoteBranch()
url = RunGit(['config', 'remote.%s.url' % remote], error_ok=True).strip()
# If URL is pointing to a local directory, it is probably a git cache.
if os.path.isdir(url):
url = RunGit(['config', 'remote.%s.url' % remote],
error_ok=True,
cwd=url).strip()
return url
def GetIssue(self):
"""Returns the issue number as a int or None if not set."""
if self.issue is None and not self.lookedup_issue:
self.issue = self._GitGetBranchConfigValue(
self._codereview_impl.IssueConfigKey(), value_type=int)
self.lookedup_issue = True
return self.issue
def GetIssueURL(self):
"""Get the URL for a particular issue."""
issue = self.GetIssue()
if not issue:
return None
return '%s/%s' % (self._codereview_impl.GetCodereviewServer(), issue)
def GetDescription(self, pretty=False):
if not self.has_description:
if self.GetIssue():
self.description = self._codereview_impl.FetchDescription()
self.has_description = True
if pretty:
# Set width to 72 columns + 2 space indent.
wrapper = textwrap.TextWrapper(width=74, replace_whitespace=True)
wrapper.initial_indent = wrapper.subsequent_indent = ' '
lines = self.description.splitlines()
return '\n'.join([wrapper.fill(line) for line in lines])
return self.description
def GetPatchset(self):
"""Returns the patchset number as a int or None if not set."""
if self.patchset is None and not self.lookedup_patchset:
self.patchset = self._GitGetBranchConfigValue(
self._codereview_impl.PatchsetConfigKey(), value_type=int)
self.lookedup_patchset = True
return self.patchset
def SetPatchset(self, patchset):
"""Set this branch's patchset. If patchset=0, clears the patchset."""
assert self.GetBranch()
if not patchset:
self.patchset = None
else:
self.patchset = int(patchset)
self._GitSetBranchConfigValue(
self._codereview_impl.PatchsetConfigKey(), self.patchset)
def SetIssue(self, issue=None):
"""Set this branch's issue. If issue isn't given, clears the issue."""
assert self.GetBranch()
if issue:
issue = int(issue)
self._GitSetBranchConfigValue(
self._codereview_impl.IssueConfigKey(), issue)
self.issue = issue
codereview_server = self._codereview_impl.GetCodereviewServer()
if codereview_server:
self._GitSetBranchConfigValue(
self._codereview_impl.CodereviewServerConfigKey(),
codereview_server)
else:
# Reset all of these just to be clean.
reset_suffixes = [
'last-upload-hash',
self._codereview_impl.IssueConfigKey(),
self._codereview_impl.PatchsetConfigKey(),
self._codereview_impl.CodereviewServerConfigKey(),
] + self._PostUnsetIssueProperties()
for prop in reset_suffixes:
self._GitSetBranchConfigValue(prop, None, error_ok=True)
self.issue = None
self.patchset = None
def GetChange(self, upstream_branch, author, local_description=False):
if not self.GitSanityChecks(upstream_branch):
DieWithError('\nGit sanity check failure')
root = settings.GetRelativeRoot()
if not root:
root = '.'
absroot = os.path.abspath(root)
# We use the sha1 of HEAD as a name of this change.
name = RunGitWithCode(['rev-parse', 'HEAD'])[1].strip()
# Need to pass a relative path for msysgit.
try:
files = scm.GIT.CaptureStatus([root], '.', upstream_branch)
except subprocess2.CalledProcessError:
DieWithError(
('\nFailed to diff against upstream branch %s\n\n'
'This branch probably doesn\'t exist anymore. To reset the\n'
'tracking branch, please run\n'
' git branch --set-upstream-to origin/master %s\n'
'or replace origin/master with the relevant branch') %
(upstream_branch, self.GetBranch()))
issue = self.GetIssue()
patchset = self.GetPatchset()
if issue and not local_description:
description = self.GetDescription()
else:
# If the change was never uploaded, use the log messages of all commits
# up to the branch point, as git cl upload will prefill the description
# with these log messages.
args = ['log', '--pretty=format:%s%n%n%b', '%s...' % (upstream_branch)]
description = RunGitWithCode(args)[1].strip()
if not author:
author = RunGit(['config', 'user.email']).strip() or None
return presubmit_support.GitChange(
name,
description,
absroot,
files,
issue,
patchset,
author,
upstream=upstream_branch)
def UpdateDescription(self, description, force=False):
self.description = description
return self._codereview_impl.UpdateDescriptionRemote(
description, force=force)
def RunHook(self, committing, may_prompt, verbose, change):
"""Calls sys.exit() if the hook fails; returns a HookResults otherwise."""
try:
return presubmit_support.DoPresubmitChecks(change, committing,
verbose=verbose, output_stream=sys.stdout, input_stream=sys.stdin,
default_presubmit=None, may_prompt=may_prompt,
rietveld_obj=self._codereview_impl.GetRieveldObjForPresubmit(),
gerrit_obj=self._codereview_impl.GetGerritObjForPresubmit())
except presubmit_support.PresubmitFailure as e:
DieWithError(
('%s\nMaybe your depot_tools is out of date?\n'
'If all fails, contact maruel@') % e)
def CMDPatchIssue(self, issue_arg, reject, nocommit, directory):
"""Fetches and applies the issue patch from codereview to local branch."""
if isinstance(issue_arg, (int, long)) or issue_arg.isdigit():
parsed_issue_arg = _ParsedIssueNumberArgument(int(issue_arg))
else:
# Assume url.
parsed_issue_arg = self._codereview_impl.ParseIssueURL(
urlparse.urlparse(issue_arg))
if not parsed_issue_arg or not parsed_issue_arg.valid:
DieWithError('Failed to parse issue argument "%s". '
'Must be an issue number or a valid URL.' % issue_arg)
return self._codereview_impl.CMDPatchWithParsedIssue(
parsed_issue_arg, reject, nocommit, directory)
def CMDUpload(self, options, git_diff_args, orig_args):
"""Uploads a change to codereview."""
if git_diff_args:
# TODO(ukai): is it ok for gerrit case?
base_branch = git_diff_args[0]
else:
if self.GetBranch() is None:
DieWithError('Can\'t upload from detached HEAD state. Get on a branch!')
# Default to diffing against common ancestor of upstream branch
base_branch = self.GetCommonAncestorWithUpstream()
git_diff_args = [base_branch, 'HEAD']
    # Make sure we are authenticated to the codereview site before running
    # potentially expensive hooks. This is a fast, best-effort check; the
    # codereview site can still reject the authentication during the actual
    # upload.
self._codereview_impl.EnsureAuthenticated(force=options.force)
# Apply watchlists on upload.
change = self.GetChange(base_branch, None)
watchlist = watchlists.Watchlists(change.RepositoryRoot())
files = [f.LocalPath() for f in change.AffectedFiles()]
if not options.bypass_watchlists:
self.SetWatchers(watchlist.GetWatchersForPaths(files))
if not options.bypass_hooks:
if options.reviewers or options.tbr_owners:
# Set the reviewer list now so that presubmit checks can access it.
change_description = ChangeDescription(change.FullDescriptionText())
change_description.update_reviewers(options.reviewers,
options.tbr_owners,
change)
change.SetDescriptionText(change_description.description)
hook_results = self.RunHook(committing=False,
may_prompt=not options.force,
verbose=options.verbose,
change=change)
if not hook_results.should_continue():
return 1
if not options.reviewers and hook_results.reviewers:
options.reviewers = hook_results.reviewers.split(',')
# TODO(tandrii): Checking local patchset against remote patchset is only
# supported for Rietveld. Extend it to Gerrit or remove it completely.
if self.GetIssue() and not self.IsGerrit():
latest_patchset = self.GetMostRecentPatchset()
local_patchset = self.GetPatchset()
if (latest_patchset and local_patchset and
local_patchset != latest_patchset):
print('The last upload made from this repository was patchset #%d but '
'the most recent patchset on the server is #%d.'
% (local_patchset, latest_patchset))
print('Uploading will still work, but if you\'ve uploaded to this '
'issue from another machine or branch the patch you\'re '
'uploading now might not include those changes.')
ask_for_data('About to upload; enter to confirm.')
print_stats(options.similarity, options.find_copies, git_diff_args)
ret = self.CMDUploadChange(options, git_diff_args, change)
if not ret:
if options.use_commit_queue:
self.SetCQState(_CQState.COMMIT)
elif options.cq_dry_run:
self.SetCQState(_CQState.DRY_RUN)
_git_set_branch_config_value('last-upload-hash',
RunGit(['rev-parse', 'HEAD']).strip())
# Run post upload hooks, if specified.
if settings.GetRunPostUploadHook():
presubmit_support.DoPostUploadExecuter(
change,
self,
settings.GetRoot(),
options.verbose,
sys.stdout)
# Upload all dependencies if specified.
if options.dependencies:
print()
print('--dependencies has been specified.')
print('All dependent local branches will be re-uploaded.')
print()
# Remove the dependencies flag from args so that we do not end up in a
# loop.
orig_args.remove('--dependencies')
ret = upload_branch_deps(self, orig_args)
return ret
def SetCQState(self, new_state):
"""Update the CQ state for latest patchset.
Issue must have been already uploaded and known.
"""
assert new_state in _CQState.ALL_STATES
assert self.GetIssue()
return self._codereview_impl.SetCQState(new_state)
def TriggerDryRun(self):
"""Triggers a dry run and prints a warning on failure."""
# TODO(qyearsley): Either re-use this method in CMDset_commit
# and CMDupload, or change CMDtry to trigger dry runs with
# just SetCQState, and catch keyboard interrupt and other
# errors in that method.
try:
self.SetCQState(_CQState.DRY_RUN)
print('scheduled CQ Dry Run on %s' % self.GetIssueURL())
return 0
except KeyboardInterrupt:
raise
except:
print('WARNING: failed to trigger CQ Dry Run.\n'
'Either:\n'
' * your project has no CQ\n'
' * you don\'t have permission to trigger Dry Run\n'
' * bug in this code (see stack trace below).\n'
'Consider specifying which bots to trigger manually '
'or asking your project owners for permissions '
'or contacting Chrome Infrastructure team at '
'https://www.chromium.org/infra\n\n')
# Still raise exception so that stack trace is printed.
raise
# Forward methods to codereview specific implementation.
def CloseIssue(self):
return self._codereview_impl.CloseIssue()
def GetStatus(self):
return self._codereview_impl.GetStatus()
def GetCodereviewServer(self):
return self._codereview_impl.GetCodereviewServer()
def GetIssueOwner(self):
"""Get owner from codereview, which may differ from this checkout."""
return self._codereview_impl.GetIssueOwner()
def GetApprovingReviewers(self):
return self._codereview_impl.GetApprovingReviewers()
def GetMostRecentPatchset(self):
return self._codereview_impl.GetMostRecentPatchset()
def CannotTriggerTryJobReason(self):
"""Returns reason (str) if unable trigger tryjobs on this CL or None."""
return self._codereview_impl.CannotTriggerTryJobReason()
def GetTryjobProperties(self, patchset=None):
"""Returns dictionary of properties to launch tryjob."""
return self._codereview_impl.GetTryjobProperties(patchset=patchset)
def __getattr__(self, attr):
# This is because lots of untested code accesses Rietveld-specific stuff
# directly, and it's hard to fix for sure. So, just let it work, and fix
# on a case by case basis.
# Note that child method defines __getattr__ as well, and forwards it here,
# because _RietveldChangelistImpl is not cleaned up yet, and given
# deprecation of Rietveld, it should probably be just removed.
# Until that time, avoid infinite recursion by bypassing __getattr__
# of implementation class.
return self._codereview_impl.__getattribute__(attr)
class _ChangelistCodereviewBase(object):
"""Abstract base class encapsulating codereview specifics of a changelist."""
def __init__(self, changelist):
self._changelist = changelist # instance of Changelist
def __getattr__(self, attr):
# Forward methods to changelist.
# TODO(tandrii): maybe clean up _GerritChangelistImpl and
# _RietveldChangelistImpl to avoid this hack?
return getattr(self._changelist, attr)
def GetStatus(self):
"""Apply a rough heuristic to give a simple summary of an issue's review
or CQ status, assuming adherence to a common workflow.
Returns None if no issue for this branch, or specific string keywords.
"""
raise NotImplementedError()
def GetCodereviewServer(self):
"""Returns server URL without end slash, like "https://codereview.com"."""
raise NotImplementedError()
def FetchDescription(self):
"""Fetches and returns description from the codereview server."""
raise NotImplementedError()
@classmethod
def IssueConfigKey(cls):
"""Returns branch setting storing issue number."""
raise NotImplementedError()
@classmethod
def PatchsetConfigKey(cls):
"""Returns branch setting storing patchset number."""
raise NotImplementedError()
@classmethod
def CodereviewServerConfigKey(cls):
"""Returns branch setting storing codereview server."""
raise NotImplementedError()
def _PostUnsetIssueProperties(self):
"""Which branch-specific properties to erase when unsettin issue."""
return []
def GetRieveldObjForPresubmit(self):
# This is an unfortunate Rietveld-embeddedness in presubmit.
# For non-Rietveld codereviews, this probably should return a dummy object.
raise NotImplementedError()
def GetGerritObjForPresubmit(self):
# None is valid return value, otherwise presubmit_support.GerritAccessor.
return None
def UpdateDescriptionRemote(self, description, force=False):
"""Update the description on codereview site."""
raise NotImplementedError()
def CloseIssue(self):
"""Closes the issue."""
raise NotImplementedError()
def GetApprovingReviewers(self):
"""Returns a list of reviewers approving the change.
Note: not necessarily committers.
"""
raise NotImplementedError()
def GetMostRecentPatchset(self):
"""Returns the most recent patchset number from the codereview site."""
raise NotImplementedError()
def CMDPatchWithParsedIssue(self, parsed_issue_arg, reject, nocommit,
directory):
"""Fetches and applies the issue.
Arguments:
parsed_issue_arg: instance of _ParsedIssueNumberArgument.
reject: if True, reject the failed patch instead of switching to 3-way
merge. Rietveld only.
nocommit: do not commit the patch, thus leave the tree dirty. Rietveld
only.
directory: switch to directory before applying the patch. Rietveld only.
"""
raise NotImplementedError()
@staticmethod
def ParseIssueURL(parsed_url):
"""Parses url and returns instance of _ParsedIssueNumberArgument or None if
failed."""
raise NotImplementedError()
def EnsureAuthenticated(self, force):
"""Best effort check that user is authenticated with codereview server.
Arguments:
force: whether to skip confirmation questions.
"""
raise NotImplementedError()
def CMDUploadChange(self, options, args, change):
"""Uploads a change to codereview."""
raise NotImplementedError()
def SetCQState(self, new_state):
"""Update the CQ state for latest patchset.
Issue must have been already uploaded and known.
"""
raise NotImplementedError()
def CannotTriggerTryJobReason(self):
"""Returns reason (str) if unable trigger tryjobs on this CL or None."""
raise NotImplementedError()
def GetIssueOwner(self):
raise NotImplementedError()
def GetTryjobProperties(self, patchset=None):
raise NotImplementedError()
class _RietveldChangelistImpl(_ChangelistCodereviewBase):
def __init__(self, changelist, auth_config=None, rietveld_server=None):
super(_RietveldChangelistImpl, self).__init__(changelist)
assert settings, 'must be initialized in _ChangelistCodereviewBase'
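    # Note: the bare GetDefaultServerUrl() call below appears to be made only
    # for its side effect of resolving/validating the default server setting;
    # the server actually used is looked up lazily in GetCodereviewServer().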
if not rietveld_server:
settings.GetDefaultServerUrl()
self._rietveld_server = rietveld_server
self._auth_config = auth_config
self._props = None
self._rpc_server = None
def GetCodereviewServer(self):
if not self._rietveld_server:
# If we're on a branch then get the server potentially associated
# with that branch.
if self.GetIssue():
self._rietveld_server = gclient_utils.UpgradeToHttps(
self._GitGetBranchConfigValue(self.CodereviewServerConfigKey()))
if not self._rietveld_server:
self._rietveld_server = settings.GetDefaultServerUrl()
return self._rietveld_server
def EnsureAuthenticated(self, force):
"""Best effort check that user is authenticated with Rietveld server."""
if self._auth_config.use_oauth2:
authenticator = auth.get_authenticator_for_host(
self.GetCodereviewServer(), self._auth_config)
if not authenticator.has_cached_credentials():
raise auth.LoginRequiredError(self.GetCodereviewServer())
def FetchDescription(self):
issue = self.GetIssue()
assert issue
try:
return self.RpcServer().get_description(issue).strip()
except urllib2.HTTPError as e:
if e.code == 404:
DieWithError(
('\nWhile fetching the description for issue %d, received a '
'404 (not found)\n'
'error. It is likely that you deleted this '
'issue on the server. If this is the\n'
'case, please run\n\n'
' git cl issue 0\n\n'
'to clear the association with the deleted issue. Then run '
'this command again.') % issue)
else:
DieWithError(
'\nFailed to fetch issue description. HTTP error %d' % e.code)
except urllib2.URLError as e:
print('Warning: Failed to retrieve CL description due to network '
'failure.', file=sys.stderr)
return ''
def GetMostRecentPatchset(self):
return self.GetIssueProperties()['patchsets'][-1]
def GetIssueProperties(self):
if self._props is None:
issue = self.GetIssue()
if not issue:
self._props = {}
else:
self._props = self.RpcServer().get_issue_properties(issue, True)
return self._props
def CannotTriggerTryJobReason(self):
props = self.GetIssueProperties()
if not props:
return 'Rietveld doesn\'t know about your issue %s' % self.GetIssue()
if props.get('closed'):
return 'CL %s is closed' % self.GetIssue()
if props.get('private'):
return 'CL %s is private' % self.GetIssue()
return None
def GetTryjobProperties(self, patchset=None):
"""Returns dictionary of properties to launch tryjob."""
project = (self.GetIssueProperties() or {}).get('project')
return {
'issue': self.GetIssue(),
'patch_project': project,
'patch_storage': 'rietveld',
'patchset': patchset or self.GetPatchset(),
'rietveld': self.GetCodereviewServer(),
}
def GetApprovingReviewers(self):
return get_approving_reviewers(self.GetIssueProperties())
def GetIssueOwner(self):
return (self.GetIssueProperties() or {}).get('owner_email')
def AddComment(self, message):
return self.RpcServer().add_comment(self.GetIssue(), message)
def GetStatus(self):
"""Apply a rough heuristic to give a simple summary of an issue's review
or CQ status, assuming adherence to a common workflow.
Returns None if no issue for this branch, or one of the following keywords:
* 'error' - error from review tool (including deleted issues)
* 'unsent' - not sent for review
* 'waiting' - waiting for review
* 'reply' - waiting for owner to reply to review
* 'lgtm' - LGTM from at least one approved reviewer
* 'commit' - in the commit queue
* 'closed' - closed
"""
if not self.GetIssue():
return None
try:
props = self.GetIssueProperties()
except urllib2.HTTPError:
return 'error'
if props.get('closed'):
# Issue is closed.
return 'closed'
if props.get('commit') and not props.get('cq_dry_run', False):
# Issue is in the commit queue.
return 'commit'
try:
reviewers = self.GetApprovingReviewers()
except urllib2.HTTPError:
return 'error'
if reviewers:
# Was LGTM'ed.
return 'lgtm'
messages = props.get('messages') or []
# Skip CQ messages that don't require owner's action.
while messages and messages[-1]['sender'] == COMMIT_BOT_EMAIL:
if 'Dry run:' in messages[-1]['text']:
messages.pop()
elif 'The CQ bit was unchecked' in messages[-1]['text']:
# This message always follows prior messages from CQ,
# so skip this too.
messages.pop()
else:
        # This is probably a CQ message warranting user attention.
break
if not messages:
# No message was sent.
return 'unsent'
if messages[-1]['sender'] != props.get('owner_email'):
# Non-LGTM reply from non-owner and not CQ bot.
return 'reply'
return 'waiting'
def UpdateDescriptionRemote(self, description, force=False):
return self.RpcServer().update_description(
        self.GetIssue(), description)
def CloseIssue(self):
return self.RpcServer().close_issue(self.GetIssue())
def SetFlag(self, flag, value):
return self.SetFlags({flag: value})
def SetFlags(self, flags):
"""Sets flags on this CL/patchset in Rietveld.
"""
patchset = self.GetPatchset() or self.GetMostRecentPatchset()
try:
return self.RpcServer().set_flags(
self.GetIssue(), patchset, flags)
except urllib2.HTTPError as e:
if e.code == 404:
DieWithError('The issue %s doesn\'t exist.' % self.GetIssue())
if e.code == 403:
DieWithError(
('Access denied to issue %s. Maybe the patchset %s doesn\'t '
'match?') % (self.GetIssue(), patchset))
raise
def RpcServer(self):
"""Returns an upload.RpcServer() to access this review's rietveld instance.
"""
if not self._rpc_server:
self._rpc_server = rietveld.CachingRietveld(
self.GetCodereviewServer(),
self._auth_config or auth.make_auth_config())
return self._rpc_server
@classmethod
def IssueConfigKey(cls):
return 'rietveldissue'
@classmethod
def PatchsetConfigKey(cls):
return 'rietveldpatchset'
@classmethod
def CodereviewServerConfigKey(cls):
return 'rietveldserver'
def GetRieveldObjForPresubmit(self):
return self.RpcServer()
def SetCQState(self, new_state):
props = self.GetIssueProperties()
if props.get('private'):
DieWithError('Cannot set-commit on private issue')
if new_state == _CQState.COMMIT:
self.SetFlags({'commit': '1', 'cq_dry_run': '0'})
elif new_state == _CQState.NONE:
self.SetFlags({'commit': '0', 'cq_dry_run': '0'})
else:
assert new_state == _CQState.DRY_RUN
self.SetFlags({'commit': '1', 'cq_dry_run': '1'})
def CMDPatchWithParsedIssue(self, parsed_issue_arg, reject, nocommit,
directory):
# PatchIssue should never be called with a dirty tree. It is up to the
# caller to check this, but just in case we assert here since the
# consequences of the caller not checking this could be dire.
assert(not git_common.is_dirty_git_tree('apply'))
assert(parsed_issue_arg.valid)
self._changelist.issue = parsed_issue_arg.issue
if parsed_issue_arg.hostname:
self._rietveld_server = 'https://%s' % parsed_issue_arg.hostname
patchset = parsed_issue_arg.patchset or self.GetMostRecentPatchset()
patchset_object = self.RpcServer().get_patch(self.GetIssue(), patchset)
scm_obj = checkout.GitCheckout(settings.GetRoot(), None, None, None, None)
try:
scm_obj.apply_patch(patchset_object)
except Exception as e:
print(str(e))
return 1
# If we had an issue, commit the current state and register the issue.
if not nocommit:
RunGit(['commit', '-m', (self.GetDescription() + '\n\n' +
'patch from issue %(i)s at patchset '
'%(p)s (http://crrev.com/%(i)s#ps%(p)s)'
% {'i': self.GetIssue(), 'p': patchset})])
self.SetIssue(self.GetIssue())
self.SetPatchset(patchset)
print('Committed patch locally.')
else:
print('Patch applied to index.')
return 0
@staticmethod
def ParseIssueURL(parsed_url):
if not parsed_url.scheme or not parsed_url.scheme.startswith('http'):
return None
# Rietveld patch: https://domain/<number>/#ps<patchset>
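    # Illustrative example (hypothetical host):
    #   https://codereview.example.com/123/#ps4 -> issue=123, patchset=4.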
match = re.match(r'/(\d+)/$', parsed_url.path)
match2 = re.match(r'ps(\d+)$', parsed_url.fragment)
if match and match2:
return _ParsedIssueNumberArgument(
issue=int(match.group(1)),
patchset=int(match2.group(1)),
hostname=parsed_url.netloc)
# Typical url: https://domain/<issue_number>[/[other]]
    match = re.match(r'/(\d+)(/.*)?$', parsed_url.path)
if match:
return _ParsedIssueNumberArgument(
issue=int(match.group(1)),
hostname=parsed_url.netloc)
# Rietveld patch: https://domain/download/issue<number>_<patchset>.diff
match = re.match(r'/download/issue(\d+)_(\d+).diff$', parsed_url.path)
if match:
return _ParsedIssueNumberArgument(
issue=int(match.group(1)),
patchset=int(match.group(2)),
hostname=parsed_url.netloc)
return None
def CMDUploadChange(self, options, args, change):
"""Upload the patch to Rietveld."""
upload_args = ['--assume_yes'] # Don't ask about untracked files.
upload_args.extend(['--server', self.GetCodereviewServer()])
upload_args.extend(auth.auth_config_to_command_options(self._auth_config))
if options.emulate_svn_auto_props:
upload_args.append('--emulate_svn_auto_props')
change_desc = None
if options.email is not None:
upload_args.extend(['--email', options.email])
if self.GetIssue():
if options.title is not None:
upload_args.extend(['--title', options.title])
if options.message:
upload_args.extend(['--message', options.message])
upload_args.extend(['--issue', str(self.GetIssue())])
print('This branch is associated with issue %s. '
'Adding patch to that issue.' % self.GetIssue())
else:
if options.title is not None:
upload_args.extend(['--title', options.title])
if options.message:
message = options.message
else:
message = CreateDescriptionFromLog(args)
if options.title:
message = options.title + '\n\n' + message
change_desc = ChangeDescription(message)
if options.reviewers or options.tbr_owners:
change_desc.update_reviewers(options.reviewers,
options.tbr_owners,
change)
if not options.force:
change_desc.prompt(bug=options.bug)
if not change_desc.description:
print('Description is empty; aborting.')
return 1
upload_args.extend(['--message', change_desc.description])
if change_desc.get_reviewers():
upload_args.append('--reviewers=%s' % ','.join(
change_desc.get_reviewers()))
if options.send_mail:
if not change_desc.get_reviewers():
DieWithError("Must specify reviewers to send email.")
upload_args.append('--send_mail')
    # We check this before applying rietveld.private, assuming that if
    # rietveld.private is set then rietveld.cc lists only addresses to which we
    # can send private CLs; therefore rietveld.cc should be ignored only when
    # --private is specified explicitly on the command line.
if options.private:
logging.warn('rietveld.cc is ignored since private flag is specified. '
'You need to review and add them manually if necessary.')
cc = self.GetCCListWithoutDefault()
else:
cc = self.GetCCList()
cc = ','.join(filter(None, (cc, ','.join(options.cc))))
if change_desc.get_cced():
cc = ','.join(filter(None, (cc, ','.join(change_desc.get_cced()))))
if cc:
upload_args.extend(['--cc', cc])
if options.private or settings.GetDefaultPrivateFlag() == "True":
upload_args.append('--private')
upload_args.extend(['--git_similarity', str(options.similarity)])
if not options.find_copies:
upload_args.extend(['--git_no_find_copies'])
# Include the upstream repo's URL in the change -- this is useful for
# projects that have their source spread across multiple repos.
remote_url = self.GetGitBaseUrlFromConfig()
if not remote_url:
if self.GetRemoteUrl() and '/' in self.GetUpstreamBranch():
remote_url = '%s@%s' % (self.GetRemoteUrl(),
self.GetUpstreamBranch().split('/')[-1])
if remote_url:
remote, remote_branch = self.GetRemoteBranch()
target_ref = GetTargetRef(remote, remote_branch, options.target_branch,
pending_prefix_check=True,
remote_url=self.GetRemoteUrl())
if target_ref:
upload_args.extend(['--target_ref', target_ref])
# Look for dependent patchsets. See crbug.com/480453 for more details.
remote, upstream_branch = self.FetchUpstreamTuple(self.GetBranch())
upstream_branch = ShortBranchName(upstream_branch)
    if remote == '.':
# A local branch is being tracked.
local_branch = upstream_branch
if settings.GetIsSkipDependencyUpload(local_branch):
print()
print('Skipping dependency patchset upload because git config '
'branch.%s.skip-deps-uploads is set to True.' % local_branch)
print()
else:
auth_config = auth.extract_auth_config_from_options(options)
branch_cl = Changelist(branchref='refs/heads/'+local_branch,
auth_config=auth_config)
branch_cl_issue_url = branch_cl.GetIssueURL()
branch_cl_issue = branch_cl.GetIssue()
branch_cl_patchset = branch_cl.GetPatchset()
if branch_cl_issue_url and branch_cl_issue and branch_cl_patchset:
upload_args.extend(
['--depends_on_patchset', '%s:%s' % (
branch_cl_issue, branch_cl_patchset)])
print(
'\n'
'The current branch (%s) is tracking a local branch (%s) with '
'an associated CL.\n'
'Adding %s/#ps%s as a dependency patchset.\n'
'\n' % (self.GetBranch(), local_branch, branch_cl_issue_url,
branch_cl_patchset))
project = settings.GetProject()
if project:
upload_args.extend(['--project', project])
try:
upload_args = ['upload'] + upload_args + args
logging.info('upload.RealMain(%s)', upload_args)
issue, patchset = upload.RealMain(upload_args)
issue = int(issue)
patchset = int(patchset)
except KeyboardInterrupt:
sys.exit(1)
except:
# If we got an exception after the user typed a description for their
# change, back up the description before re-raising.
if change_desc:
backup_path = os.path.expanduser(DESCRIPTION_BACKUP_FILE)
print('\nGot exception while uploading -- saving description to %s\n' %
backup_path)
backup_file = open(backup_path, 'w')
backup_file.write(change_desc.description)
backup_file.close()
raise
if not self.GetIssue():
self.SetIssue(issue)
self.SetPatchset(patchset)
return 0
class _GerritChangelistImpl(_ChangelistCodereviewBase):
def __init__(self, changelist, auth_config=None):
    # auth_config is a Rietveld thing, kept only to preserve the interface.
super(_GerritChangelistImpl, self).__init__(changelist)
self._change_id = None
# Lazily cached values.
self._gerrit_server = None # e.g. https://chromium-review.googlesource.com
self._gerrit_host = None # e.g. chromium-review.googlesource.com
def _GetGerritHost(self):
# Lazy load of configs.
self.GetCodereviewServer()
if self._gerrit_host and '.' not in self._gerrit_host:
# Abbreviated domain like "chromium" instead of chromium.googlesource.com.
# This happens for internal stuff http://crbug.com/614312.
parsed = urlparse.urlparse(self.GetRemoteUrl())
if parsed.scheme == 'sso':
        print('WARNING: using non-https URLs for remote is likely broken\n'
' Your current remote is: %s' % self.GetRemoteUrl())
self._gerrit_host = '%s.googlesource.com' % self._gerrit_host
self._gerrit_server = 'https://%s' % self._gerrit_host
return self._gerrit_host
def _GetGitHost(self):
"""Returns git host to be used when uploading change to Gerrit."""
return urlparse.urlparse(self.GetRemoteUrl()).netloc
def GetCodereviewServer(self):
if not self._gerrit_server:
# If we're on a branch then get the server potentially associated
# with that branch.
if self.GetIssue():
self._gerrit_server = self._GitGetBranchConfigValue(
self.CodereviewServerConfigKey())
if self._gerrit_server:
self._gerrit_host = urlparse.urlparse(self._gerrit_server).netloc
if not self._gerrit_server:
      # We assume the repo is hosted on Gerrit, and hence the Gerrit server
      # has a "-review" suffix on its lowest-level subdomain.
parts = self._GetGitHost().split('.')
parts[0] = parts[0] + '-review'
self._gerrit_host = '.'.join(parts)
self._gerrit_server = 'https://%s' % self._gerrit_host
return self._gerrit_server
@classmethod
def IssueConfigKey(cls):
return 'gerritissue'
@classmethod
def PatchsetConfigKey(cls):
return 'gerritpatchset'
@classmethod
def CodereviewServerConfigKey(cls):
return 'gerritserver'
def EnsureAuthenticated(self, force):
"""Best effort check that user is authenticated with Gerrit server."""
if settings.GetGerritSkipEnsureAuthenticated():
# For projects with unusual authentication schemes.
# See http://crbug.com/603378.
return
# Lazy-loader to identify Gerrit and Git hosts.
if gerrit_util.GceAuthenticator.is_gce():
return
self.GetCodereviewServer()
git_host = self._GetGitHost()
assert self._gerrit_server and self._gerrit_host
cookie_auth = gerrit_util.CookiesAuthenticator()
gerrit_auth = cookie_auth.get_auth_header(self._gerrit_host)
git_auth = cookie_auth.get_auth_header(git_host)
if gerrit_auth and git_auth:
if gerrit_auth == git_auth:
return
print((
'WARNING: you have different credentials for Gerrit and git hosts.\n'
' Check your %s or %s file for credentials of hosts:\n'
' %s\n'
' %s\n'
' %s') %
(cookie_auth.get_gitcookies_path(), cookie_auth.get_netrc_path(),
git_host, self._gerrit_host,
cookie_auth.get_new_password_message(git_host)))
if not force:
ask_for_data('If you know what you are doing, press Enter to continue, '
'Ctrl+C to abort.')
return
else:
      missing = (
          ([] if gerrit_auth else [self._gerrit_host]) +
          ([] if git_auth else [git_host]))
DieWithError('Credentials for the following hosts are required:\n'
' %s\n'
'These are read from %s (or legacy %s)\n'
'%s' % (
'\n '.join(missing),
cookie_auth.get_gitcookies_path(),
cookie_auth.get_netrc_path(),
cookie_auth.get_new_password_message(git_host)))
def _PostUnsetIssueProperties(self):
"""Which branch-specific properties to erase when unsetting issue."""
return ['gerritsquashhash']
def GetRieveldObjForPresubmit(self):
class ThisIsNotRietveldIssue(object):
def __nonzero__(self):
# This is a hack to make presubmit_support think that rietveld is not
# defined, yet still ensure that calls directly result in a decent
# exception message below.
return False
def __getattr__(self, attr):
print(
'You aren\'t using Rietveld at the moment, but Gerrit.\n'
'Using Rietveld in your PRESUBMIT scripts won\'t work.\n'
          'Please, either change your PRESUBMIT to not use rietveld_obj.%s,\n'
'or use Rietveld for codereview.\n'
'See also http://crbug.com/579160.' % attr)
raise NotImplementedError()
return ThisIsNotRietveldIssue()
def GetGerritObjForPresubmit(self):
return presubmit_support.GerritAccessor(self._GetGerritHost())
def GetStatus(self):
"""Apply a rough heuristic to give a simple summary of an issue's review
or CQ status, assuming adherence to a common workflow.
Returns None if no issue for this branch, or one of the following keywords:
* 'error' - error from review tool (including deleted issues)
* 'unsent' - no reviewers added
* 'waiting' - waiting for review
* 'reply' - waiting for owner to reply to review
* 'not lgtm' - Code-Review disapproval from at least one valid reviewer
* 'lgtm' - Code-Review approval from at least one valid reviewer
* 'commit' - in the commit queue
* 'closed' - abandoned
"""
if not self.GetIssue():
return None
try:
data = self._GetChangeDetail(['DETAILED_LABELS', 'CURRENT_REVISION'])
except (httplib.HTTPException, GerritChangeNotExists):
return 'error'
if data['status'] in ('ABANDONED', 'MERGED'):
return 'closed'
cq_label = data['labels'].get('Commit-Queue', {})
if cq_label:
votes = cq_label.get('all', [])
highest_vote = 0
for v in votes:
highest_vote = max(highest_vote, v.get('value', 0))
vote_value = str(highest_vote)
if vote_value != '0':
# Add a '+' if the value is not 0 to match the values in the label.
# The cq_label does not have negatives.
vote_value = '+' + vote_value
vote_text = cq_label.get('values', {}).get(vote_value, '')
if vote_text.lower() == 'commit':
return 'commit'
lgtm_label = data['labels'].get('Code-Review', {})
if lgtm_label:
if 'rejected' in lgtm_label:
return 'not lgtm'
if 'approved' in lgtm_label:
return 'lgtm'
if not data.get('reviewers', {}).get('REVIEWER', []):
return 'unsent'
messages = data.get('messages', [])
if messages:
owner = data['owner'].get('_account_id')
last_message_author = messages[-1].get('author', {}).get('_account_id')
if owner != last_message_author:
# Some reply from non-owner.
return 'reply'
return 'waiting'
def GetMostRecentPatchset(self):
data = self._GetChangeDetail(['CURRENT_REVISION'])
return data['revisions'][data['current_revision']]['_number']
def FetchDescription(self):
data = self._GetChangeDetail(['CURRENT_REVISION'])
current_rev = data['current_revision']
url = data['revisions'][current_rev]['fetch']['http']['url']
return gerrit_util.GetChangeDescriptionFromGitiles(url, current_rev)
def UpdateDescriptionRemote(self, description, force=False):
if gerrit_util.HasPendingChangeEdit(self._GetGerritHost(), self.GetIssue()):
if not force:
ask_for_data(
'The description cannot be modified while the issue has a pending '
'unpublished edit. Either publish the edit in the Gerrit web UI '
'or delete it.\n\n'
'Press Enter to delete the unpublished edit, Ctrl+C to abort.')
gerrit_util.DeletePendingChangeEdit(self._GetGerritHost(),
self.GetIssue())
gerrit_util.SetCommitMessage(self._GetGerritHost(), self.GetIssue(),
description, notify='NONE')
def CloseIssue(self):
gerrit_util.AbandonChange(self._GetGerritHost(), self.GetIssue(), msg='')
def GetApprovingReviewers(self):
"""Returns a list of reviewers approving the change.
Note: not necessarily committers.
"""
raise NotImplementedError()
def SubmitIssue(self, wait_for_merge=True):
gerrit_util.SubmitChange(self._GetGerritHost(), self.GetIssue(),
wait_for_merge=wait_for_merge)
def _GetChangeDetail(self, options=None, issue=None):
options = options or []
issue = issue or self.GetIssue()
assert issue, 'issue is required to query Gerrit'
try:
data = gerrit_util.GetChangeDetail(self._GetGerritHost(), str(issue),
options, ignore_404=False)
except gerrit_util.GerritError as e:
if e.http_status == 404:
raise GerritChangeNotExists(issue, self.GetCodereviewServer())
raise
return data
def _GetChangeCommit(self, issue=None):
issue = issue or self.GetIssue()
assert issue, 'issue is required to query Gerrit'
data = gerrit_util.GetChangeCommit(self._GetGerritHost(), str(issue))
if not data:
raise GerritChangeNotExists(issue, self.GetCodereviewServer())
return data
def CMDLand(self, force, bypass_hooks, verbose):
if git_common.is_dirty_git_tree('land'):
return 1
detail = self._GetChangeDetail(['CURRENT_REVISION', 'LABELS'])
if u'Commit-Queue' in detail.get('labels', {}):
if not force:
ask_for_data('\nIt seems this repository has a Commit Queue, '
'which can test and land changes for you. '
'Are you sure you wish to bypass it?\n'
'Press Enter to continue, Ctrl+C to abort.')
differs = True
last_upload = self._GitGetBranchConfigValue('gerritsquashhash')
# Note: git diff outputs nothing if there is no diff.
if not last_upload or RunGit(['diff', last_upload]).strip():
print('WARNING: some changes from local branch haven\'t been uploaded')
else:
if detail['current_revision'] == last_upload:
differs = False
else:
print('WARNING: local branch contents differ from latest uploaded '
'patchset')
if differs:
if not force:
ask_for_data(
'Do you want to submit latest Gerrit patchset and bypass hooks?\n'
'Press Enter to continue, Ctrl+C to abort.')
print('WARNING: bypassing hooks and submitting latest uploaded patchset')
elif not bypass_hooks:
hook_results = self.RunHook(
committing=True,
may_prompt=not force,
verbose=verbose,
change=self.GetChange(self.GetCommonAncestorWithUpstream(), None))
if not hook_results.should_continue():
return 1
self.SubmitIssue(wait_for_merge=True)
print('Issue %s has been submitted.' % self.GetIssueURL())
links = self._GetChangeCommit().get('web_links', [])
for link in links:
if link.get('name') == 'gitiles' and link.get('url'):
print('Landed as %s' % link.get('url'))
break
return 0
def CMDPatchWithParsedIssue(self, parsed_issue_arg, reject, nocommit,
directory):
assert not reject
assert not nocommit
assert not directory
assert parsed_issue_arg.valid
self._changelist.issue = parsed_issue_arg.issue
if parsed_issue_arg.hostname:
self._gerrit_host = parsed_issue_arg.hostname
self._gerrit_server = 'https://%s' % self._gerrit_host
try:
detail = self._GetChangeDetail(['ALL_REVISIONS'])
except GerritChangeNotExists as e:
DieWithError(str(e))
if not parsed_issue_arg.patchset:
# Use current revision by default.
revision_info = detail['revisions'][detail['current_revision']]
patchset = int(revision_info['_number'])
else:
patchset = parsed_issue_arg.patchset
for revision_info in detail['revisions'].itervalues():
if int(revision_info['_number']) == parsed_issue_arg.patchset:
break
else:
DieWithError('Couldn\'t find patchset %i in change %i' %
(parsed_issue_arg.patchset, self.GetIssue()))
fetch_info = revision_info['fetch']['http']
RunGit(['fetch', fetch_info['url'], fetch_info['ref']])
RunGit(['cherry-pick', 'FETCH_HEAD'])
self.SetIssue(self.GetIssue())
self.SetPatchset(patchset)
print('Committed patch for change %i patchset %i locally' %
(self.GetIssue(), self.GetPatchset()))
return 0
@staticmethod
def ParseIssueURL(parsed_url):
if not parsed_url.scheme or not parsed_url.scheme.startswith('http'):
return None
# Gerrit's new UI is https://domain/c/<issue_number>[/[patchset]]
# But current GWT UI is https://domain/#/c/<issue_number>[/[patchset]]
    # Short URLs like https://domain/<issue_number> can be used, but they don't
    # allow specifying the patchset (you'd 404); we still accept them here.
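    # Illustrative examples (hypothetical host):
    #   https://gerrit.example.com/c/1234/2  -> issue=1234, patchset=2
    #   https://gerrit.example.com/#/c/1234  -> issue=1234, patchset=None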
if parsed_url.path == '/':
part = parsed_url.fragment
else:
part = parsed_url.path
    match = re.match(r'(/c)?/(\d+)(/(\d+)?/?)?$', part)
if match:
return _ParsedIssueNumberArgument(
issue=int(match.group(2)),
patchset=int(match.group(4)) if match.group(4) else None,
hostname=parsed_url.netloc)
return None
def _GerritCommitMsgHookCheck(self, offer_removal):
hook = os.path.join(settings.GetRoot(), '.git', 'hooks', 'commit-msg')
if not os.path.exists(hook):
return
    # Crude attempt to distinguish the Gerrit Code Review hook from a
    # potentially custom, developer-made one.
data = gclient_utils.FileRead(hook)
if not('From Gerrit Code Review' in data and 'add_ChangeId()' in data):
return
print('Warning: you have Gerrit commit-msg hook installed.\n'
'It is not necessary for uploading with git cl in squash mode, '
'and may interfere with it in subtle ways.\n'
'We recommend you remove the commit-msg hook.')
if offer_removal:
reply = ask_for_data('Do you want to remove it now? [Yes/No]')
if reply.lower().startswith('y'):
gclient_utils.rm_file_or_tree(hook)
print('Gerrit commit-msg hook removed.')
else:
print('OK, will keep Gerrit commit-msg hook in place.')
def CMDUploadChange(self, options, args, change):
"""Upload the current branch to Gerrit."""
if options.squash and options.no_squash:
DieWithError('Can only use one of --squash or --no-squash')
if not options.squash and not options.no_squash:
# Load default for user, repo, squash=true, in this order.
options.squash = settings.GetSquashGerritUploads()
elif options.no_squash:
options.squash = False
# We assume the remote called "origin" is the one we want.
# It is probably not worthwhile to support different workflows.
gerrit_remote = 'origin'
remote, remote_branch = self.GetRemoteBranch()
    # Gerrit does not support the pending prefix at all.
branch = GetTargetRef(remote, remote_branch, options.target_branch,
pending_prefix_check=False)
# This may be None; default fallback value is determined in logic below.
title = options.title
if options.squash:
self._GerritCommitMsgHookCheck(offer_removal=not options.force)
if self.GetIssue():
# Try to get the message from a previous upload.
message = self.GetDescription()
if not message:
DieWithError(
'failed to fetch description from current Gerrit change %d\n'
'%s' % (self.GetIssue(), self.GetIssueURL()))
if not title:
default_title = RunGit(['show', '-s', '--format=%s', 'HEAD']).strip()
title = ask_for_data(
'Title for patchset [%s]: ' % default_title) or default_title
change_id = self._GetChangeDetail()['change_id']
while True:
footer_change_ids = git_footers.get_footer_change_id(message)
if footer_change_ids == [change_id]:
break
if not footer_change_ids:
message = git_footers.add_footer_change_id(message, change_id)
print('WARNING: appended missing Change-Id to change description')
continue
# There is already a valid footer but with different or several ids.
# Doing this automatically is non-trivial as we don't want to lose
# existing other footers, yet we want to append just 1 desired
# Change-Id. Thus, just create a new footer, but let user verify the
# new description.
message = '%s\n\nChange-Id: %s' % (message, change_id)
print(
'WARNING: change %s has Change-Id footer(s):\n'
' %s\n'
'but change has Change-Id %s, according to Gerrit.\n'
'Please, check the proposed correction to the description, '
'and edit it if necessary but keep the "Change-Id: %s" footer\n'
% (self.GetIssue(), '\n '.join(footer_change_ids), change_id,
change_id))
ask_for_data('Press enter to edit now, Ctrl+C to abort')
if not options.force:
change_desc = ChangeDescription(message)
change_desc.prompt(bug=options.bug)
message = change_desc.description
if not message:
DieWithError("Description is empty. Aborting...")
# Continue the while loop.
# Sanity check of this code - we should end up with proper message
# footer.
assert [change_id] == git_footers.get_footer_change_id(message)
change_desc = ChangeDescription(message)
else: # if not self.GetIssue()
if options.message:
message = options.message
else:
message = CreateDescriptionFromLog(args)
if options.title:
message = options.title + '\n\n' + message
change_desc = ChangeDescription(message)
if not options.force:
change_desc.prompt(bug=options.bug)
# On first upload, patchset title is always this string, while
# --title flag gets converted to first line of message.
title = 'Initial upload'
if not change_desc.description:
DieWithError("Description is empty. Aborting...")
message = change_desc.description
change_ids = git_footers.get_footer_change_id(message)
if len(change_ids) > 1:
DieWithError('too many Change-Id footers, at most 1 allowed.')
if not change_ids:
# Generate the Change-Id automatically.
message = git_footers.add_footer_change_id(
message, GenerateGerritChangeId(message))
change_desc.set_description(message)
change_ids = git_footers.get_footer_change_id(message)
assert len(change_ids) == 1
change_id = change_ids[0]
remote, upstream_branch = self.FetchUpstreamTuple(self.GetBranch())
      if remote == '.':
# If our upstream branch is local, we base our squashed commit on its
# squashed version.
upstream_branch_name = scm.GIT.ShortBranchName(upstream_branch)
# Check the squashed hash of the parent.
parent = RunGit(['config',
'branch.%s.gerritsquashhash' % upstream_branch_name],
error_ok=True).strip()
# Verify that the upstream branch has been uploaded too, otherwise
# Gerrit will create additional CLs when uploading.
if not parent or (RunGitSilent(['rev-parse', upstream_branch + ':']) !=
RunGitSilent(['rev-parse', parent + ':'])):
DieWithError(
'\nUpload upstream branch %s first.\n'
'It is likely that this branch has been rebased since its last '
'upload, so you just need to upload it again.\n'
'(If you uploaded it with --no-squash, then branch dependencies '
'are not supported, and you should reupload with --squash.)'
% upstream_branch_name)
else:
parent = self.GetCommonAncestorWithUpstream()
tree = RunGit(['rev-parse', 'HEAD:']).strip()
ref_to_push = RunGit(['commit-tree', tree, '-p', parent,
'-m', message]).strip()
else:
change_desc = ChangeDescription(
options.message or CreateDescriptionFromLog(args))
if not change_desc.description:
DieWithError("Description is empty. Aborting...")
if not git_footers.get_footer_change_id(change_desc.description):
DownloadGerritHook(False)
change_desc.set_description(self._AddChangeIdToCommitMessage(options,
args))
ref_to_push = 'HEAD'
parent = '%s/%s' % (gerrit_remote, branch)
change_id = git_footers.get_footer_change_id(change_desc.description)[0]
assert change_desc
commits = RunGitSilent(['rev-list', '%s..%s' % (parent,
ref_to_push)]).splitlines()
if len(commits) > 1:
print('WARNING: This will upload %d commits. Run the following command '
'to see which commits will be uploaded: ' % len(commits))
print('git log %s..%s' % (parent, ref_to_push))
print('You can also use `git squash-branch` to squash these into a '
'single commit.')
ask_for_data('About to upload; enter to confirm.')
if options.reviewers or options.tbr_owners:
change_desc.update_reviewers(options.reviewers, options.tbr_owners,
change)
# Extra options that can be specified at push time. Doc:
# https://gerrit-review.googlesource.com/Documentation/user-upload.html
refspec_opts = []
if change_desc.get_reviewers(tbr_only=True):
print('Adding self-LGTM (Code-Review +1) because of TBRs')
refspec_opts.append('l=Code-Review+1')
if title:
if not re.match(r'^[\w ]+$', title):
title = re.sub(r'[^\w ]', '', title)
print('WARNING: Patchset title may only contain alphanumeric chars '
'and spaces. Cleaned up title:\n%s' % title)
if not options.force:
ask_for_data('Press enter to continue, Ctrl+C to abort')
# Per doc, spaces must be converted to underscores, and Gerrit will do the
# reverse on its side.
refspec_opts.append('m=' + title.replace(' ', '_'))
if options.send_mail:
if not change_desc.get_reviewers():
DieWithError('Must specify reviewers to send email.')
refspec_opts.append('notify=ALL')
else:
refspec_opts.append('notify=NONE')
reviewers = change_desc.get_reviewers()
if reviewers:
refspec_opts.extend('r=' + email.strip() for email in reviewers)
if options.private:
refspec_opts.append('draft')
if options.topic:
# Documentation on Gerrit topics is here:
# https://gerrit-review.googlesource.com/Documentation/user-upload.html#topic
refspec_opts.append('topic=%s' % options.topic)
refspec_suffix = ''
if refspec_opts:
refspec_suffix = '%' + ','.join(refspec_opts)
assert ' ' not in refspec_suffix, (
'spaces not allowed in refspec: "%s"' % refspec_suffix)
refspec = '%s:refs/for/%s%s' % (ref_to_push, branch, refspec_suffix)
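    # Illustrative example of the resulting refspec (hypothetical values):
    #   HEAD:refs/for/refs/heads/master%m=Initial_upload,notify=NONE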
try:
push_stdout = gclient_utils.CheckCallAndFilter(
['git', 'push', gerrit_remote, refspec],
print_stdout=True,
# Flush after every line: useful for seeing progress when running as
# recipe.
filter_fn=lambda _: sys.stdout.flush())
except subprocess2.CalledProcessError:
DieWithError('Failed to create a change. Please examine output above '
'for the reason of the failure. ')
if options.squash:
regex = re.compile(r'remote:\s+https?://[\w\-\.\/]*/(\d+)\s.*')
change_numbers = [m.group(1)
for m in map(regex.match, push_stdout.splitlines())
if m]
if len(change_numbers) != 1:
DieWithError(
('Created|Updated %d issues on Gerrit, but only 1 expected.\n'
'Change-Id: %s') % (len(change_numbers), change_id))
self.SetIssue(change_numbers[0])
self._GitSetBranchConfigValue('gerritsquashhash', ref_to_push)
# Add cc's from the CC_LIST and --cc flag (if any).
cc = self.GetCCList().split(',')
if options.cc:
cc.extend(options.cc)
cc = filter(None, [email.strip() for email in cc])
if change_desc.get_cced():
cc.extend(change_desc.get_cced())
if cc:
gerrit_util.AddReviewers(
self._GetGerritHost(), self.GetIssue(), cc,
is_reviewer=False, notify=bool(options.send_mail))
return 0
def _AddChangeIdToCommitMessage(self, options, args):
"""Re-commits using the current message, assumes the commit hook is in
place.
"""
log_desc = options.message or CreateDescriptionFromLog(args)
git_command = ['commit', '--amend', '-m', log_desc]
RunGit(git_command)
new_log_desc = CreateDescriptionFromLog(args)
if git_footers.get_footer_change_id(new_log_desc):
print('git-cl: Added Change-Id to commit message.')
return new_log_desc
else:
DieWithError('ERROR: Gerrit commit-msg hook not installed.')
def SetCQState(self, new_state):
"""Sets the Commit-Queue label assuming canonical CQ config for Gerrit."""
vote_map = {
_CQState.NONE: 0,
_CQState.DRY_RUN: 1,
_CQState.COMMIT : 2,
}
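    # E.g. with the canonical CQ config assumed above, a dry run posts a
    # Commit-Queue+1 vote and a full CQ run posts Commit-Queue+2.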
kwargs = {'labels': {'Commit-Queue': vote_map[new_state]}}
if new_state == _CQState.DRY_RUN:
      # Don't spam everybody (reviewers/owner) about dry runs.
kwargs['notify'] = 'NONE'
gerrit_util.SetReview(self._GetGerritHost(), self.GetIssue(), **kwargs)
def CannotTriggerTryJobReason(self):
try:
data = self._GetChangeDetail()
except GerritChangeNotExists:
return 'Gerrit doesn\'t know about your change %s' % self.GetIssue()
if data['status'] in ('ABANDONED', 'MERGED'):
return 'CL %s is closed' % self.GetIssue()
def GetTryjobProperties(self, patchset=None):
"""Returns dictionary of properties to launch tryjob."""
data = self._GetChangeDetail(['ALL_REVISIONS'])
patchset = int(patchset or self.GetPatchset())
assert patchset
revision_data = None # Pylint wants it to be defined.
for revision_data in data['revisions'].itervalues():
if int(revision_data['_number']) == patchset:
break
else:
raise Exception('Patchset %d is not known in Gerrit change %d' %
(patchset, self.GetIssue()))
return {
'patch_issue': self.GetIssue(),
'patch_set': patchset or self.GetPatchset(),
'patch_project': data['project'],
'patch_storage': 'gerrit',
'patch_ref': revision_data['fetch']['http']['ref'],
'patch_repository_url': revision_data['fetch']['http']['url'],
'patch_gerrit_url': self.GetCodereviewServer(),
}
def GetIssueOwner(self):
return self._GetChangeDetail(['DETAILED_ACCOUNTS'])['owner']['email']
_CODEREVIEW_IMPLEMENTATIONS = {
'rietveld': _RietveldChangelistImpl,
'gerrit': _GerritChangelistImpl,
}
def _add_codereview_issue_select_options(parser, extra=""):
_add_codereview_select_options(parser)
text = ('Operate on this issue number instead of the current branch\'s '
'implicit issue.')
if extra:
text += ' '+extra
parser.add_option('-i', '--issue', type=int, help=text)
def _process_codereview_issue_select_options(parser, options):
_process_codereview_select_options(parser, options)
if options.issue is not None and not options.forced_codereview:
parser.error('--issue must be specified with either --rietveld or --gerrit')
def _add_codereview_select_options(parser):
"""Appends --gerrit and --rietveld options to force specific codereview."""
parser.codereview_group = optparse.OptionGroup(
parser, 'EXPERIMENTAL! Codereview override options')
parser.add_option_group(parser.codereview_group)
parser.codereview_group.add_option(
'--gerrit', action='store_true',
help='Force the use of Gerrit for codereview')
parser.codereview_group.add_option(
'--rietveld', action='store_true',
help='Force the use of Rietveld for codereview')
def _process_codereview_select_options(parser, options):
if options.gerrit and options.rietveld:
parser.error('Options --gerrit and --rietveld are mutually exclusive')
options.forced_codereview = None
if options.gerrit:
options.forced_codereview = 'gerrit'
elif options.rietveld:
options.forced_codereview = 'rietveld'
def _get_bug_line_values(default_project, bugs):
"""Given default_project and comma separated list of bugs, yields bug line
values.
Each bug can be either:
* a number, which is combined with default_project
* string, which is left as is.
This function may produce more than one line, because bugdroid expects one
project per line.
>>> list(_get_bug_line_values('v8', '123,chromium:789'))
['v8:123', 'chromium:789']
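  An additional illustrative example, with no default project:
  >>> list(_get_bug_line_values(None, '123,456'))
  ['123,456']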
"""
default_bugs = []
others = []
for bug in bugs.split(','):
bug = bug.strip()
if bug:
try:
default_bugs.append(int(bug))
except ValueError:
others.append(bug)
if default_bugs:
default_bugs = ','.join(map(str, default_bugs))
if default_project:
yield '%s:%s' % (default_project, default_bugs)
else:
yield default_bugs
for other in sorted(others):
    # Don't bother finding common prefixes; CLs with >2 bugs are very rare.
yield other
class ChangeDescription(object):
"""Contains a parsed form of the change description."""
R_LINE = r'^[ \t]*(TBR|R)[ \t]*=[ \t]*(.*?)[ \t]*$'
CC_LINE = r'^[ \t]*(CC)[ \t]*=[ \t]*(.*?)[ \t]*$'
BUG_LINE = r'^[ \t]*(BUG)[ \t]*=[ \t]*(.*?)[ \t]*$'
CHERRY_PICK_LINE = r'^\(cherry picked from commit [a-fA-F0-9]{40}\)$'
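  # Illustrative examples of lines matched by the regexps above
  # (hypothetical addresses):
  #   R=foo@example.com, bar@example.com
  #   TBR=baz@example.com
  #   CC=someone@example.com
  #   BUG=123,chromium:456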
def __init__(self, description):
self._description_lines = (description or '').strip().splitlines()
@property # www.logilab.org/ticket/89786
def description(self): # pylint: disable=method-hidden
return '\n'.join(self._description_lines)
def set_description(self, desc):
if isinstance(desc, basestring):
lines = desc.splitlines()
else:
lines = [line.rstrip() for line in desc]
while lines and not lines[0]:
lines.pop(0)
while lines and not lines[-1]:
lines.pop(-1)
self._description_lines = lines
def update_reviewers(self, reviewers, add_owners_tbr=False, change=None):
"""Rewrites the R=/TBR= line(s) as a single line each."""
assert isinstance(reviewers, list), reviewers
if not reviewers and not add_owners_tbr:
return
reviewers = reviewers[:]
    # Get the set of R= and TBR= lines and remove them from the description.
regexp = re.compile(self.R_LINE)
matches = [regexp.match(line) for line in self._description_lines]
new_desc = [l for i, l in enumerate(self._description_lines)
if not matches[i]]
self.set_description(new_desc)
# Construct new unified R= and TBR= lines.
r_names = []
tbr_names = []
for match in matches:
if not match:
continue
people = cleanup_list([match.group(2).strip()])
if match.group(1) == 'TBR':
tbr_names.extend(people)
else:
r_names.extend(people)
for name in r_names:
if name not in reviewers:
reviewers.append(name)
if add_owners_tbr:
owners_db = owners.Database(change.RepositoryRoot(),
fopen=file, os_path=os.path)
all_reviewers = set(tbr_names + reviewers)
missing_files = owners_db.files_not_covered_by(change.LocalPaths(),
all_reviewers)
tbr_names.extend(owners_db.reviewers_for(missing_files,
change.author_email))
new_r_line = 'R=' + ', '.join(reviewers) if reviewers else None
new_tbr_line = 'TBR=' + ', '.join(tbr_names) if tbr_names else None
# Put the new lines in the description where the old first R= line was.
line_loc = next((i for i, match in enumerate(matches) if match), -1)
if 0 <= line_loc < len(self._description_lines):
if new_tbr_line:
self._description_lines.insert(line_loc, new_tbr_line)
if new_r_line:
self._description_lines.insert(line_loc, new_r_line)
else:
if new_r_line:
self.append_footer(new_r_line)
if new_tbr_line:
self.append_footer(new_tbr_line)
def prompt(self, bug=None):
"""Asks the user to update the description."""
self.set_description([
'# Enter a description of the change.',
'# This will be displayed on the codereview site.',
'# The first line will also be used as the subject of the review.',
'#--------------------This line is 72 characters long'
'--------------------',
] + self._description_lines)
regexp = re.compile(self.BUG_LINE)
if not any((regexp.match(line) for line in self._description_lines)):
prefix = settings.GetBugPrefix()
values = list(_get_bug_line_values(prefix, bug or '')) or [prefix]
for value in values:
# TODO(tandrii): change this to 'Bug: xxx' to be a proper Gerrit footer.
self.append_footer('BUG=%s' % value)
content = gclient_utils.RunEditor(self.description, True,
git_editor=settings.GetGitEditor())
if not content:
DieWithError('Running editor failed')
lines = content.splitlines()
# Strip off comments.
clean_lines = [line.rstrip() for line in lines if not line.startswith('#')]
if not clean_lines:
DieWithError('No CL description, aborting')
self.set_description(clean_lines)
def append_footer(self, line):
"""Adds a footer line to the description.
Differentiates legacy "KEY=xxx" footers (used to be called tags) and
Gerrit's footers in the form of "Footer-Key: footer any value" and ensures
that Gerrit footers are always at the end.
"""
parsed_footer_line = git_footers.parse_footer(line)
if parsed_footer_line:
# Line is a gerrit footer in the form: Footer-Key: any value.
# Thus, must be appended observing Gerrit footer rules.
self.set_description(
git_footers.add_footer(self.description,
key=parsed_footer_line[0],
value=parsed_footer_line[1]))
return
if not self._description_lines:
self._description_lines.append(line)
return
top_lines, gerrit_footers, _ = git_footers.split_footers(self.description)
if gerrit_footers:
# git_footers.split_footers ensures that there is an empty line before
# actual (gerrit) footers, if any. We have to keep it that way.
assert top_lines and top_lines[-1] == ''
top_lines, separator = top_lines[:-1], top_lines[-1:]
else:
separator = [] # No need for separator if there are no gerrit_footers.
prev_line = top_lines[-1] if top_lines else ''
if (not presubmit_support.Change.TAG_LINE_RE.match(prev_line) or
not presubmit_support.Change.TAG_LINE_RE.match(line)):
top_lines.append('')
top_lines.append(line)
self._description_lines = top_lines + separator + gerrit_footers
def get_reviewers(self, tbr_only=False):
"""Retrieves the list of reviewers."""
matches = [re.match(self.R_LINE, line) for line in self._description_lines]
reviewers = [match.group(2).strip()
for match in matches
if match and (not tbr_only or match.group(1).upper() == 'TBR')]
return cleanup_list(reviewers)
def get_cced(self):
"""Retrieves the list of reviewers."""
matches = [re.match(self.CC_LINE, line) for line in self._description_lines]
cced = [match.group(2).strip() for match in matches if match]
return cleanup_list(cced)
def update_with_git_number_footers(self, parent_hash, parent_msg, dest_ref):
"""Updates this commit description given the parent.
This is essentially what Gnumbd used to do.
Consult https://goo.gl/WMmpDe for more details.
"""
assert parent_msg # No, orphan branch creation isn't supported.
assert parent_hash
assert dest_ref
parent_footer_map = git_footers.parse_footers(parent_msg)
    # This will also happily parse svn-position, which Gnumbd no longer
    # supports. While we'd generate correct footers, the verifier plugin
    # installed in Gerrit will block such a commit (i.e. the git push below
    # will fail).
parent_position = git_footers.get_position(parent_footer_map)
# Cherry-picks may have last line obscuring their prior footers,
# from git_footers perspective. This is also what Gnumbd did.
cp_line = None
if (self._description_lines and
re.match(self.CHERRY_PICK_LINE, self._description_lines[-1])):
cp_line = self._description_lines.pop()
top_lines, _, parsed_footers = git_footers.split_footers(self.description)
# Original-ify all Cr- footers, to avoid re-lands, cherry-picks, or just
# user interference with actual footers we'd insert below.
for i, (k, v) in enumerate(parsed_footers):
if k.startswith('Cr-'):
parsed_footers[i] = (k.replace('Cr-', 'Cr-Original-'), v)
# Add Position and Lineage footers based on the parent.
lineage = list(reversed(parent_footer_map.get('Cr-Branched-From', [])))
if parent_position[0] == dest_ref:
# Same branch as parent.
number = int(parent_position[1]) + 1
else:
number = 1 # New branch, and extra lineage.
lineage.insert(0, '%s-%s@{#%d}' % (parent_hash, parent_position[0],
int(parent_position[1])))
parsed_footers.append(('Cr-Commit-Position',
'%s@{#%d}' % (dest_ref, number)))
parsed_footers.extend(('Cr-Branched-From', v) for v in lineage)
self._description_lines = top_lines
if cp_line:
self._description_lines.append(cp_line)
if self._description_lines[-1] != '':
self._description_lines.append('') # Ensure footer separator.
self._description_lines.extend('%s: %s' % kv for kv in parsed_footers)
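  # Illustrative outcome, following the footer shapes built above: with a
  # parent at 'refs/heads/master@{#100}' and dest_ref 'refs/heads/master' this
  # appends 'Cr-Commit-Position: refs/heads/master@{#101}'; for a different
  # dest_ref the counter restarts at #1 and the parent position is prepended
  # to the 'Cr-Branched-From' lineage.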
def get_approving_reviewers(props):
"""Retrieves the reviewers that approved a CL from the issue properties with
messages.
Note that the list may contain reviewers that are not committer, thus are not
considered by the CQ.
"""
return sorted(
set(
message['sender']
for message in props['messages']
if message['approval'] and message['sender'] in props['reviewers']
)
)
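# Illustrative shape of the Rietveld properties consumed above (inferred from
# the lookups, not a full schema):
#   props = {'reviewers': ['a@x'],
#            'messages': [{'sender': 'a@x', 'approval': True}]}
#   get_approving_reviewers(props) == ['a@x']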
def FindCodereviewSettingsFile(filename='codereview.settings'):
"""Finds the given file starting in the cwd and going up.
Only looks up to the top of the repository unless an
'inherit-review-settings-ok' file exists in the root of the repository.
"""
inherit_ok_file = 'inherit-review-settings-ok'
cwd = os.getcwd()
root = settings.GetRoot()
if os.path.isfile(os.path.join(root, inherit_ok_file)):
root = '/'
while True:
if filename in os.listdir(cwd):
if os.path.isfile(os.path.join(cwd, filename)):
return open(os.path.join(cwd, filename))
if cwd == root:
break
cwd = os.path.dirname(cwd)
def LoadCodereviewSettingsFromFile(fileobj):
"""Parse a codereview.settings file and updates hooks."""
keyvals = gclient_utils.ParseCodereviewSettingsContent(fileobj.read())
def SetProperty(name, setting, unset_error_ok=False):
fullname = 'rietveld.' + name
if setting in keyvals:
RunGit(['config', fullname, keyvals[setting]])
else:
RunGit(['config', '--unset-all', fullname], error_ok=unset_error_ok)
if not keyvals.get('GERRIT_HOST', False):
SetProperty('server', 'CODE_REVIEW_SERVER')
# Only server setting is required. Other settings can be absent.
# In that case, we ignore errors raised during option deletion attempt.
SetProperty('cc', 'CC_LIST', unset_error_ok=True)
SetProperty('private', 'PRIVATE', unset_error_ok=True)
SetProperty('tree-status-url', 'STATUS', unset_error_ok=True)
SetProperty('viewvc-url', 'VIEW_VC', unset_error_ok=True)
SetProperty('bug-prefix', 'BUG_PREFIX', unset_error_ok=True)
SetProperty('cpplint-regex', 'LINT_REGEX', unset_error_ok=True)
SetProperty('cpplint-ignore-regex', 'LINT_IGNORE_REGEX', unset_error_ok=True)
SetProperty('project', 'PROJECT', unset_error_ok=True)
SetProperty('pending-ref-prefix', 'PENDING_REF_PREFIX', unset_error_ok=True)
SetProperty('run-post-upload-hook', 'RUN_POST_UPLOAD_HOOK',
unset_error_ok=True)
if 'GERRIT_HOST' in keyvals:
RunGit(['config', 'gerrit.host', keyvals['GERRIT_HOST']])
if 'GERRIT_SQUASH_UPLOADS' in keyvals:
RunGit(['config', 'gerrit.squash-uploads',
keyvals['GERRIT_SQUASH_UPLOADS']])
if 'GERRIT_SKIP_ENSURE_AUTHENTICATED' in keyvals:
RunGit(['config', 'gerrit.skip-ensure-authenticated',
keyvals['GERRIT_SKIP_ENSURE_AUTHENTICATED']])
if 'PUSH_URL_CONFIG' in keyvals and 'ORIGIN_URL_CONFIG' in keyvals:
    # Should be of the form:
    #   PUSH_URL_CONFIG: url.ssh://gitrw.chromium.org.pushinsteadof
    #   ORIGIN_URL_CONFIG: http://src.chromium.org/git
RunGit(['config', keyvals['PUSH_URL_CONFIG'],
keyvals['ORIGIN_URL_CONFIG']])
def urlretrieve(source, destination):
"""urllib is broken for SSL connections via a proxy therefore we
can't use urllib.urlretrieve()."""
with open(destination, 'w') as f:
f.write(urllib2.urlopen(source).read())
def hasSheBang(fname):
"""Checks fname is a #! script."""
with open(fname) as f:
return f.read(2).startswith('#!')
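# Example (illustrative): a commit-msg hook starting with '#!/bin/sh' makes
# hasSheBang() return True; only the first two bytes of the file are read.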
# TODO(bpastene) Remove once a cleaner fix to crbug.com/600473 presents itself.
def DownloadHooks(*args, **kwargs):
pass
def DownloadGerritHook(force):
"""Download and install Gerrit commit-msg hook.
Args:
force: True to update hooks. False to install hooks if not present.
"""
if not settings.GetIsGerrit():
return
src = 'https://gerrit-review.googlesource.com/tools/hooks/commit-msg'
dst = os.path.join(settings.GetRoot(), '.git', 'hooks', 'commit-msg')
if not os.access(dst, os.X_OK):
if os.path.exists(dst):
if not force:
return
try:
urlretrieve(src, dst)
if not hasSheBang(dst):
DieWithError('Not a script: %s\n'
'You need to download from\n%s\n'
'into .git/hooks/commit-msg and '
'chmod +x .git/hooks/commit-msg' % (dst, src))
os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
except Exception:
if os.path.exists(dst):
os.remove(dst)
DieWithError('\nFailed to download hooks.\n'
'You need to download from\n%s\n'
'into .git/hooks/commit-msg and '
'chmod +x .git/hooks/commit-msg' % src)
def GetRietveldCodereviewSettingsInteractively():
"""Prompt the user for settings."""
server = settings.GetDefaultServerUrl(error_ok=True)
prompt = 'Rietveld server (host[:port])'
prompt += ' [%s]' % (server or DEFAULT_SERVER)
newserver = ask_for_data(prompt + ':')
if not server and not newserver:
newserver = DEFAULT_SERVER
if newserver:
newserver = gclient_utils.UpgradeToHttps(newserver)
if newserver != server:
RunGit(['config', 'rietveld.server', newserver])
def SetProperty(initial, caption, name, is_url):
prompt = caption
if initial:
prompt += ' ("x" to clear) [%s]' % initial
new_val = ask_for_data(prompt + ':')
if new_val == 'x':
RunGit(['config', '--unset-all', 'rietveld.' + name], error_ok=True)
elif new_val:
if is_url:
new_val = gclient_utils.UpgradeToHttps(new_val)
if new_val != initial:
RunGit(['config', 'rietveld.' + name, new_val])
SetProperty(settings.GetDefaultCCList(), 'CC list', 'cc', False)
SetProperty(settings.GetDefaultPrivateFlag(),
'Private flag (rietveld only)', 'private', False)
SetProperty(settings.GetTreeStatusUrl(error_ok=True), 'Tree status URL',
'tree-status-url', False)
SetProperty(settings.GetViewVCUrl(), 'ViewVC URL', 'viewvc-url', True)
SetProperty(settings.GetBugPrefix(), 'Bug Prefix', 'bug-prefix', False)
SetProperty(settings.GetRunPostUploadHook(), 'Run Post Upload Hook',
'run-post-upload-hook', False)
@subcommand.usage('[repo root containing codereview.settings]')
def CMDconfig(parser, args):
"""Edits configuration for this tree."""
print('WARNING: git cl config works for Rietveld only')
# TODO(tandrii): remove this once we switch to Gerrit.
# See bugs http://crbug.com/637561 and http://crbug.com/600469.
parser.add_option('--activate-update', action='store_true',
help='activate auto-updating [rietveld] section in '
'.git/config')
parser.add_option('--deactivate-update', action='store_true',
help='deactivate auto-updating [rietveld] section in '
'.git/config')
options, args = parser.parse_args(args)
if options.deactivate_update:
RunGit(['config', 'rietveld.autoupdate', 'false'])
return
if options.activate_update:
RunGit(['config', '--unset', 'rietveld.autoupdate'])
return
if len(args) == 0:
GetRietveldCodereviewSettingsInteractively()
return 0
url = args[0]
if not url.endswith('codereview.settings'):
url = os.path.join(url, 'codereview.settings')
# Load code review settings and download hooks (if available).
LoadCodereviewSettingsFromFile(urllib2.urlopen(url))
return 0
def CMDbaseurl(parser, args):
"""Gets or sets base-url for this branch."""
branchref = RunGit(['symbolic-ref', 'HEAD']).strip()
branch = ShortBranchName(branchref)
_, args = parser.parse_args(args)
if not args:
print('Current base-url:')
return RunGit(['config', 'branch.%s.base-url' % branch],
error_ok=False).strip()
else:
print('Setting base-url to %s' % args[0])
return RunGit(['config', 'branch.%s.base-url' % branch, args[0]],
error_ok=False).strip()
def color_for_status(status):
"""Maps a Changelist status to color, for CMDstatus and other tools."""
return {
'unsent': Fore.RED,
'waiting': Fore.BLUE,
'reply': Fore.YELLOW,
'lgtm': Fore.GREEN,
'commit': Fore.MAGENTA,
'closed': Fore.CYAN,
'error': Fore.WHITE,
}.get(status, Fore.WHITE)
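# Example (illustrative): color_for_status('lgtm') yields Fore.GREEN, while an
# unrecognized status falls back to Fore.WHITE.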
def get_cl_statuses(changes, fine_grained, max_processes=None):
"""Returns a blocking iterable of (cl, status) for given branches.
If fine_grained is true, this will fetch CL statuses from the server.
  Otherwise, this simply indicates whether there's a matching url for the given
  branches.
If max_processes is specified, it is used as the maximum number of processes
to spawn to fetch CL status from the server. Otherwise 1 process per branch is
spawned.
See GetStatus() for a list of possible statuses.
"""
# Silence upload.py otherwise it becomes unwieldy.
upload.verbosity = 0
if fine_grained:
# Process one branch synchronously to work through authentication, then
# spawn processes to process all the other branches in parallel.
if changes:
def fetch(cl):
try:
return (cl, cl.GetStatus())
except:
# See http://crbug.com/629863.
logging.exception('failed to fetch status for %s:', cl)
raise
yield fetch(changes[0])
changes_to_fetch = changes[1:]
if not changes_to_fetch:
# Exit early if there was only one branch to fetch.
return
pool = ThreadPool(
min(max_processes, len(changes_to_fetch))
if max_processes is not None
else max(len(changes_to_fetch), 1))
fetched_cls = set()
it = pool.imap_unordered(fetch, changes_to_fetch).__iter__()
while True:
try:
row = it.next(timeout=5)
except multiprocessing.TimeoutError:
break
fetched_cls.add(row[0])
yield row
# Add any branches that failed to fetch.
for cl in set(changes_to_fetch) - fetched_cls:
yield (cl, 'error')
else:
# Do not use GetApprovingReviewers(), since it requires an HTTP request.
for cl in changes:
yield (cl, 'waiting' if cl.GetIssueURL() else 'error')
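# Sketch of typical consumption (CMDstatus below does the real thing):
#   for cl, status in get_cl_statuses(changes, fine_grained=False):
#     print('%s: %s' % (cl.GetBranch(), status))
# With fine_grained=False the status is just 'waiting' or 'error', depending on
# whether the branch has an issue URL.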
def upload_branch_deps(cl, args):
"""Uploads CLs of local branches that are dependents of the current branch.
If the local branch dependency tree looks like:
test1 -> test2.1 -> test3.1
-> test3.2
-> test2.2 -> test3.3
and you run "git cl upload --dependencies" from test1 then "git cl upload" is
run on the dependent branches in this order:
test2.1, test3.1, test3.2, test2.2, test3.3
Note: This function does not rebase your local dependent branches. Use it when
you make a change to the parent branch that will not conflict with its
dependent branches, and you would like their dependencies updated in
Rietveld.
"""
if git_common.is_dirty_git_tree('upload-branch-deps'):
return 1
root_branch = cl.GetBranch()
if root_branch is None:
DieWithError('Can\'t find dependent branches from detached HEAD state. '
'Get on a branch!')
if not cl.GetIssue() or not cl.GetPatchset():
DieWithError('Current branch does not have an uploaded CL. We cannot set '
'patchset dependencies without an uploaded CL.')
branches = RunGit(['for-each-ref',
'--format=%(refname:short) %(upstream:short)',
'refs/heads'])
if not branches:
print('No local branches found.')
return 0
# Create a dictionary of all local branches to the branches that are dependent
# on it.
tracked_to_dependents = collections.defaultdict(list)
for b in branches.splitlines():
tokens = b.split()
if len(tokens) == 2:
branch_name, tracked = tokens
tracked_to_dependents[tracked].append(branch_name)
print()
print('The dependent local branches of %s are:' % root_branch)
dependents = []
def traverse_dependents_preorder(branch, padding=''):
dependents_to_process = tracked_to_dependents.get(branch, [])
padding += ' '
for dependent in dependents_to_process:
print('%s%s' % (padding, dependent))
dependents.append(dependent)
traverse_dependents_preorder(dependent, padding)
traverse_dependents_preorder(root_branch)
print()
if not dependents:
print('There are no dependent local branches for %s' % root_branch)
return 0
print('This command will checkout all dependent branches and run '
'"git cl upload".')
ask_for_data('[Press enter to continue or ctrl-C to quit]')
# Add a default patchset title to all upload calls in Rietveld.
if not cl.IsGerrit():
args.extend(['-t', 'Updated patchset dependency'])
# Record all dependents that failed to upload.
failures = {}
# Go through all dependents, checkout the branch and upload.
try:
for dependent_branch in dependents:
print()
print('--------------------------------------')
print('Running "git cl upload" from %s:' % dependent_branch)
RunGit(['checkout', '-q', dependent_branch])
print()
try:
if CMDupload(OptionParser(), args) != 0:
print('Upload failed for %s!' % dependent_branch)
failures[dependent_branch] = 1
except: # pylint: disable=bare-except
failures[dependent_branch] = 1
print()
finally:
# Swap back to the original root branch.
RunGit(['checkout', '-q', root_branch])
print()
print('Upload complete for dependent branches!')
for dependent_branch in dependents:
upload_status = 'failed' if failures.get(dependent_branch) else 'succeeded'
print(' %s : %s' % (dependent_branch, upload_status))
print()
return 0
def CMDarchive(parser, args):
"""Archives and deletes branches associated with closed changelists."""
parser.add_option(
'-j', '--maxjobs', action='store', type=int,
help='The maximum number of jobs to use when retrieving review status.')
parser.add_option(
'-f', '--force', action='store_true',
help='Bypasses the confirmation prompt.')
parser.add_option(
'-d', '--dry-run', action='store_true',
help='Skip the branch tagging and removal steps.')
parser.add_option(
'-t', '--notags', action='store_true',
help='Do not tag archived branches. '
'Note: local commit history may be lost.')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
if args:
parser.error('Unsupported args: %s' % ' '.join(args))
auth_config = auth.extract_auth_config_from_options(options)
branches = RunGit(['for-each-ref', '--format=%(refname)', 'refs/heads'])
if not branches:
return 0
print('Finding all branches associated with closed issues...')
changes = [Changelist(branchref=b, auth_config=auth_config)
for b in branches.splitlines()]
alignment = max(5, max(len(c.GetBranch()) for c in changes))
statuses = get_cl_statuses(changes,
fine_grained=True,
max_processes=options.maxjobs)
proposal = [(cl.GetBranch(),
'git-cl-archived-%s-%s' % (cl.GetIssue(), cl.GetBranch()))
for cl, status in statuses
if status == 'closed']
proposal.sort()
if not proposal:
print('No branches with closed codereview issues found.')
return 0
current_branch = GetCurrentBranch()
print('\nBranches with closed issues that will be archived:\n')
if options.notags:
for next_item in proposal:
print(' ' + next_item[0])
else:
print('%*s | %s' % (alignment, 'Branch name', 'Archival tag name'))
for next_item in proposal:
print('%*s %s' % (alignment, next_item[0], next_item[1]))
# Quit now on precondition failure or if instructed by the user, either
# via an interactive prompt or by command line flags.
if options.dry_run:
print('\nNo changes were made (dry run).\n')
return 0
elif any(branch == current_branch for branch, _ in proposal):
print('You are currently on a branch \'%s\' which is associated with a '
'closed codereview issue, so archive cannot proceed. Please '
'checkout another branch and run this command again.' %
current_branch)
return 1
elif not options.force:
answer = ask_for_data('\nProceed with deletion (Y/n)? ').lower()
if answer not in ('y', ''):
print('Aborted.')
return 1
for branch, tagname in proposal:
if not options.notags:
RunGit(['tag', tagname, branch])
RunGit(['branch', '-D', branch])
print('\nJob\'s done!')
return 0
def CMDstatus(parser, args):
"""Show status of changelists.
Colors are used to tell the state of the CL unless --fast is used:
- Red not sent for review or broken
- Blue waiting for review
- Yellow waiting for you to reply to review
- Green LGTM'ed
- Magenta in the commit queue
- Cyan was committed, branch can be deleted
Also see 'git cl comments'.
"""
parser.add_option('--field',
help='print only specific field (desc|id|patch|status|url)')
parser.add_option('-f', '--fast', action='store_true',
help='Do not retrieve review status')
parser.add_option(
'-j', '--maxjobs', action='store', type=int,
help='The maximum number of jobs to use when retrieving review status')
auth.add_auth_options(parser)
_add_codereview_issue_select_options(
parser, 'Must be in conjunction with --field.')
options, args = parser.parse_args(args)
_process_codereview_issue_select_options(parser, options)
if args:
parser.error('Unsupported args: %s' % args)
auth_config = auth.extract_auth_config_from_options(options)
if options.issue is not None and not options.field:
parser.error('--field must be specified with --issue')
if options.field:
cl = Changelist(auth_config=auth_config, issue=options.issue,
codereview=options.forced_codereview)
if options.field.startswith('desc'):
print(cl.GetDescription())
elif options.field == 'id':
issueid = cl.GetIssue()
if issueid:
print(issueid)
elif options.field == 'patch':
patchset = cl.GetPatchset()
if patchset:
print(patchset)
elif options.field == 'status':
print(cl.GetStatus())
elif options.field == 'url':
url = cl.GetIssueURL()
if url:
print(url)
return 0
branches = RunGit(['for-each-ref', '--format=%(refname)', 'refs/heads'])
if not branches:
print('No local branch found.')
return 0
changes = [
Changelist(branchref=b, auth_config=auth_config)
for b in branches.splitlines()]
print('Branches associated with reviews:')
output = get_cl_statuses(changes,
fine_grained=not options.fast,
max_processes=options.maxjobs)
branch_statuses = {}
alignment = max(5, max(len(ShortBranchName(c.GetBranch())) for c in changes))
for cl in sorted(changes, key=lambda c: c.GetBranch()):
branch = cl.GetBranch()
while branch not in branch_statuses:
c, status = output.next()
branch_statuses[c.GetBranch()] = status
status = branch_statuses.pop(branch)
url = cl.GetIssueURL()
if url and (not status or status == 'error'):
# The issue probably doesn't exist anymore.
url += ' (broken)'
color = color_for_status(status)
reset = Fore.RESET
if not setup_color.IS_TTY:
color = ''
reset = ''
status_str = '(%s)' % status if status else ''
print(' %*s : %s%s %s%s' % (
alignment, ShortBranchName(branch), color, url,
status_str, reset))
cl = Changelist(auth_config=auth_config)
print()
print('Current branch:',)
print(cl.GetBranch())
if not cl.GetIssue():
print('No issue assigned.')
return 0
print('Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL()))
if not options.fast:
print('Issue description:')
print(cl.GetDescription(pretty=True))
return 0
def colorize_CMDstatus_doc():
"""To be called once in main() to add colors to git cl status help."""
colors = [i for i in dir(Fore) if i[0].isupper()]
def colorize_line(line):
for color in colors:
if color in line.upper():
# Extract whitespaces first and the leading '-'.
indent = len(line) - len(line.lstrip(' ')) + 1
return line[:indent] + getattr(Fore, color) + line[indent:] + Fore.RESET
return line
lines = CMDstatus.__doc__.splitlines()
CMDstatus.__doc__ = '\n'.join(colorize_line(l) for l in lines)
def write_json(path, contents):
with open(path, 'w') as f:
json.dump(contents, f)
@subcommand.usage('[issue_number]')
def CMDissue(parser, args):
"""Sets or displays the current code review issue number.
Pass issue number 0 to clear the current issue.
"""
parser.add_option('-r', '--reverse', action='store_true',
help='Lookup the branch(es) for the specified issues. If '
'no issues are specified, all branches with mapped '
'issues will be listed.')
parser.add_option('--json', help='Path to JSON output file.')
_add_codereview_select_options(parser)
options, args = parser.parse_args(args)
_process_codereview_select_options(parser, options)
if options.reverse:
branches = RunGit(['for-each-ref', 'refs/heads',
'--format=%(refname:short)']).splitlines()
# Reverse issue lookup.
issue_branch_map = {}
for branch in branches:
cl = Changelist(branchref=branch)
issue_branch_map.setdefault(cl.GetIssue(), []).append(branch)
if not args:
args = sorted(issue_branch_map.iterkeys())
result = {}
for issue in args:
if not issue:
continue
result[int(issue)] = issue_branch_map.get(int(issue))
print('Branch for issue number %s: %s' % (
issue, ', '.join(issue_branch_map.get(int(issue)) or ('None',))))
if options.json:
write_json(options.json, result)
else:
cl = Changelist(codereview=options.forced_codereview)
if len(args) > 0:
try:
issue = int(args[0])
except ValueError:
DieWithError('Pass a number to set the issue or none to list it.\n'
'Maybe you want to run git cl status?')
cl.SetIssue(issue)
print('Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL()))
if options.json:
write_json(options.json, {
'issue': cl.GetIssue(),
'issue_url': cl.GetIssueURL(),
})
return 0
def CMDcomments(parser, args):
"""Shows or posts review comments for any changelist."""
parser.add_option('-a', '--add-comment', dest='comment',
help='comment to add to an issue')
parser.add_option('-i', dest='issue',
help="review issue id (defaults to current issue)")
parser.add_option('-j', '--json-file',
help='File to write JSON summary to')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
issue = None
if options.issue:
try:
issue = int(options.issue)
except ValueError:
DieWithError('A review issue id is expected to be a number')
cl = Changelist(issue=issue, codereview='rietveld', auth_config=auth_config)
if options.comment:
cl.AddComment(options.comment)
return 0
data = cl.GetIssueProperties()
summary = []
for message in sorted(data.get('messages', []), key=lambda x: x['date']):
summary.append({
'date': message['date'],
'lgtm': False,
'message': message['text'],
'not_lgtm': False,
'sender': message['sender'],
})
if message['disapproval']:
color = Fore.RED
summary[-1]['not lgtm'] = True
elif message['approval']:
color = Fore.GREEN
summary[-1]['lgtm'] = True
elif message['sender'] == data['owner_email']:
color = Fore.MAGENTA
else:
color = Fore.BLUE
print('\n%s%s %s%s' % (
color, message['date'].split('.', 1)[0], message['sender'],
Fore.RESET))
if message['text'].strip():
print('\n'.join(' ' + l for l in message['text'].splitlines()))
if options.json_file:
with open(options.json_file, 'wb') as f:
json.dump(summary, f)
return 0
@subcommand.usage('[codereview url or issue id]')
def CMDdescription(parser, args):
"""Brings up the editor for the current CL's description."""
parser.add_option('-d', '--display', action='store_true',
help='Display the description instead of opening an editor')
parser.add_option('-n', '--new-description',
help='New description to set for this issue (- for stdin, '
'+ to load from local commit HEAD)')
parser.add_option('-f', '--force', action='store_true',
help='Delete any unpublished Gerrit edits for this issue '
'without prompting')
_add_codereview_select_options(parser)
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
_process_codereview_select_options(parser, options)
target_issue = None
if len(args) > 0:
target_issue = ParseIssueNumberArgument(args[0])
if not target_issue.valid:
parser.print_help()
return 1
auth_config = auth.extract_auth_config_from_options(options)
kwargs = {
'auth_config': auth_config,
'codereview': options.forced_codereview,
}
if target_issue:
kwargs['issue'] = target_issue.issue
if options.forced_codereview == 'rietveld':
kwargs['rietveld_server'] = target_issue.hostname
cl = Changelist(**kwargs)
if not cl.GetIssue():
DieWithError('This branch has no associated changelist.')
description = ChangeDescription(cl.GetDescription())
if options.display:
print(description.description)
return 0
if options.new_description:
text = options.new_description
if text == '-':
text = '\n'.join(l.rstrip() for l in sys.stdin)
elif text == '+':
base_branch = cl.GetCommonAncestorWithUpstream()
change = cl.GetChange(base_branch, None, local_description=True)
text = change.FullDescriptionText()
description.set_description(text)
else:
description.prompt()
if cl.GetDescription() != description.description:
cl.UpdateDescription(description.description, force=options.force)
return 0
def CreateDescriptionFromLog(args):
"""Pulls out the commit log to use as a base for the CL description."""
log_args = []
if len(args) == 1 and not args[0].endswith('.'):
log_args = [args[0] + '..']
elif len(args) == 1 and args[0].endswith('...'):
log_args = [args[0][:-1]]
elif len(args) == 2:
log_args = [args[0] + '..' + args[1]]
else:
log_args = args[:] # Hope for the best!
return RunGit(['log', '--pretty=format:%s\n\n%b'] + log_args)
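# Illustrative argument handling, mirroring the branches above:
#   CreateDescriptionFromLog(['origin/master'])    -> log of origin/master..
#   CreateDescriptionFromLog(['origin/master...']) -> log of origin/master..
#   CreateDescriptionFromLog(['v1', 'v2'])         -> log of v1..v2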
def CMDlint(parser, args):
"""Runs cpplint on the current changelist."""
parser.add_option('--filter', action='append', metavar='-x,+y',
help='Comma-separated list of cpplint\'s category-filters')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
# Access to a protected member _XX of a client class
# pylint: disable=protected-access
try:
import cpplint
import cpplint_chromium
except ImportError:
print('Your depot_tools is missing cpplint.py and/or cpplint_chromium.py.')
return 1
# Change the current working directory before calling lint so that it
# shows the correct base.
previous_cwd = os.getcwd()
os.chdir(settings.GetRoot())
try:
cl = Changelist(auth_config=auth_config)
change = cl.GetChange(cl.GetCommonAncestorWithUpstream(), None)
files = [f.LocalPath() for f in change.AffectedFiles()]
if not files:
print('Cannot lint an empty CL')
return 1
# Process cpplints arguments if any.
command = args + files
if options.filter:
command = ['--filter=' + ','.join(options.filter)] + command
filenames = cpplint.ParseArguments(command)
white_regex = re.compile(settings.GetLintRegex())
black_regex = re.compile(settings.GetLintIgnoreRegex())
extra_check_functions = [cpplint_chromium.CheckPointerDeclarationWhitespace]
for filename in filenames:
if white_regex.match(filename):
if black_regex.match(filename):
print('Ignoring file %s' % filename)
else:
cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level,
extra_check_functions)
else:
print('Skipping file %s' % filename)
finally:
os.chdir(previous_cwd)
print('Total errors found: %d\n' % cpplint._cpplint_state.error_count)
if cpplint._cpplint_state.error_count != 0:
return 1
return 0
def CMDpresubmit(parser, args):
"""Runs presubmit tests on the current changelist."""
parser.add_option('-u', '--upload', action='store_true',
help='Run upload hook instead of the push hook')
parser.add_option('-f', '--force', action='store_true',
help='Run checks even if tree is dirty')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
if not options.force and git_common.is_dirty_git_tree('presubmit'):
print('use --force to check even if tree is dirty.')
return 1
cl = Changelist(auth_config=auth_config)
if args:
base_branch = args[0]
else:
# Default to diffing against the common ancestor of the upstream branch.
base_branch = cl.GetCommonAncestorWithUpstream()
cl.RunHook(
committing=not options.upload,
may_prompt=False,
verbose=options.verbose,
change=cl.GetChange(base_branch, None))
return 0
def GenerateGerritChangeId(message):
"""Returns Ixxxxxx...xxx change id.
Works the same way as
https://gerrit-review.googlesource.com/tools/hooks/commit-msg
but can be called on demand on all platforms.
  The basic idea is to generate a git hash of the state of the tree, the
  original commit message, author/committer info, and timestamps.
"""
lines = []
tree_hash = RunGitSilent(['write-tree'])
lines.append('tree %s' % tree_hash.strip())
code, parent = RunGitWithCode(['rev-parse', 'HEAD~0'], suppress_stderr=False)
if code == 0:
lines.append('parent %s' % parent.strip())
author = RunGitSilent(['var', 'GIT_AUTHOR_IDENT'])
lines.append('author %s' % author.strip())
committer = RunGitSilent(['var', 'GIT_COMMITTER_IDENT'])
lines.append('committer %s' % committer.strip())
lines.append('')
  # Note: Gerrit's commit-hook actually cleans the message of some lines and
  # whitespace. This code does not do that, but skipping it clearly won't
  # decrease entropy.
lines.append(message)
change_hash = RunCommand(['git', 'hash-object', '-t', 'commit', '--stdin'],
stdin='\n'.join(lines))
return 'I%s' % change_hash.strip()
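# The returned value (illustrative) is 'I' followed by the 40 hex digits of the
# SHA-1 that `git hash-object -t commit --stdin` computes for the synthetic
# commit object assembled above.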
def GetTargetRef(remote, remote_branch, target_branch, pending_prefix_check,
remote_url=None):
"""Computes the remote branch ref to use for the CL.
Args:
remote (str): The git remote for the CL.
remote_branch (str): The git remote branch for the CL.
target_branch (str): The target branch specified by the user.
pending_prefix_check (bool): If true, determines if pending_prefix should be
used.
remote_url (str): Only used for checking if pending_prefix should be used.
"""
if not (remote and remote_branch):
return None
if target_branch:
    # Canonicalize branch references to the equivalent local full symbolic
# refs, which are then translated into the remote full symbolic refs
# below.
if '/' not in target_branch:
remote_branch = 'refs/remotes/%s/%s' % (remote, target_branch)
else:
prefix_replacements = (
('^((refs/)?remotes/)?branch-heads/', 'refs/remotes/branch-heads/'),
('^((refs/)?remotes/)?%s/' % remote, 'refs/remotes/%s/' % remote),
('^(refs/)?heads/', 'refs/remotes/%s/' % remote),
)
match = None
for regex, replacement in prefix_replacements:
match = re.search(regex, target_branch)
if match:
remote_branch = target_branch.replace(match.group(0), replacement)
break
if not match:
# This is a branch path but not one we recognize; use as-is.
remote_branch = target_branch
elif remote_branch in REFS_THAT_ALIAS_TO_OTHER_REFS:
# Handle the refs that need to land in different refs.
remote_branch = REFS_THAT_ALIAS_TO_OTHER_REFS[remote_branch]
# Create the true path to the remote branch.
# Does the following translation:
# * refs/remotes/origin/refs/diff/test -> refs/diff/test
# * refs/remotes/origin/master -> refs/heads/master
# * refs/remotes/branch-heads/test -> refs/branch-heads/test
if remote_branch.startswith('refs/remotes/%s/refs/' % remote):
remote_branch = remote_branch.replace('refs/remotes/%s/' % remote, '')
elif remote_branch.startswith('refs/remotes/%s/' % remote):
remote_branch = remote_branch.replace('refs/remotes/%s/' % remote,
'refs/heads/')
elif remote_branch.startswith('refs/remotes/branch-heads'):
remote_branch = remote_branch.replace('refs/remotes/', 'refs/')
if pending_prefix_check:
# If a pending prefix exists then replace refs/ with it.
state = _GitNumbererState.load(remote_url, remote_branch)
if state.pending_prefix:
remote_branch = remote_branch.replace('refs/', state.pending_prefix)
return remote_branch
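# Illustrative translations, mirroring the mapping comments inside the function:
#   GetTargetRef('origin', 'refs/remotes/origin/master', None, False)
#       -> 'refs/heads/master'
#   GetTargetRef('origin', 'refs/remotes/origin/master', 'branch-heads/123',
#                False) -> 'refs/branch-heads/123'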
def cleanup_list(l):
"""Fixes a list so that comma separated items are put as individual items.
So that "--reviewers joe@c,john@c --reviewers joa@c" results in
options.reviewers == sorted(['joe@c', 'john@c', 'joa@c']).
"""
items = sum((i.split(',') for i in l), [])
stripped_items = (i.strip() for i in items)
return sorted(filter(None, stripped_items))
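# Concrete example, following directly from the docstring above:
#   cleanup_list(['joe@c,john@c', ' joa@c ']) == ['joa@c', 'joe@c', 'john@c']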
@subcommand.usage('[args to "git diff"]')
def CMDupload(parser, args):
"""Uploads the current changelist to codereview.
Can skip dependency patchset uploads for a branch by running:
git config branch.branch_name.skip-deps-uploads True
To unset run:
git config --unset branch.branch_name.skip-deps-uploads
Can also set the above globally by using the --global flag.
"""
parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
help='bypass upload presubmit hook')
parser.add_option('--bypass-watchlists', action='store_true',
dest='bypass_watchlists',
help='bypass watchlists auto CC-ing reviewers')
parser.add_option('-f', action='store_true', dest='force',
help="force yes to questions (don't prompt)")
parser.add_option('--message', '-m', dest='message',
help='message for patchset')
parser.add_option('-b', '--bug',
help='pre-populate the bug number(s) for this issue. '
'If several, separate with commas')
parser.add_option('--message-file', dest='message_file',
help='file which contains message for patchset')
parser.add_option('--title', '-t', dest='title',
help='title for patchset')
parser.add_option('-r', '--reviewers',
action='append', default=[],
help='reviewer email addresses')
parser.add_option('--cc',
action='append', default=[],
help='cc email addresses')
parser.add_option('-s', '--send-mail', action='store_true',
help='send email to reviewer(s) and cc(s) immediately')
parser.add_option('--emulate_svn_auto_props',
'--emulate-svn-auto-props',
action="store_true",
dest="emulate_svn_auto_props",
help="Emulate Subversion's auto properties feature.")
parser.add_option('-c', '--use-commit-queue', action='store_true',
help='tell the commit queue to commit this patchset')
parser.add_option('--private', action='store_true',
help='set the review private (rietveld only)')
parser.add_option('--target_branch',
'--target-branch',
metavar='TARGET',
help='Apply CL to remote ref TARGET. ' +
'Default: remote branch head, or master')
parser.add_option('--squash', action='store_true',
help='Squash multiple commits into one (Gerrit only)')
parser.add_option('--no-squash', action='store_true',
help='Don\'t squash multiple commits into one ' +
'(Gerrit only)')
parser.add_option('--topic', default=None,
help='Topic to specify when uploading (Gerrit only)')
parser.add_option('--email', default=None,
help='email address to use to connect to Rietveld')
parser.add_option('--tbr-owners', dest='tbr_owners', action='store_true',
help='add a set of OWNERS to TBR')
parser.add_option('-d', '--cq-dry-run', dest='cq_dry_run',
action='store_true',
help='Send the patchset to do a CQ dry run right after '
'upload.')
parser.add_option('--dependencies', action='store_true',
help='Uploads CLs of all the local branches that depend on '
'the current branch')
orig_args = args
add_git_similarity(parser)
auth.add_auth_options(parser)
_add_codereview_select_options(parser)
(options, args) = parser.parse_args(args)
_process_codereview_select_options(parser, options)
auth_config = auth.extract_auth_config_from_options(options)
if git_common.is_dirty_git_tree('upload'):
return 1
options.reviewers = cleanup_list(options.reviewers)
options.cc = cleanup_list(options.cc)
if options.message_file:
if options.message:
parser.error('only one of --message and --message-file allowed.')
options.message = gclient_utils.FileRead(options.message_file)
options.message_file = None
if options.cq_dry_run and options.use_commit_queue:
parser.error('only one of --use-commit-queue and --cq-dry-run allowed.')
# For sanity of test expectations, do this otherwise lazy-loading *now*.
settings.GetIsGerrit()
cl = Changelist(auth_config=auth_config, codereview=options.forced_codereview)
return cl.CMDUpload(options, args, orig_args)
def WaitForRealCommit(remote, pushed_commit, local_base_ref, real_ref):
print()
print('Waiting for commit to be landed on %s...' % real_ref)
print('(If you are impatient, you may Ctrl-C once without harm)')
target_tree = RunGit(['rev-parse', '%s:' % pushed_commit]).strip()
current_rev = RunGit(['rev-parse', local_base_ref]).strip()
mirror = settings.GetGitMirror(remote)
loop = 0
while True:
sys.stdout.write('fetching (%d)... \r' % loop)
sys.stdout.flush()
loop += 1
if mirror:
mirror.populate()
RunGit(['retry', 'fetch', remote, real_ref], stderr=subprocess2.VOID)
to_rev = RunGit(['rev-parse', 'FETCH_HEAD']).strip()
commits = RunGit(['rev-list', '%s..%s' % (current_rev, to_rev)])
for commit in commits.splitlines():
if RunGit(['rev-parse', '%s:' % commit]).strip() == target_tree:
print('Found commit on %s' % real_ref)
return commit
current_rev = to_rev
def PushToGitPending(remote, pending_ref):
"""Fetches pending_ref, cherry-picks current HEAD on top of it, pushes.
Returns:
(retcode of last operation, output log of last operation).
"""
assert pending_ref.startswith('refs/'), pending_ref
local_pending_ref = 'refs/git-cl/' + pending_ref[len('refs/'):]
cherry = RunGit(['rev-parse', 'HEAD']).strip()
code = 0
out = ''
max_attempts = 3
attempts_left = max_attempts
while attempts_left:
if attempts_left != max_attempts:
print('Retrying, %d attempts left...' % (attempts_left - 1,))
attempts_left -= 1
# Fetch. Retry fetch errors.
print('Fetching pending ref %s...' % pending_ref)
code, out = RunGitWithCode(
['retry', 'fetch', remote, '+%s:%s' % (pending_ref, local_pending_ref)])
if code:
print('Fetch failed with exit code %d.' % code)
if out.strip():
print(out.strip())
continue
# Try to cherry pick. Abort on merge conflicts.
print('Cherry-picking commit on top of pending ref...')
RunGitWithCode(['checkout', local_pending_ref], suppress_stderr=True)
code, out = RunGitWithCode(['cherry-pick', cherry])
if code:
print('Your patch doesn\'t apply cleanly to ref \'%s\', '
'the following files have merge conflicts:' % pending_ref)
print(RunGit(['diff', '--name-status', '--diff-filter=U']).strip())
print('Please rebase your patch and try again.')
RunGitWithCode(['cherry-pick', '--abort'])
return code, out
# Applied cleanly, try to push now. Retry on error (flake or non-ff push).
print('Pushing commit to %s... It can take a while.' % pending_ref)
code, out = RunGitWithCode(
['retry', 'push', '--porcelain', remote, 'HEAD:%s' % pending_ref])
if code == 0:
# Success.
print('Commit pushed to pending ref successfully!')
return code, out
print('Push failed with exit code %d.' % code)
if out.strip():
print(out.strip())
if IsFatalPushFailure(out):
print('Fatal push error. Make sure your .netrc credentials and git '
'user.email are correct and you have push access to the repo.')
return code, out
print('All attempts to push to pending ref failed.')
return code, out
def IsFatalPushFailure(push_stdout):
"""True if retrying push won't help."""
return '(prohibited by Gerrit)' in push_stdout
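# Example (illustrative): push output containing '(prohibited by Gerrit)' is
# treated as fatal, so PushToGitPending() stops retrying.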
@subcommand.usage('DEPRECATED')
def CMDdcommit(parser, args):
"""DEPRECATED: Used to commit the current changelist via git-svn."""
message = ('git-cl no longer supports committing to SVN repositories via '
'git-svn. You probably want to use `git cl land` instead.')
print(message)
return 1
@subcommand.usage('[upstream branch to apply against]')
def CMDland(parser, args):
"""Commits the current changelist via git.
In case of Gerrit, uses Gerrit REST api to "submit" the issue, which pushes
upstream and closes the issue automatically and atomically.
Otherwise (in case of Rietveld):
Squashes branch into a single commit.
Updates commit message with metadata (e.g. pointer to review).
Pushes the code upstream.
Updates review and closes.
"""
parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
help='bypass upload presubmit hook')
parser.add_option('-m', dest='message',
help="override review description")
parser.add_option('-f', action='store_true', dest='force',
help="force yes to questions (don't prompt)")
parser.add_option('-c', dest='contributor',
help="external contributor for patch (appended to " +
"description and used as author for git). Should be " +
"formatted as 'First Last <[email protected]>'")
add_git_similarity(parser)
auth.add_auth_options(parser)
(options, args) = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
cl = Changelist(auth_config=auth_config)
# TODO(tandrii): refactor this into _RietveldChangelistImpl method.
if cl.IsGerrit():
if options.message:
# This could be implemented, but it requires sending a new patch to
# Gerrit, as Gerrit unlike Rietveld versions messages with patchsets.
# Besides, Gerrit has the ability to change the commit message on submit
# automatically, thus there is no need to support this option (so far?).
parser.error('-m MESSAGE option is not supported for Gerrit.')
if options.contributor:
parser.error(
'-c CONTRIBUTOR option is not supported for Gerrit.\n'
          'Before uploading a commit to Gerrit, ensure its author field is '
          'the contributor\'s "name <email>". If you can\'t upload such a '
          'commit for review, contact your repository admin and request '
          '"Forge-Author" permission.')
if not cl.GetIssue():
DieWithError('You must upload the change first to Gerrit.\n'
' If you would rather have `git cl land` upload '
'automatically for you, see http://crbug.com/642759')
return cl._codereview_impl.CMDLand(options.force, options.bypass_hooks,
options.verbose)
current = cl.GetBranch()
remote, upstream_branch = cl.FetchUpstreamTuple(cl.GetBranch())
if remote == '.':
print()
print('Attempting to push branch %r into another local branch!' % current)
print()
print('Either reparent this branch on top of origin/master:')
print(' git reparent-branch --root')
print()
print('OR run `git rebase-update` if you think the parent branch is ')
print('already committed.')
print()
print(' Current parent: %r' % upstream_branch)
return 1
if not args:
# Default to merging against our best guess of the upstream branch.
args = [cl.GetUpstreamBranch()]
if options.contributor:
if not re.match('^.*\s<\S+@\S+>$', options.contributor):
print("Please provide contibutor as 'First Last <[email protected]>'")
return 1
base_branch = args[0]
if git_common.is_dirty_git_tree('land'):
return 1
# This rev-list syntax means "show all commits not in my branch that
# are in base_branch".
upstream_commits = RunGit(['rev-list', '^' + cl.GetBranchRef(),
base_branch]).splitlines()
if upstream_commits:
print('Base branch "%s" has %d commits '
'not in this branch.' % (base_branch, len(upstream_commits)))
print('Run "git merge %s" before attempting to land.' % base_branch)
return 1
merge_base = RunGit(['merge-base', base_branch, 'HEAD']).strip()
if not options.bypass_hooks:
author = None
if options.contributor:
author = re.search(r'\<(.*)\>', options.contributor).group(1)
hook_results = cl.RunHook(
committing=True,
may_prompt=not options.force,
verbose=options.verbose,
change=cl.GetChange(merge_base, author))
if not hook_results.should_continue():
return 1
# Check the tree status if the tree status URL is set.
status = GetTreeStatus()
if 'closed' == status:
print('The tree is closed. Please wait for it to reopen. Use '
'"git cl land --bypass-hooks" to commit on a closed tree.')
return 1
elif 'unknown' == status:
print('Unable to determine tree status. Please verify manually and '
'use "git cl land --bypass-hooks" to commit on a closed tree.')
return 1
change_desc = ChangeDescription(options.message)
if not change_desc.description and cl.GetIssue():
change_desc = ChangeDescription(cl.GetDescription())
if not change_desc.description:
if not cl.GetIssue() and options.bypass_hooks:
change_desc = ChangeDescription(CreateDescriptionFromLog([merge_base]))
else:
print('No description set.')
print('Visit %s/edit to set it.' % (cl.GetIssueURL()))
return 1
# Keep a separate copy for the commit message, because the commit message
# contains the link to the Rietveld issue, while the Rietveld message contains
# the commit viewvc url.
if cl.GetIssue():
change_desc.update_reviewers(cl.GetApprovingReviewers())
commit_desc = ChangeDescription(change_desc.description)
if cl.GetIssue():
# Xcode won't linkify this URL unless there is a non-whitespace character
# after it. Add a period on a new line to circumvent this. Also add a space
# before the period to make sure that Gitiles continues to correctly resolve
# the URL.
commit_desc.append_footer('Review-Url: %s .' % cl.GetIssueURL())
if options.contributor:
commit_desc.append_footer('Patch from %s.' % options.contributor)
print('Description:')
print(commit_desc.description)
branches = [merge_base, cl.GetBranchRef()]
if not options.force:
print_stats(options.similarity, options.find_copies, branches)
# We want to squash all this branch's commits into one commit with the proper
# description. We do this by doing a "reset --soft" to the base branch (which
# keeps the working copy the same), then landing that.
MERGE_BRANCH = 'git-cl-commit'
CHERRY_PICK_BRANCH = 'git-cl-cherry-pick'
# Delete the branches if they exist.
for branch in [MERGE_BRANCH, CHERRY_PICK_BRANCH]:
showref_cmd = ['show-ref', '--quiet', '--verify', 'refs/heads/%s' % branch]
result = RunGitWithCode(showref_cmd)
if result[0] == 0:
RunGit(['branch', '-D', branch])
# We might be in a directory that's present in this branch but not in the
# trunk. Move up to the top of the tree so that git commands that expect a
# valid CWD won't fail after we check out the merge branch.
rel_base_path = settings.GetRelativeRoot()
if rel_base_path:
os.chdir(rel_base_path)
# Stuff our change into the merge branch.
# We wrap in a try...finally block so if anything goes wrong,
# we clean up the branches.
retcode = -1
pushed_to_pending = False
pending_ref = None
revision = None
try:
RunGit(['checkout', '-q', '-b', MERGE_BRANCH])
RunGit(['reset', '--soft', merge_base])
if options.contributor:
RunGit(
[
'commit', '--author', options.contributor,
'-m', commit_desc.description,
])
else:
RunGit(['commit', '-m', commit_desc.description])
remote, branch = cl.FetchUpstreamTuple(cl.GetBranch())
mirror = settings.GetGitMirror(remote)
if mirror:
pushurl = mirror.url
git_numberer = _GitNumbererState.load(pushurl, branch)
else:
pushurl = remote # Usually, this is 'origin'.
git_numberer = _GitNumbererState.load(
RunGit(['config', 'remote.%s.url' % remote]).strip(), branch)
if git_numberer.should_add_git_number:
      # TODO(tandrii): run git fetch in a loop + autorebase when there
      # is no pending ref to push to?
logging.debug('Adding git number footers')
parent_msg = RunGit(['show', '-s', '--format=%B', merge_base]).strip()
commit_desc.update_with_git_number_footers(merge_base, parent_msg,
branch)
# Ensure timestamps are monotonically increasing.
timestamp = max(1 + _get_committer_timestamp(merge_base),
_get_committer_timestamp('HEAD'))
_git_amend_head(commit_desc.description, timestamp)
change_desc = ChangeDescription(commit_desc.description)
    # If gnumbd is still ON and we ultimately push to a branch with
    # pending_prefix, gnumbd will modify footers we've just inserted with
# 'Original-', which is annoying but still technically correct.
pending_prefix = git_numberer.pending_prefix
if not pending_prefix or branch.startswith(pending_prefix):
# If not using refs/pending/heads/* at all, or target ref is already set
# to pending, then push to the target ref directly.
# NB(tandrii): I think branch.startswith(pending_prefix) never happens
      # in practice. I really tried to create a new branch tracking
# refs/pending/heads/master directly and git cl land failed long before
# reaching this. Disagree? Comment on http://crbug.com/642493.
if pending_prefix:
print('\n\nYOU GOT A CHANCE TO WIN A FREE GIFT!\n\n'
'Grab your .git/config, add instructions how to reproduce '
'this, and post it to http://crbug.com/642493.\n'
'The first reporter gets a free "Black Swan" book from '
'tandrii@\n\n')
retcode, output = RunGitWithCode(
['push', '--porcelain', pushurl, 'HEAD:%s' % branch])
pushed_to_pending = pending_prefix and branch.startswith(pending_prefix)
else:
# Cherry-pick the change on top of pending ref and then push it.
assert branch.startswith('refs/'), branch
assert pending_prefix[-1] == '/', pending_prefix
pending_ref = pending_prefix + branch[len('refs/'):]
retcode, output = PushToGitPending(pushurl, pending_ref)
pushed_to_pending = (retcode == 0)
if retcode == 0:
revision = RunGit(['rev-parse', 'HEAD']).strip()
logging.debug(output)
except: # pylint: disable=bare-except
if _IS_BEING_TESTED:
logging.exception('this is likely your ACTUAL cause of test failure.\n'
+ '-' * 30 + '8<' + '-' * 30)
logging.error('\n' + '-' * 30 + '8<' + '-' * 30 + '\n\n\n')
raise
finally:
# And then swap back to the original branch and clean up.
RunGit(['checkout', '-q', cl.GetBranch()])
RunGit(['branch', '-D', MERGE_BRANCH])
if not revision:
print('Failed to push. If this persists, please file a bug.')
return 1
killed = False
if pushed_to_pending:
try:
revision = WaitForRealCommit(remote, revision, base_branch, branch)
# We set pushed_to_pending to False, since it made it all the way to the
# real ref.
pushed_to_pending = False
except KeyboardInterrupt:
killed = True
if cl.GetIssue():
to_pending = ' to pending queue' if pushed_to_pending else ''
viewvc_url = settings.GetViewVCUrl()
if not to_pending:
if viewvc_url and revision:
change_desc.append_footer(
'Committed: %s%s' % (viewvc_url, revision))
elif revision:
change_desc.append_footer('Committed: %s' % (revision,))
print('Closing issue '
'(you may be prompted for your codereview password)...')
cl.UpdateDescription(change_desc.description)
cl.CloseIssue()
props = cl.GetIssueProperties()
patch_num = len(props['patchsets'])
comment = "Committed patchset #%d (id:%d)%s manually as %s" % (
patch_num, props['patchsets'][-1], to_pending, revision)
if options.bypass_hooks:
comment += ' (tree was closed).' if GetTreeStatus() == 'closed' else '.'
else:
comment += ' (presubmit successful).'
cl.RpcServer().add_comment(cl.GetIssue(), comment)
if pushed_to_pending:
_, branch = cl.FetchUpstreamTuple(cl.GetBranch())
print('The commit is in the pending queue (%s).' % pending_ref)
print('It will show up on %s in ~1 min, once it gets a Cr-Commit-Position '
'footer.' % branch)
if os.path.isfile(POSTUPSTREAM_HOOK):
RunCommand([POSTUPSTREAM_HOOK, merge_base], error_ok=True)
return 1 if killed else 0
@subcommand.usage('<patch url or issue id or issue url>')
def CMDpatch(parser, args):
"""Patches in a code review."""
parser.add_option('-b', dest='newbranch',
help='create a new branch off trunk for the patch')
parser.add_option('-f', '--force', action='store_true',
help='with -b, clobber any existing branch')
parser.add_option('-d', '--directory', action='store', metavar='DIR',
help='Change to the directory DIR immediately, '
'before doing anything else. Rietveld only.')
parser.add_option('--reject', action='store_true',
help='failed patches spew .rej files rather than '
'attempting a 3-way merge. Rietveld only.')
parser.add_option('-n', '--no-commit', action='store_true', dest='nocommit',
help='don\'t commit after patch applies. Rietveld only.')
group = optparse.OptionGroup(
parser,
'Options for continuing work on the current issue uploaded from a '
'different clone (e.g. different machine). Must be used independently '
'from the other options. No issue number should be specified, and the '
'branch must have an issue number associated with it')
group.add_option('--reapply', action='store_true', dest='reapply',
help='Reset the branch and reapply the issue.\n'
'CAUTION: This will undo any local changes in this '
'branch')
group.add_option('--pull', action='store_true', dest='pull',
help='Performs a pull before reapplying.')
parser.add_option_group(group)
auth.add_auth_options(parser)
_add_codereview_select_options(parser)
(options, args) = parser.parse_args(args)
_process_codereview_select_options(parser, options)
auth_config = auth.extract_auth_config_from_options(options)
  if options.reapply:
if options.newbranch:
parser.error('--reapply works on the current branch only')
if len(args) > 0:
parser.error('--reapply implies no additional arguments')
cl = Changelist(auth_config=auth_config,
codereview=options.forced_codereview)
if not cl.GetIssue():
parser.error('current branch must have an associated issue')
upstream = cl.GetUpstreamBranch()
    if upstream is None:
parser.error('No upstream branch specified. Cannot reset branch')
RunGit(['reset', '--hard', upstream])
if options.pull:
RunGit(['pull'])
return cl.CMDPatchIssue(cl.GetIssue(), options.reject, options.nocommit,
options.directory)
if len(args) != 1 or not args[0]:
parser.error('Must specify issue number or url')
# We don't want uncommitted changes mixed up with the patch.
if git_common.is_dirty_git_tree('patch'):
return 1
if options.newbranch:
if options.force:
RunGit(['branch', '-D', options.newbranch],
stderr=subprocess2.PIPE, error_ok=True)
RunGit(['new-branch', options.newbranch])
elif not GetCurrentBranch():
DieWithError('A branch is required to apply patch. Hint: use -b option.')
cl = Changelist(auth_config=auth_config, codereview=options.forced_codereview)
if cl.IsGerrit():
if options.reject:
parser.error('--reject is not supported with Gerrit codereview.')
if options.nocommit:
parser.error('--nocommit is not supported with Gerrit codereview.')
if options.directory:
parser.error('--directory is not supported with Gerrit codereview.')
return cl.CMDPatchIssue(args[0], options.reject, options.nocommit,
options.directory)
def GetTreeStatus(url=None):
"""Fetches the tree status and returns either 'open', 'closed',
'unknown' or 'unset'."""
url = url or settings.GetTreeStatusUrl(error_ok=True)
if url:
status = urllib2.urlopen(url).read().lower()
if status.find('closed') != -1 or status == '0':
return 'closed'
elif status.find('open') != -1 or status == '1':
return 'open'
return 'unknown'
return 'unset'
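# Illustrative mapping per the checks above: a status body containing 'closed'
# (or exactly '0') yields 'closed'; 'open' (or '1') yields 'open'; anything
# else 'unknown'; and a missing status URL yields 'unset'.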
def GetTreeStatusReason():
"""Fetches the tree status from a json url and returns the message
with the reason for the tree to be opened or closed."""
url = settings.GetTreeStatusUrl()
json_url = urlparse.urljoin(url, '/current?format=json')
connection = urllib2.urlopen(json_url)
status = json.loads(connection.read())
connection.close()
return status['message']
def CMDtree(parser, args):
"""Shows the status of the tree."""
_, args = parser.parse_args(args)
status = GetTreeStatus()
if 'unset' == status:
print('You must configure your tree status URL by running "git cl config".')
return 2
print('The tree is %s' % status)
print()
print(GetTreeStatusReason())
if status != 'open':
return 1
return 0
def CMDtry(parser, args):
"""Triggers try jobs using either BuildBucket or CQ dry run."""
group = optparse.OptionGroup(parser, 'Try job options')
group.add_option(
'-b', '--bot', action='append',
help=('IMPORTANT: specify ONE builder per --bot flag. Use it multiple '
'times to specify multiple builders. ex: '
'"-b win_rel -b win_layout". See '
'the try server waterfall for the builders name and the tests '
'available.'))
group.add_option(
'-B', '--bucket', default='',
help=('Buildbucket bucket to send the try requests.'))
group.add_option(
'-m', '--master', default='',
help=('Specify a try master where to run the tries.'))
group.add_option(
'-r', '--revision',
help='Revision to use for the try job; default: the revision will '
'be determined by the try recipe that builder runs, which usually '
'defaults to HEAD of origin/master')
group.add_option(
'-c', '--clobber', action='store_true', default=False,
help='Force a clobber before building; that is don\'t do an '
'incremental build')
group.add_option(
'--project',
help='Override which project to use. Projects are defined '
'in recipe to determine to which repository or directory to '
'apply the patch')
group.add_option(
'-p', '--property', dest='properties', action='append', default=[],
help='Specify generic properties in the form -p key1=value1 -p '
'key2=value2 etc. The value will be treated as '
'json if decodable, or as string otherwise. '
'NOTE: using this may make your try job not usable for CQ, '
'which will then schedule another try job with default properties')
group.add_option(
'--buildbucket-host', default='cr-buildbucket.appspot.com',
help='Host of buildbucket. The default host is %default.')
parser.add_option_group(group)
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
# Make sure that all properties are prop=value pairs.
bad_params = [x for x in options.properties if '=' not in x]
if bad_params:
parser.error('Got properties with missing "=": %s' % bad_params)
if args:
parser.error('Unknown arguments: %s' % args)
cl = Changelist(auth_config=auth_config)
if not cl.GetIssue():
parser.error('Need to upload first')
error_message = cl.CannotTriggerTryJobReason()
if error_message:
parser.error('Can\'t trigger try jobs: %s' % error_message)
if options.bucket and options.master:
parser.error('Only one of --bucket and --master may be used.')
buckets = _get_bucket_map(cl, options, parser)
# If no bots are listed and we couldn't get a list based on PRESUBMIT files,
# then we default to triggering a CQ dry run (see http://crbug.com/625697).
if not buckets:
if options.verbose:
print('git cl try with no bots now defaults to CQ Dry Run.')
return cl.TriggerDryRun()
for builders in buckets.itervalues():
if any('triggered' in b for b in builders):
print('ERROR You are trying to send a job to a triggered bot. This type '
'of bot requires an initial job from a parent (usually a builder). '
'Instead send your job to the parent.\n'
'Bot list: %s' % builders, file=sys.stderr)
return 1
patchset = cl.GetMostRecentPatchset()
# TODO(tandrii): Checking local patchset against remote patchset is only
# supported for Rietveld. Extend it to Gerrit or remove it completely.
if not cl.IsGerrit() and patchset != cl.GetPatchset():
print('Warning: Codereview server has newer patchsets (%s) than most '
'recent upload from local checkout (%s). Did a previous upload '
'fail?\n'
'By default, git cl try uses the latest patchset from '
'codereview, continuing to use patchset %s.\n' %
(patchset, cl.GetPatchset(), patchset))
try:
_trigger_try_jobs(auth_config, cl, buckets, options, 'git_cl_try',
patchset)
except BuildbucketResponseException as ex:
print('ERROR: %s' % ex)
return 1
return 0
def CMDtry_results(parser, args):
"""Prints info about try jobs associated with current CL."""
group = optparse.OptionGroup(parser, 'Try job results options')
group.add_option(
'-p', '--patchset', type=int, help='patchset number if not current.')
group.add_option(
'--print-master', action='store_true', help='print master name as well.')
group.add_option(
'--color', action='store_true', default=setup_color.IS_TTY,
help='force color output, useful when piping output.')
group.add_option(
'--buildbucket-host', default='cr-buildbucket.appspot.com',
help='Host of buildbucket. The default host is %default.')
group.add_option(
'--json', help='Path of JSON output file to write try job results to.')
parser.add_option_group(group)
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
auth_config = auth.extract_auth_config_from_options(options)
cl = Changelist(auth_config=auth_config)
if not cl.GetIssue():
parser.error('Need to upload first')
patchset = options.patchset
if not patchset:
patchset = cl.GetMostRecentPatchset()
if not patchset:
parser.error('Codereview doesn\'t know about issue %s. '
'No access to issue or wrong issue number?\n'
                   'Either upload first, or pass --patchset explicitly' %
cl.GetIssue())
# TODO(tandrii): Checking local patchset against remote patchset is only
# supported for Rietveld. Extend it to Gerrit or remove it completely.
if not cl.IsGerrit() and patchset != cl.GetPatchset():
print('Warning: Codereview server has newer patchsets (%s) than most '
'recent upload from local checkout (%s). Did a previous upload '
'fail?\n'
'By default, git cl try-results uses the latest patchset from '
'codereview, continuing to use patchset %s.\n' %
(patchset, cl.GetPatchset(), patchset))
try:
jobs = fetch_try_jobs(auth_config, cl, options.buildbucket_host, patchset)
except BuildbucketResponseException as ex:
print('Buildbucket error: %s' % ex)
return 1
if options.json:
write_try_results_json(options.json, jobs)
else:
print_try_jobs(options, jobs)
return 0
@subcommand.usage('[new upstream branch]')
def CMDupstream(parser, args):
"""Prints or sets the name of the upstream branch, if any."""
_, args = parser.parse_args(args)
if len(args) > 1:
parser.error('Unrecognized args: %s' % ' '.join(args))
cl = Changelist()
if args:
# One arg means set upstream branch.
branch = cl.GetBranch()
RunGit(['branch', '--set-upstream-to', args[0], branch])
cl = Changelist()
print('Upstream branch set to %s' % (cl.GetUpstreamBranch(),))
# Clear configured merge-base, if there is one.
git_common.remove_merge_base(branch)
else:
print(cl.GetUpstreamBranch())
return 0
def CMDweb(parser, args):
"""Opens the current CL in the web browser."""
_, args = parser.parse_args(args)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
issue_url = Changelist().GetIssueURL()
if not issue_url:
print('ERROR No issue to open', file=sys.stderr)
return 1
webbrowser.open(issue_url)
return 0
def CMDset_commit(parser, args):
"""Sets the commit bit to trigger the Commit Queue."""
parser.add_option('-d', '--dry-run', action='store_true',
help='trigger in dry run mode')
parser.add_option('-c', '--clear', action='store_true',
help='stop CQ run, if any')
auth.add_auth_options(parser)
_add_codereview_issue_select_options(parser)
options, args = parser.parse_args(args)
_process_codereview_issue_select_options(parser, options)
auth_config = auth.extract_auth_config_from_options(options)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
if options.dry_run and options.clear:
parser.error('Make up your mind: both --dry-run and --clear not allowed')
cl = Changelist(auth_config=auth_config, issue=options.issue,
codereview=options.forced_codereview)
if options.clear:
state = _CQState.NONE
elif options.dry_run:
# TODO(qyearsley): Use cl.TriggerDryRun.
state = _CQState.DRY_RUN
else:
state = _CQState.COMMIT
if not cl.GetIssue():
parser.error('Must upload the issue first')
cl.SetCQState(state)
return 0
def CMDset_close(parser, args):
"""Closes the issue."""
_add_codereview_issue_select_options(parser)
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
_process_codereview_issue_select_options(parser, options)
auth_config = auth.extract_auth_config_from_options(options)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
cl = Changelist(auth_config=auth_config, issue=options.issue,
codereview=options.forced_codereview)
# Ensure there actually is an issue to close.
cl.GetDescription()
cl.CloseIssue()
return 0
def CMDdiff(parser, args):
"""Shows differences between local tree and last upload."""
parser.add_option(
'--stat',
action='store_true',
dest='stat',
help='Generate a diffstat')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
# Uncommitted (staged and unstaged) changes will be destroyed by
# "git reset --hard" if there are merging conflicts in CMDPatchIssue().
# Staged changes would be committed along with the patch from last
# upload, hence counted toward the "last upload" side in the final
# diff output, and this is not what we want.
if git_common.is_dirty_git_tree('diff'):
return 1
cl = Changelist(auth_config=auth_config)
issue = cl.GetIssue()
branch = cl.GetBranch()
if not issue:
DieWithError('No issue found for current branch (%s)' % branch)
TMP_BRANCH = 'git-cl-diff'
base_branch = cl.GetCommonAncestorWithUpstream()
# Create a new branch based on the merge-base
RunGit(['checkout', '-q', '-b', TMP_BRANCH, base_branch])
# Clear cached branch in cl object, to avoid overwriting original CL branch
# properties.
cl.ClearBranch()
try:
rtn = cl.CMDPatchIssue(issue, reject=False, nocommit=False, directory=None)
if rtn != 0:
RunGit(['reset', '--hard'])
return rtn
# Switch back to starting branch and diff against the temporary
# branch containing the latest rietveld patch.
cmd = ['git', 'diff']
if options.stat:
cmd.append('--stat')
cmd.extend([TMP_BRANCH, branch, '--'])
subprocess2.check_call(cmd)
finally:
RunGit(['checkout', '-q', branch])
RunGit(['branch', '-D', TMP_BRANCH])
return 0
def CMDowners(parser, args):
"""Interactively find the owners for reviewing."""
parser.add_option(
'--no-color',
action='store_true',
help='Use this option to disable color output')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
author = RunGit(['config', 'user.email']).strip() or None
cl = Changelist(auth_config=auth_config)
if args:
if len(args) > 1:
parser.error('Unknown args')
base_branch = args[0]
else:
# Default to diffing against the common ancestor of the upstream branch.
base_branch = cl.GetCommonAncestorWithUpstream()
change = cl.GetChange(base_branch, None)
return owners_finder.OwnersFinder(
[f.LocalPath() for f in
cl.GetChange(base_branch, None).AffectedFiles()],
change.RepositoryRoot(), author,
fopen=file, os_path=os.path,
disable_color=options.no_color).run()
def BuildGitDiffCmd(diff_type, upstream_commit, args):
"""Generates a diff command."""
# Generate diff for the current branch's changes.
diff_cmd = ['diff', '--no-ext-diff', '--no-prefix', diff_type,
upstream_commit, '--' ]
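  # Example with hypothetical arguments:
  #   BuildGitDiffCmd('--name-only', 'abc123', ['src'])
  #   -> ['diff', '--no-ext-diff', '--no-prefix', '--name-only', 'abc123', '--', 'src']
  # (the caller-supplied paths are appended below after validation).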
if args:
for arg in args:
if os.path.isdir(arg) or os.path.isfile(arg):
diff_cmd.append(arg)
else:
DieWithError('Argument "%s" is not a file or a directory' % arg)
return diff_cmd
def MatchingFileType(file_name, extensions):
"""Returns true if the file name ends with one of the given extensions."""
return bool([ext for ext in extensions if file_name.lower().endswith(ext)])
@subcommand.usage('[files or directories to diff]')
def CMDformat(parser, args):
"""Runs auto-formatting tools (clang-format etc.) on the diff."""
CLANG_EXTS = ['.cc', '.cpp', '.h', '.m', '.mm', '.proto', '.java', '.js']
GN_EXTS = ['.gn', '.gni', '.typemap']
parser.add_option('--full', action='store_true',
help='Reformat the full content of all touched files')
parser.add_option('--dry-run', action='store_true',
help='Don\'t modify any file on disk.')
parser.add_option('--python', action='store_true',
help='Format python code with yapf (experimental).')
parser.add_option('--diff', action='store_true',
help='Print diff to stdout rather than modifying files.')
opts, args = parser.parse_args(args)
# Normalize any remaining args against the current path, so paths relative to
# the current directory are still resolved as expected.
args = [os.path.join(os.getcwd(), arg) for arg in args]
# git diff generates paths against the root of the repository. Change
# to that directory so clang-format can find files even within subdirs.
rel_base_path = settings.GetRelativeRoot()
if rel_base_path:
os.chdir(rel_base_path)
# Grab the merge-base commit, i.e. the upstream commit of the current
# branch when it was created or the last time it was rebased. This is
# to cover the case where the user may have called "git fetch origin",
# moving the origin branch to a newer commit, but hasn't rebased yet.
upstream_commit = None
cl = Changelist()
upstream_branch = cl.GetUpstreamBranch()
if upstream_branch:
upstream_commit = RunGit(['merge-base', 'HEAD', upstream_branch])
upstream_commit = upstream_commit.strip()
if not upstream_commit:
DieWithError('Could not find base commit for this branch. '
'Are you in detached state?')
changed_files_cmd = BuildGitDiffCmd('--name-only', upstream_commit, args)
diff_output = RunGit(changed_files_cmd)
diff_files = diff_output.splitlines()
# Filter out files deleted by this CL
diff_files = [x for x in diff_files if os.path.isfile(x)]
clang_diff_files = [x for x in diff_files if MatchingFileType(x, CLANG_EXTS)]
python_diff_files = [x for x in diff_files if MatchingFileType(x, ['.py'])]
dart_diff_files = [x for x in diff_files if MatchingFileType(x, ['.dart'])]
gn_diff_files = [x for x in diff_files if MatchingFileType(x, GN_EXTS)]
top_dir = os.path.normpath(
RunGit(["rev-parse", "--show-toplevel"]).rstrip('\n'))
# Set to 2 to signal to CheckPatchFormatted() that this patch isn't
# formatted. This is used to block during the presubmit.
return_value = 0
if clang_diff_files:
# Locate the clang-format binary in the checkout
try:
clang_format_tool = clang_format.FindClangFormatToolInChromiumTree()
except clang_format.NotFoundError as e:
DieWithError(e)
if opts.full:
cmd = [clang_format_tool]
if not opts.dry_run and not opts.diff:
cmd.append('-i')
stdout = RunCommand(cmd + clang_diff_files, cwd=top_dir)
if opts.diff:
sys.stdout.write(stdout)
else:
env = os.environ.copy()
env['PATH'] = str(os.path.dirname(clang_format_tool))
try:
script = clang_format.FindClangFormatScriptInChromiumTree(
'clang-format-diff.py')
except clang_format.NotFoundError as e:
DieWithError(e)
cmd = [sys.executable, script, '-p0']
if not opts.dry_run and not opts.diff:
cmd.append('-i')
diff_cmd = BuildGitDiffCmd('-U0', upstream_commit, clang_diff_files)
diff_output = RunGit(diff_cmd)
stdout = RunCommand(cmd, stdin=diff_output, cwd=top_dir, env=env)
if opts.diff:
sys.stdout.write(stdout)
if opts.dry_run and len(stdout) > 0:
return_value = 2
# Similar code to above, but using yapf on .py files rather than clang-format
# on C/C++ files
if opts.python:
yapf_tool = gclient_utils.FindExecutable('yapf')
if yapf_tool is None:
DieWithError('yapf not found in PATH')
if opts.full:
if python_diff_files:
cmd = [yapf_tool]
if not opts.dry_run and not opts.diff:
cmd.append('-i')
stdout = RunCommand(cmd + python_diff_files, cwd=top_dir)
if opts.diff:
sys.stdout.write(stdout)
else:
# TODO(sbc): yapf --lines mode still has some issues.
# https://github.com/google/yapf/issues/154
DieWithError('--python currently only works with --full')
# Dart's formatter does not have the nice property of only operating on
# modified chunks, so hard code full.
if dart_diff_files:
try:
command = [dart_format.FindDartFmtToolInChromiumTree()]
if not opts.dry_run and not opts.diff:
command.append('-w')
command.extend(dart_diff_files)
stdout = RunCommand(command, cwd=top_dir)
if opts.dry_run and stdout:
return_value = 2
except dart_format.NotFoundError as e:
print('Warning: Unable to check Dart code formatting. Dart SDK not '
'found in this checkout. Files in other languages are still '
'formatted.')
# Format GN build files. Always run on full build files for canonical form.
if gn_diff_files:
cmd = ['gn', 'format' ]
if opts.dry_run or opts.diff:
cmd.append('--dry-run')
for gn_diff_file in gn_diff_files:
gn_ret = subprocess2.call(cmd + [gn_diff_file],
shell=sys.platform == 'win32',
cwd=top_dir)
if opts.dry_run and gn_ret == 2:
return_value = 2 # Not formatted.
elif opts.diff and gn_ret == 2:
# TODO this should compute and print the actual diff.
print("This change has GN build file diff for " + gn_diff_file)
elif gn_ret != 0:
# For non-dry run cases (and non-2 return values for dry-run), a
# nonzero error code indicates a failure, probably because the file
# doesn't parse.
DieWithError("gn format failed on " + gn_diff_file +
"\nTry running 'gn format' on this file manually.")
return return_value
@subcommand.usage('<codereview url or issue id>')
def CMDcheckout(parser, args):
"""Checks out a branch associated with a given Rietveld or Gerrit issue."""
_, args = parser.parse_args(args)
if len(args) != 1:
parser.print_help()
return 1
issue_arg = ParseIssueNumberArgument(args[0])
if not issue_arg.valid:
parser.print_help()
return 1
target_issue = str(issue_arg.issue)
def find_issues(issueprefix):
output = RunGit(['config', '--local', '--get-regexp',
r'branch\..*\.%s' % issueprefix],
error_ok=True)
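    # Each matching line has the form "branch.<name>.<issueprefix> <issue>",
    # so splitting on whitespace yields (config key, issue number) pairs.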
for key, issue in [x.split() for x in output.splitlines()]:
if issue == target_issue:
yield re.sub(r'branch\.(.*)\.%s' % issueprefix, r'\1', key)
branches = []
for cls in _CODEREVIEW_IMPLEMENTATIONS.values():
branches.extend(find_issues(cls.IssueConfigKey()))
if len(branches) == 0:
print('No branch found for issue %s.' % target_issue)
return 1
if len(branches) == 1:
RunGit(['checkout', branches[0]])
else:
print('Multiple branches match issue %s:' % target_issue)
for i in range(len(branches)):
print('%d: %s' % (i, branches[i]))
which = raw_input('Choose by index: ')
try:
RunGit(['checkout', branches[int(which)]])
except (IndexError, ValueError):
print('Invalid selection, not checking out any branch.')
return 1
return 0
def CMDlol(parser, args):
# This command is intentionally undocumented.
print(zlib.decompress(base64.b64decode(
'eNptkLEOwyAMRHe+wupCIqW57v0Vq84WqWtXyrcXnCBsmgMJ+/SSAxMZgRB6NzE'
'E2ObgCKJooYdu4uAQVffUEoE1sRQLxAcqzd7uK2gmStrll1ucV3uZyaY5sXyDd9'
'JAnN+lAXsOMJ90GANAi43mq5/VeeacylKVgi8o6F1SC63FxnagHfJUTfUYdCR/W'
'Ofe+0dHL7PicpytKP750Fh1q2qnLVof4w8OZWNY')))
return 0
class OptionParser(optparse.OptionParser):
"""Creates the option parse and add --verbose support."""
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(
self, *args, prog='git cl', version=__version__, **kwargs)
self.add_option(
'-v', '--verbose', action='count', default=0,
help='Use 2 times for more debugging info')
def parse_args(self, args=None, values=None):
options, args = optparse.OptionParser.parse_args(self, args, values)
levels = [logging.WARNING, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(options.verbose, len(levels) - 1)])
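    # Verbosity mapping: default -> WARNING, -v -> INFO, -vv (or more) -> DEBUG.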
return options, args
def main(argv):
if sys.hexversion < 0x02060000:
print('\nYour python version %s is unsupported, please upgrade.\n' %
(sys.version.split(' ', 1)[0],), file=sys.stderr)
return 2
# Reload settings.
global settings
settings = Settings()
colorize_CMDstatus_doc()
dispatcher = subcommand.CommandDispatcher(__name__)
try:
return dispatcher.execute(OptionParser(), argv)
except auth.AuthenticationError as e:
DieWithError(str(e))
except urllib2.HTTPError as e:
if e.code != 500:
raise
DieWithError(
('AppEngine is misbehaving and returned HTTP %d, again. Keep faith '
'and retry or visit go/isgaeup.\n%s') % (e.code, str(e)))
return 0
if __name__ == '__main__':
# These affect sys.stdout so do it outside of main() to simplify mocks in
# unit testing.
fix_encoding.fix_encoding()
setup_color.init()
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
|
the-stack_0_19260 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../src'))
sys.path.insert(0, os.path.abspath('../../src/models'))
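# These inserts let sphinx.ext.autodoc import the project packages; the src/
# and src/models/ layout is assumed from the repository structure.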
# -- Project information -----------------------------------------------------
project = 'CovidMDP'
copyright = '2020, Guilherme Yambanis'
author = 'Guilherme Yambanis'
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['rinoh.frontend.sphinx', 'sphinx.ext.autodoc']
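# rinoh.frontend.sphinx registers the rinohtype PDF builder, while
# sphinx.ext.autodoc extracts API documentation from the docstrings of the
# modules made importable above.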
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'CovidMDP.tex',
u'CovidMDP Documentation',
u"Guilherme, Denis, Leliane", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for HTML output -------------------------------------------------
html_theme = 'p-main_theme'
from PSphinxTheme import utils
p, html_theme, needs_sphinx = utils.set_psphinxtheme(html_theme)
html_theme_path = p
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
the-stack_0_19263 | #!/usr/bin/env python
from __future__ import print_function
import os
#import json
import simplejson as json
from argparse import ArgumentParser
from datetime import datetime
import time
from pprint import pprint
import numpy as np
from tools import diff_last, cost_val, load_kind, label_kind, MAX_COST, MAX_WASTE, \
outter_reproccessed
def json_at_year(filename, year, kind):
data = load_kind(filename, kind)
data = np.array(data[data['year'] == year])
return [{'name': k, 'size': data[k][0]} for k in data.dtype.names[1:] \
if data[k][0] > 0]
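# Example json_at_year output (column names depend on the input file):
#   [{'name': 'lwr', 'size': 1.8}, {'name': 'storage', 'size': 0.3}]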
def main_by_year():
parser = ArgumentParser()
parser.add_argument('year', type=int)
parser.add_argument('filenames', nargs="+")
parser.add_argument('-k', dest="kind", help="waste or cost", default="waste")
ns = parser.parse_args()
j = {'name': "Year {0}".format(ns.year), 'children': []}
for i, filename in enumerate(ns.filenames, 1):
j['children'].append({'name': 'Fuel Cycle {0}'.format(i),
'children': json_at_year(filename, ns.year, ns.kind),
})
jfname = "year{0}-{1}.json".format(ns.year, ns.kind)
s = json.dumps(j)
with open(jfname, 'w') as f:
f.write(s)
CAT_LABEL = "year {0}\n{1}"
def json_at_category(data, category, years, kind):
j = []
for row in data:
if row['year'] not in years:
continue
label = CAT_LABEL.format(row['year'], label_kind(row[category], kind))
j.append({'name': label, 'size': row[category]})
return j
def main_by_fc_cat_year():
parser = ArgumentParser()
parser.add_argument('filename')
parser.add_argument('years', nargs='+', type=int)
parser.add_argument('-k', dest="kind", help="waste or cost", default="waste")
ns = parser.parse_args()
years = set(ns.years)
data = load_kind(ns.filename, ns.kind)
j = {'name': "", 'children': []} # FC level
for category in data.dtype.names[1:]:
j['children'].append({'name': category,
'children': json_at_category(data, category,
years, ns.kind),
})
jfname = "info-{0}-{1}-{2}.json".format(os.path.splitext(ns.filename)[0],
"_".join(map(str, ns.years)), ns.kind)
s = json.dumps(j)
with open(jfname, 'w') as f:
f.write(s)
YEAR_CAT_LABEL = "{0}\n{1}"
def json_at_year_cat(data, year, kind):
#data = np.array(data[data['year'] == year])
d = data[data['year'] == year]
return [{'name': YEAR_CAT_LABEL.format(k, label_kind(d[k][0], kind)),
'size': d[k][0] / (MAX_COST if kind == "cost" else MAX_WASTE)} \
for k in d.dtype.names[1:] if d[k][0] > 0]
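# Sizes are normalized by MAX_COST / MAX_WASTE so charts for different fuel
# cycles share a common scale; the label keeps the raw value via label_kind().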
def main_by_fc_year_cat():
parser = ArgumentParser()
parser.add_argument('filename')
parser.add_argument('years', nargs='+', type=int)
parser.add_argument('-k', dest="kind", help="waste or cost", default="waste")
ns = parser.parse_args()
years = set(ns.years)
data = load_kind(ns.filename, ns.kind)
j = {'name': "", 'children': [], 'scale': 0.0} # FC level
for year in years:
j['children'].append({'name': "year {0}".format(year),
'children': json_at_year_cat(data, year, ns.kind),})
with open('/dev/null', 'a') as f:
# prevents weird numpy segfault
print(data, file=f)
j['scale'] = max(j['scale'], sum([c['size'] for c in j['children'][-1]['children']]))
if ns.kind == 'waste':
outter_reproccessed(j['children'][-1]['children'])
jfname = "info-{0}-{1}-{2}.json".format(os.path.splitext(ns.filename)[0],
"_".join(map(str, ns.years)), ns.kind)
s = json.dumps(j)
with open(jfname, 'w') as f:
f.write(s)
if __name__ == "__main__":
main_by_fc_year_cat()
|
the-stack_0_19265 | """
Simple check list from huggingface/transformers repo: https://github.com/huggingface/transformers/blob/master/setup.py
To create the package for pypi.
1. Change the version in setup.py and docs (if applicable).
2. Unpin specific versions from setup.py.
3. Commit these changes with the message: "Release: VERSION"
4. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
   Push the tag to git: git push --tags origin master
5. Build both the sources and the wheel. Do not change anything in setup.py between
   creating the wheel and the source distribution (obviously).
   For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
   (this will build a wheel for the python version you use to build it).
   For the sources, run: "python setup.py sdist"
   You should now have a /dist directory with both .whl and .tar.gz source versions.
6. Check that everything looks correct by uploading the package to the pypi test server:
   python3 -m twine upload --repository testpypi dist/*
   Alternatively:
   twine upload dist/* -r pypitest
   (pypi suggests using twine as other methods upload files via plaintext.)
   You may have to specify the repository url, use the following command then:
   twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
   Check that you can install it in a virtualenv by running:
   pip install -i https://testpypi.python.org/pypi jiant
7. Upload the final version to actual pypi:
   twine upload dist/* -r pypi
8. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
9. Add the release version to docs deployment (if applicable).
10. Update README.md to redirect to correct documentation.
"""
import shutil
from pathlib import Path
from setuptools import find_packages, setup
extras = {}
extras["testing"] = ["pytest", "pytest-cov", "pre-commit"]
extras["docs"] = ["sphinx"]
extras["quality"] = ["black == 19.10b0", "flake8-docstrings == 1.5.0", "flake8 >= 3.7.9", "mypy == 0.770"]
extras["dev"] = extras["testing"] + extras["quality"]
setup(
name="jiant",
version="2.0.1",
author="NYU Machine Learning for Language Group",
author_email="[email protected]",
description="State-of-the-art Natural Language Processing toolkit for multi-task and transfer learning built on PyTorch.",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google nyu datasets",
license="MIT",
url="https://github.com/nyu-mll/jiant",
packages=find_packages(exclude=["tests", "tests/*"]),
install_requires=[
"attrs == 19.3.0",
"bs4 == 0.0.1",
"jsonnet == 0.15.0",
"lxml == 4.5.1",
"datasets == 1.1.2",
"nltk >= 3.5",
"numexpr == 2.7.1",
"numpy == 1.18.4",
"pandas == 1.0.3",
"python-Levenshtein == 0.12.0",
"sacremoses == 0.0.43",
"seqeval == 0.0.12",
"scikit-learn == 0.22.2.post1",
"scipy == 1.4.1",
"sentencepiece == 0.1.86",
"tokenizers == 0.8.1.rc2",
"torch == 1.5.0",
"tqdm == 4.46.0",
"transformers == 3.0.2",
"torchvision == 0.6.0",
],
extras_require=extras,
python_requires=">=3.6.0",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
'License :: OSI Approved :: MIT License',
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
|