max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
code/cifar_eval.py | mlaai/mentornet | 306 | 11195896 |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluates a trained model.
See the README.md file for compilation and running instructions.
"""
import math
import os
import cifar_data_provider
import inception_model
import numpy as np
import resnet_model
import tensorflow as tf
import tensorflow.contrib.slim as slim
flags = tf.app.flags
flags.DEFINE_integer('batch_size', 25, 'The number of images in each batch.')
flags.DEFINE_string('data_dir', '', 'Data dir')
flags.DEFINE_string('dataset_name', 'cifar10', 'cifar10 or cifar100')
flags.DEFINE_string('studentnet', 'resnet101', 'inception or resnet101')
flags.DEFINE_string('master', None, 'BNS name of the TensorFlow master to use.')
flags.DEFINE_string('checkpoint_dir', '',
'Directory where the model was written to.')
flags.DEFINE_string('eval_dir', '', 'Directory where the results are saved to.')
flags.DEFINE_integer(
'eval_interval_secs', 600,
'The frequency, in seconds, with which evaluation is run.')
flags.DEFINE_string('split_name', 'test', """Either 'train' or 'test'.""")
flags.DEFINE_string('output_csv_file', '',
'The csv file where the results are saved.')
flags.DEFINE_string('device_id', '0', 'GPU device ID to run the job.')
FLAGS = flags.FLAGS
# Turn this on if there are no log outputs.
tf.logging.set_verbosity(tf.logging.INFO)
def eval_inception():
"""Evalautes the inception model."""
g = tf.Graph()
with g.as_default():
# pylint: disable=line-too-long
images, one_hot_labels, num_samples, num_of_classes = cifar_data_provider.provide_cifarnet_data(
FLAGS.dataset_name,
FLAGS.split_name,
FLAGS.batch_size,
dataset_dir=FLAGS.data_dir,
num_epochs=None)
# Define the model:
logits, end_points = inception_model.cifarnet(
images, num_of_classes, is_training=False, dropout_keep_prob=1.0)
images.set_shape([FLAGS.batch_size, 32, 32, 3])
predictions = tf.argmax(end_points['Predictions'], 1)
total_loss = tf.nn.softmax_cross_entropy_with_logits(
labels=one_hot_labels, logits=logits)
total_loss = tf.reduce_mean(total_loss, name='xent')
slim.summaries.add_scalar_summary(
total_loss, 'total_loss', print_summary=True)
# Define the metrics:
labels = tf.argmax(one_hot_labels, 1)
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'accuracy': tf.metrics.accuracy(predictions, labels),
})
for name, value in names_to_values.iteritems():
slim.summaries.add_scalar_summary(
value, name, prefix='eval', print_summary=True)
# This ensures that we make a single pass over all of the data.
num_batches = math.ceil(num_samples / float(FLAGS.batch_size))
# Limit gpu memory to run train and eval on the same gpu
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.45
slim.evaluation.evaluation_loop(
master=FLAGS.master,
checkpoint_dir=FLAGS.checkpoint_dir,
logdir=FLAGS.eval_dir,
num_evals=num_batches,
session_config=config,
eval_op=names_to_updates.values(),
eval_interval_secs=FLAGS.eval_interval_secs)
def eval_resnet():
"""Evaluates the resnet model."""
if not os.path.exists(FLAGS.eval_dir):
os.makedirs(FLAGS.eval_dir)
g = tf.Graph()
with g.as_default():
# pylint: disable=line-too-long
images, one_hot_labels, num_samples, num_of_classes = cifar_data_provider.provide_resnet_data(
FLAGS.dataset_name,
FLAGS.split_name,
FLAGS.batch_size,
dataset_dir=FLAGS.data_dir,
num_epochs=None)
hps = resnet_model.HParams(
batch_size=FLAGS.batch_size,
num_classes=num_of_classes,
min_lrn_rate=0.0001,
lrn_rate=0,
num_residual_units=9,
use_bottleneck=False,
weight_decay_rate=0.0002,
relu_leakiness=0.1,
optimizer='mom')
# Define the model:
images.set_shape([FLAGS.batch_size, 32, 32, 3])
resnet = resnet_model.ResNet(hps, images, one_hot_labels, mode='test')
logits = resnet.build_model()
total_loss = tf.nn.softmax_cross_entropy_with_logits(
labels=one_hot_labels, logits=logits)
total_loss = tf.reduce_mean(total_loss, name='xent')
slim.summaries.add_scalar_summary(
total_loss, 'total_loss', print_summary=True)
# Define the metrics:
predictions = tf.argmax(logits, 1)
labels = tf.argmax(one_hot_labels, 1)
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'accuracy': tf.metrics.accuracy(predictions, labels),
})
for name, value in names_to_values.iteritems():
slim.summaries.add_scalar_summary(
value, name, prefix='eval', print_summary=True)
# This ensures that we make a single pass over all of the data.
num_batches = math.ceil(num_samples / float(FLAGS.batch_size))
slim.evaluation.evaluation_loop(
master=FLAGS.master,
checkpoint_dir=FLAGS.checkpoint_dir,
logdir=FLAGS.eval_dir,
num_evals=num_batches,
eval_op=names_to_updates.values(),
eval_interval_secs=FLAGS.eval_interval_secs)
def extract_resnet_features(max_step_run=39000):
"""Not checked provide_resnet_noisy_data dataset might change."""
g = tf.Graph()
with g.as_default():
tf_global_step = tf.train.get_or_create_global_step()
# pylint: disable=line-too-long
images, one_hot_labels, num_examples, num_of_classes, clean_labels, image_ids = cifar_data_provider.provide_resnet_noisy_data(
FLAGS.dataset_name,
'train',
FLAGS.batch_size,
dataset_dir=FLAGS.data_dir)
hps = resnet_model.HParams(
batch_size=FLAGS.batch_size,
num_classes=num_of_classes,
min_lrn_rate=0.0001,
lrn_rate=0,
num_residual_units=9,
use_bottleneck=False,
weight_decay_rate=0.0002,
relu_leakiness=0.1,
optimizer='mom')
images.set_shape([FLAGS.batch_size, 32, 32, 3])
# Define the model:
resnet = resnet_model.ResNet(hps, images, one_hot_labels, mode='train')
logits = resnet.build_model()
# Specify the loss function:
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=one_hot_labels, logits=logits)
labels = tf.argmax(one_hot_labels, 1)
loss = tf.reshape(loss, [-1, 1])
epoch_step = tf.to_int32(
tf.floor(tf.divide(tf_global_step, max_step_run) * 100))
ckpt_model = FLAGS.checkpoint_dir
num_batches = int(math.ceil(num_examples / float(FLAGS.batch_size)))
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
ckpt_model,
tf.contrib.framework.get_variables_to_restore(include=['.*']))
outfile = open(FLAGS.output_csv_file, 'w')
with tf.Session() as sess:
with slim.queues.QueueRunners(sess):
init_fn(sess)
for _ in xrange(num_batches):
image_ids_val, epoch_step_val, labels_val, loss_val, clean_labels_val = sess.run(
[image_ids, epoch_step, labels, loss, clean_labels])
clean_labels_val = np.squeeze(clean_labels_val)
loss_val = np.squeeze(loss_val)
image_ids_val = np.squeeze(image_ids_val)
labels_val = np.squeeze(labels_val)
for k in range(FLAGS.batch_size):
outfile.write('{} {} {} {} {:5f}\n'.format(
int(image_ids_val[k]), epoch_step_val, labels_val[k],
clean_labels_val[k], loss_val[k]))
outfile.flush()
outfile.close()
def main(_):
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.device_id
if FLAGS.studentnet == 'resnet101':
eval_resnet()
elif FLAGS.studentnet == 'inception':
eval_inception()
else:
tf.logging.error('unknown backbone student network %s', FLAGS.studentnet)
if __name__ == '__main__':
tf.app.run()
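# Example invocation (hypothetical paths; flag values shown are illustrative only):
#   python cifar_eval.py --dataset_name=cifar10 --studentnet=resnet101 \
#     --data_dir=/tmp/cifar10 --checkpoint_dir=/tmp/ckpt --eval_dir=/tmp/eval --device_id=0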
|
preprocess.py | Shengqi-Pan/deepke | 173 | 11195910 | import os
import logging
from collections import OrderedDict
from typing import List, Dict
from transformers import BertTokenizer
from serializer import Serializer
from vocab import Vocab
from utils import save_pkl, load_csv
logger = logging.getLogger(__name__)
def _handle_pos_limit(pos: List[int], limit: int) -> List[int]:
for i, p in enumerate(pos):
if p > limit:
pos[i] = limit
if p < -limit:
pos[i] = -limit
return [p + limit + 1 for p in pos]
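# Illustrative example (hypothetical values): _handle_pos_limit([-5, -1, 0, 3], 2)
# clips the relative positions to [-2, -1, 0, 2] and shifts them by limit + 1,
# returning [1, 2, 3, 5], so every encoded position is strictly positive.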
def _add_pos_seq(train_data: List[Dict], cfg):
for d in train_data:
entities_idx = [d['head_idx'], d['tail_idx']
] if d['head_idx'] < d['tail_idx'] else [d['tail_idx'], d['head_idx']]
d['head_pos'] = list(map(lambda i: i - d['head_idx'], list(range(d['seq_len']))))
d['head_pos'] = _handle_pos_limit(d['head_pos'], int(cfg.pos_limit))
d['tail_pos'] = list(map(lambda i: i - d['tail_idx'], list(range(d['seq_len']))))
d['tail_pos'] = _handle_pos_limit(d['tail_pos'], int(cfg.pos_limit))
if cfg.model_name == 'cnn':
if cfg.use_pcnn:
# PCNN cannot be used when the sentence cannot be split into three segments,
# e.g. [head, ... tail] or [... head, tail, ...] cannot be masked into segments in a uniform way
d['entities_pos'] = [1] * (entities_idx[0] + 1) + [2] * (entities_idx[1] - entities_idx[0] - 1) +\
[3] * (d['seq_len'] - entities_idx[1])
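# Illustrative example (hypothetical values): with seq_len=6 and entity indices
# 1 and 4, entities_pos == [1, 1, 2, 2, 3, 3]: segment 1 runs up to and including
# the first entity, segment 2 covers the span between the entities, and segment 3
# starts at the second entity.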
def _convert_tokens_into_index(data: List[Dict], vocab):
unk_str = '[UNK]'
unk_idx = vocab.word2idx[unk_str]
for d in data:
d['token2idx'] = [vocab.word2idx.get(i, unk_idx) for i in d['tokens']]
d['seq_len'] = len(d['token2idx'])
def _serialize_sentence(data: List[Dict], serial, cfg):
for d in data:
sent = d['sentence'].strip()
sent = sent.replace(d['head'], ' head ', 1).replace(d['tail'], ' tail ', 1)
d['tokens'] = serial(sent, never_split=['head', 'tail'])
head_idx, tail_idx = d['tokens'].index('head'), d['tokens'].index('tail')
d['head_idx'], d['tail_idx'] = head_idx, tail_idx
if cfg.replace_entity_with_type:
if cfg.replace_entity_with_scope:
d['tokens'][head_idx], d['tokens'][tail_idx] = 'HEAD_' + d['head_type'], 'TAIL_' + d['tail_type']
else:
d['tokens'][head_idx], d['tokens'][tail_idx] = d['head_type'], d['tail_type']
else:
if cfg.replace_entity_with_scope:
d['tokens'][head_idx], d['tokens'][tail_idx] = 'HEAD', 'TAIL'
else:
d['tokens'][head_idx], d['tokens'][tail_idx] = d['head'], d['tail']
def _lm_serialize(data: List[Dict], cfg):
logger.info('use bert tokenizer...')
tokenizer = BertTokenizer.from_pretrained(cfg.lm_file)
for d in data:
sent = d['sentence'].strip()
sent = sent.replace(d['head'], d['head_type'], 1).replace(d['tail'], d['tail_type'], 1)
sent += '[SEP]' + d['head'] + '[SEP]' + d['tail']
d['token2idx'] = tokenizer.encode(sent, add_special_tokens=True)
d['seq_len'] = len(d['token2idx'])
def _add_relation_data(rels: Dict, data: List) -> None:
for d in data:
d['rel2idx'] = rels[d['relation']]['index']
d['head_type'] = rels[d['relation']]['head_type']
d['tail_type'] = rels[d['relation']]['tail_type']
def _handle_relation_data(relation_data: List[Dict]) -> Dict:
rels = OrderedDict()
relation_data = sorted(relation_data, key=lambda i: int(i['index']))
for d in relation_data:
rels[d['relation']] = {
'index': int(d['index']),
'head_type': d['head_type'],
'tail_type': d['tail_type'],
}
return rels
def preprocess(cfg):
logger.info('===== start preprocess data =====')
train_fp = os.path.join(cfg.cwd, cfg.data_path, 'train.csv')
valid_fp = os.path.join(cfg.cwd, cfg.data_path, 'valid.csv')
test_fp = os.path.join(cfg.cwd, cfg.data_path, 'test.csv')
relation_fp = os.path.join(cfg.cwd, cfg.data_path, 'relation.csv')
logger.info('load raw files...')
train_data = load_csv(train_fp)
valid_data = load_csv(valid_fp)
test_data = load_csv(test_fp)
relation_data = load_csv(relation_fp)
logger.info('convert relation into index...')
rels = _handle_relation_data(relation_data)
_add_relation_data(rels, train_data)
_add_relation_data(rels, valid_data)
_add_relation_data(rels, test_data)
logger.info('verify whether use pretrained language models...')
if cfg.model_name == 'lm':
logger.info('use pretrained language models serialize sentence...')
_lm_serialize(train_data, cfg)
_lm_serialize(valid_data, cfg)
_lm_serialize(test_data, cfg)
else:
logger.info('serialize sentence into tokens...')
serializer = Serializer(do_chinese_split=cfg.chinese_split, do_lower_case=True)
serial = serializer.serialize
_serialize_sentence(train_data, serial, cfg)
_serialize_sentence(valid_data, serial, cfg)
_serialize_sentence(test_data, serial, cfg)
logger.info('build vocabulary...')
vocab = Vocab('word')
train_tokens = [d['tokens'] for d in train_data]
valid_tokens = [d['tokens'] for d in valid_data]
test_tokens = [d['tokens'] for d in test_data]
sent_tokens = [*train_tokens, *valid_tokens, *test_tokens]
for sent in sent_tokens:
vocab.add_words(sent)
vocab.trim(min_freq=cfg.min_freq)
logger.info('convert tokens into index...')
_convert_tokens_into_index(train_data, vocab)
_convert_tokens_into_index(valid_data, vocab)
_convert_tokens_into_index(test_data, vocab)
logger.info('build position sequence...')
_add_pos_seq(train_data, cfg)
_add_pos_seq(valid_data, cfg)
_add_pos_seq(test_data, cfg)
logger.info('save data for backup...')
os.makedirs(os.path.join(cfg.cwd, cfg.out_path), exist_ok=True)
train_save_fp = os.path.join(cfg.cwd, cfg.out_path, 'train.pkl')
valid_save_fp = os.path.join(cfg.cwd, cfg.out_path, 'valid.pkl')
test_save_fp = os.path.join(cfg.cwd, cfg.out_path, 'test.pkl')
save_pkl(train_data, train_save_fp)
save_pkl(valid_data, valid_save_fp)
save_pkl(test_data, test_save_fp)
if cfg.model_name != 'lm':
vocab_save_fp = os.path.join(cfg.cwd, cfg.out_path, 'vocab.pkl')
vocab_txt = os.path.join(cfg.cwd, cfg.out_path, 'vocab.txt')
save_pkl(vocab, vocab_save_fp)
logger.info('save vocab in txt file, for watching...')
with open(vocab_txt, 'w', encoding='utf-8') as f:
f.write(os.linesep.join(vocab.word2idx.keys()))
logger.info('===== end preprocess data =====')
if __name__ == '__main__':
pass
|
xero/basemanager.py | Ian2020/pyxero | 246 | 11195923 | from __future__ import unicode_literals
import json
import requests
import six
from datetime import datetime
from six.moves.urllib.parse import parse_qs
from xml.etree.ElementTree import Element, SubElement, tostring
from xml.parsers.expat import ExpatError
from .auth import OAuth2Credentials
from .exceptions import (
XeroBadRequest,
XeroExceptionUnknown,
XeroForbidden,
XeroInternalError,
XeroNotAvailable,
XeroNotFound,
XeroNotImplemented,
XeroRateLimitExceeded,
XeroTenantIdNotSet,
XeroUnauthorized,
)
from .utils import isplural, json_load_object_hook, singular
class BaseManager(object):
DECORATED_METHODS = (
"get",
"save",
"filter",
"all",
"put",
"delete",
"get_history",
"put_history",
"get_attachments",
"get_attachment_data",
"put_attachment_data",
)
OBJECT_DECORATED_METHODS = {
"Invoices": ["email", "online_invoice"],
}
DATETIME_FIELDS = (
"UpdatedDateUTC",
"Updated",
"FullyPaidOnDate",
"DateTimeUTC",
"CreatedDateUTC",
"JournalDate",
)
DATE_FIELDS = (
"DueDate",
"Date",
"PaymentDate",
"StartDate",
"EndDate",
"PeriodLockDate",
"DateOfBirth",
"OpeningBalanceDate",
"PaymentDueDate",
"ReportingDate",
"DeliveryDate",
"ExpectedArrivalDate",
)
BOOLEAN_FIELDS = (
"IsSupplier",
"IsCustomer",
"IsDemoCompany",
"PaysTax",
"IsAuthorisedToApproveTimesheets",
"IsAuthorisedToApproveLeave",
"HasHELPDebt",
"AustralianResidentForTaxPurposes",
"TaxFreeThresholdClaimed",
"HasSFSSDebt",
"EligibleToReceiveLeaveLoading",
"IsExemptFromTax",
"IsExemptFromSuper",
"SentToContact",
"IsSubscriber",
"HasAttachments",
"ShowOnCashBasisReports",
"IncludeInEmails",
"SentToContact",
"CanApplyToRevenue",
"CanApplyToLiabilities",
"CanApplyToExpenses",
"CanApplyToEquity",
"CanApplyToAssets",
"IsReconciled",
"EnablePaymentsToAccount",
"ShowInExpenseClaims",
"DiscountEnteredAsPercent",
"IsPurchased",
"IsSold",
"IsTrackedAsInventory",
)
DECIMAL_FIELDS = (
"Hours",
"NumberOfUnit",
)
INTEGER_FIELDS = (
"FinancialYearEndDay",
"FinancialYearEndMonth",
)
NO_SEND_FIELDS = (
"UpdatedDateUTC",
"HasValidationErrors",
"IsDiscounted",
"DateString",
"HasErrors",
"DueDateString",
"HasAccount",
)
OPERATOR_MAPPINGS = {
"gt": ">",
"lt": "<",
"lte": "<=",
"gte": ">=",
"ne": "!=",
}
def __init__(self):
pass
def dict_to_xml(self, root_elm, data):
for key in data.keys():
# Xero will complain if we send back these fields.
if key in self.NO_SEND_FIELDS:
continue
sub_data = data[key]
elm = SubElement(root_elm, key)
# Key references a dict. Unroll the dict
# as its own XML node with subnodes
if isinstance(sub_data, dict):
self.dict_to_xml(elm, sub_data)
# Key references a list/tuple
elif isinstance(sub_data, list) or isinstance(sub_data, tuple):
# key name is a plural. This means each item
# in the list needs to be wrapped in an XML
# node that is a singular version of the list name.
if isplural(key):
for d in sub_data:
self.dict_to_xml(SubElement(elm, singular(key)), d)
# key name isn't a plural. Just insert the content
# as an XML node with subnodes
else:
for d in sub_data:
self.dict_to_xml(elm, d)
# Normal element - just insert the data.
else:
if key in self.BOOLEAN_FIELDS:
val = "true" if sub_data else "false"
elif key in self.DATE_FIELDS:
val = sub_data.strftime("%Y-%m-%dT%H:%M:%S")
else:
val = six.text_type(sub_data)
elm.text = val
return root_elm
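# Illustrative example (hypothetical data; assumes singular("LineItems") yields "LineItem"):
#   data = {"Reference": "INV-001", "LineItems": [{"Description": "Widget"}]}
#   root = self.dict_to_xml(Element("Invoice"), data)
#   # tostring(root), formatted here for readability:
#   #   <Invoice>
#   #     <Reference>INV-001</Reference>
#   #     <LineItems><LineItem><Description>Widget</Description></LineItem></LineItems>
#   #   </Invoice>
#   # "LineItems" is plural, so each list element is wrapped in a singular node.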
def _prepare_data_for_save(self, data):
if isinstance(data, list) or isinstance(data, tuple):
root_elm = Element(self.name)
for d in data:
sub_elm = SubElement(root_elm, self.singular)
self.dict_to_xml(sub_elm, d)
else:
root_elm = self.dict_to_xml(Element(self.singular), data)
# In python3 this seems to return a bytestring
return six.u(tostring(root_elm))
def _parse_api_response(self, response, resource_name):
data = json.loads(response.text, object_hook=json_load_object_hook)
assert data["Status"] == "OK", (
"Expected the API to say OK but received %s" % data["Status"]
)
try:
return data[resource_name]
except KeyError:
return data
def _get_data(self, func):
""" This is the decorator for our DECORATED_METHODS.
Each of the decorated methods must return:
uri, params, method, body, headers, singleobject
"""
def wrapper(*args, **kwargs):
timeout = kwargs.pop("timeout", None)
uri, params, method, body, headers, singleobject = func(*args, **kwargs)
if headers is None:
headers = {}
headers["Content-Type"] = "application/xml"
if isinstance(self.credentials, OAuth2Credentials):
if self.credentials.tenant_id:
headers["Xero-tenant-id"] = self.credentials.tenant_id
else:
raise XeroTenantIdNotSet
# Use the JSON API by default, but remember we might request a PDF (application/pdf)
# so don't force the Accept header.
if "Accept" not in headers:
headers["Accept"] = "application/json"
# Set a user-agent so Xero knows the traffic is coming from pyxero
# or individual user/partner
headers["User-Agent"] = self.user_agent
response = getattr(requests, method)(
uri,
data=body,
headers=headers,
auth=self.credentials.oauth,
params=params,
timeout=timeout,
)
if response.status_code == 200:
# If we haven't got XML or JSON, assume we're being returned a
# binary file
if not response.headers["content-type"].startswith("application/json"):
return response.content
return self._parse_api_response(response, self.name)
elif response.status_code == 204:
return response.content
elif response.status_code == 400:
try:
raise XeroBadRequest(response)
except (ValueError, ExpatError):
raise XeroExceptionUnknown(
response, msg="Unable to parse Xero API response"
)
elif response.status_code == 401:
raise XeroUnauthorized(response)
elif response.status_code == 403:
raise XeroForbidden(response)
elif response.status_code == 404:
raise XeroNotFound(response)
elif response.status_code == 429:
limit_reason = response.headers.get("X-Rate-Limit-Problem") or "unknown"
payload = {"oauth_problem": ["rate limit exceeded: " + limit_reason],
"oauth_problem_advice": ["please wait before retrying the xero api",
"The limit exceeded is: " + limit_reason]}
raise XeroRateLimitExceeded(response, payload)
elif response.status_code == 500:
raise XeroInternalError(response)
elif response.status_code == 501:
raise XeroNotImplemented(response)
elif response.status_code == 503:
# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
payload = parse_qs(response.text)
if payload:
raise XeroRateLimitExceeded(response, payload)
else:
raise XeroNotAvailable(response)
else:
raise XeroExceptionUnknown(response)
return wrapper
def _get(self, id, headers=None, params=None):
uri = "/".join([self.base_url, self.name, id])
uri_params = self.extra_params.copy()
uri_params.update(params if params else {})
return uri, uri_params, "get", None, headers, True
def _get_history(self, id):
uri = "/".join([self.base_url, self.name, id, "history"]) + "/"
return uri, {}, "get", None, None, False
def _get_attachments(self, id):
"""Retrieve a list of attachments associated with this Xero object."""
uri = "/".join([self.base_url, self.name, id, "Attachments"]) + "/"
return uri, {}, "get", None, None, False
def _get_attachment_data(self, id, filename):
"""
Retrieve the contents of a specific attachment (identified by filename).
"""
uri = "/".join([self.base_url, self.name, id, "Attachments", filename])
return uri, {}, "get", None, None, False
def get_attachment(self, id, filename, file):
"""
Retrieve the contents of a specific attachment (identified by filename).
Writes data to file object, returns length of data written.
"""
data = self.get_attachment_data(id, filename)
file.write(data)
return len(data)
def _email(self, id):
uri = "/".join([self.base_url, self.name, id, "Email"])
return uri, {}, "post", None, None, True
def _online_invoice(self, id):
uri = "/".join([self.base_url, self.name, id, "OnlineInvoice"])
return uri, {}, "get", None, None, True
def save_or_put(self, data, method="post", headers=None, summarize_errors=True):
uri = "/".join([self.base_url, self.name])
body = self._prepare_data_for_save(data)
params = self.extra_params.copy()
if not summarize_errors:
params["summarizeErrors"] = "false"
return uri, params, method, body, headers, False
def _save(self, data):
return self.save_or_put(data, method="post")
def _put(self, data, summarize_errors=True):
return self.save_or_put(data, method="put", summarize_errors=summarize_errors)
def _delete(self, id):
uri = "/".join([self.base_url, self.name, id])
return uri, {}, "delete", None, None, False
def _put_history_data(self, id, details):
"""Add a history note to the Xero object."""
uri = "/".join([self.base_url, self.name, id, "history"])
details_data = {"Details": details}
root_elm = Element("HistoryRecord")
self.dict_to_xml(root_elm, details_data)
data = six.u(tostring(root_elm))
return uri, {}, "put", data, None, False
def _put_history(self, id, details):
"""Upload a history note to the Xero object."""
return self._put_history_data(id, details)
def _put_attachment_data(
self, id, filename, data, content_type, include_online=False
):
"""Upload an attachment to the Xero object."""
uri = "/".join([self.base_url, self.name, id, "Attachments", filename])
params = {"IncludeOnline": "true"} if include_online else {}
headers = {"Content-Type": content_type, "Content-Length": str(len(data))}
return uri, params, "put", data, headers, False
def put_attachment(self, id, filename, file, content_type, include_online=False):
"""Upload an attachment to the Xero object (from file object)."""
return self.put_attachment_data(
id, filename, file.read(), content_type, include_online=include_online
)
def prepare_filtering_date(self, val):
if isinstance(val, datetime):
val = val.strftime("%a, %d %b %Y %H:%M:%S GMT")
else:
val = '"%s"' % val
return {"If-Modified-Since": val}
def _filter(self, **kwargs):
params = self.extra_params.copy()
headers = None
uri = "/".join([self.base_url, self.name])
if kwargs:
if "since" in kwargs:
val = kwargs["since"]
headers = self.prepare_filtering_date(val)
del kwargs["since"]
# Accept IDs parameter for Invoices and Contacts endpoints
if "IDs" in kwargs:
params["IDs"] = ",".join(kwargs["IDs"])
del kwargs["IDs"]
def get_filter_params(key, value):
last_key = key.split("_")[-1]
if last_key.endswith("ID"):
return 'Guid("%s")' % six.text_type(value)
if key in self.BOOLEAN_FIELDS:
return "true" if value else "false"
elif key in self.DATE_FIELDS:
return "DateTime(%s,%s,%s)" % (value.year, value.month, value.day)
elif key in self.DATETIME_FIELDS:
return value.isoformat()
else:
return '"%s"' % six.text_type(value)
def generate_param(key, value):
parts = key.split("__")
field = key.replace("_", ".")
fmt = "%s==%s"
if len(parts) == 2:
# support filters:
# Name__Contains=John becomes Name.Contains("John")
if parts[1] in ["contains", "startswith", "endswith"]:
field = parts[0]
fmt = "".join(["%s.", parts[1], "(%s)"])
elif parts[1] in ["tolower", "toupper"]:
field = parts[0]
fmt = "".join(["%s.", parts[1], "()==%s"])
elif parts[1] in self.OPERATOR_MAPPINGS:
field = parts[0]
key = field
fmt = "%s" + self.OPERATOR_MAPPINGS[parts[1]] + "%s"
elif parts[1] in ["isnull"]:
sign = "=" if value else "!"
return "%s%s=null" % (parts[0], sign)
field = field.replace("_", ".")
return fmt % (field, get_filter_params(key, value))
# Move any known parameter names to the query string
KNOWN_PARAMETERS = ["order", "offset", "page", "includeArchived"]
for param in KNOWN_PARAMETERS:
if param in kwargs:
params[param] = kwargs.pop(param)
filter_params = []
if "raw" in kwargs:
raw = kwargs.pop("raw")
filter_params.append(raw)
# Treat any remaining arguments as filter predicates
# Xero will break if you search without a check for null in the first position:
# http://developer.xero.com/documentation/getting-started/http-requests-and-responses/#title3
sortedkwargs = sorted(
six.iteritems(kwargs), key=lambda item: -1 if "isnull" in item[0] else 0
)
for key, value in sortedkwargs:
filter_params.append(generate_param(key, value))
if filter_params:
params["where"] = "&&".join(filter_params)
return uri, params, "get", None, headers, False
def _all(self):
uri = "/".join([self.base_url, self.name])
return uri, {}, "get", None, None, False
|
exercises/zh/exc_02_05_03.py | Jette16/spacy-course | 2,085 | 11195934 |
from spacy.lang.en import English
nlp = English()
# Import the Doc class
from ____ import ____
# Target text: "Oh, really?!"
words = [____, ____, ____, ____, ____]
spaces = [____, ____, ____, ____, ____]
# Create a Doc from the words and spaces
doc = ____(____, ____=____, ____=____)
print(doc.text)
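# One possible completion of the blanks above, kept as a comment for reference
# (it assumes the standard spaCy Doc API):
#   from spacy.tokens import Doc
#   words = ["Oh", ",", "really", "?", "!"]
#   spaces = [False, True, False, False, False]
#   doc = Doc(nlp.vocab, words=words, spaces=spaces)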
|
Chapter07/face_detection.py | lebull/Neural-Network-Projects-with-Python | 269 | 11195936 | import cv2
import os
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
def detect_faces(img, draw_box=True):
# convert image to grayscale
grayscale_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# detect faces
faces = face_cascade.detectMultiScale(grayscale_img, scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE)
face_box, face_coords = None, []
for (x, y, w, h) in faces:
if draw_box:
cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 5)
face_box = img[y:y+h, x:x+w]
face_coords = [x,y,w,h]
return img, face_box, face_coords
if __name__ == "__main__":
files = os.listdir('sample_faces')
images = [file for file in files if 'jpg' in file]
for image in images:
img = cv2.imread('sample_faces/' + image)
detected_faces, _, _ = detect_faces(img)
cv2.imwrite('sample_faces/detected_faces/' + image, detected_faces)
|
bookwyrm/connectors/connector_manager.py | mouse-reeve/fedireads | 270 | 11195937 | """ interface with whatever connectors the app has """
import asyncio
import importlib
import ipaddress
import logging
from urllib.parse import urlparse
import aiohttp
from django.dispatch import receiver
from django.db.models import signals
from requests import HTTPError
from bookwyrm import book_search, models
from bookwyrm.settings import SEARCH_TIMEOUT, USER_AGENT
from bookwyrm.tasks import app
logger = logging.getLogger(__name__)
class ConnectorException(HTTPError):
"""when the connector can't do what was asked"""
async def get_results(session, url, min_confidence, query, connector):
"""try this specific connector"""
# pylint: disable=line-too-long
headers = {
"Accept": (
'application/json, application/activity+json, application/ld+json; profile="https://www.w3.org/ns/activitystreams"; charset=utf-8'
),
"User-Agent": USER_AGENT,
}
params = {"min_confidence": min_confidence}
try:
async with session.get(url, headers=headers, params=params) as response:
if not response.ok:
logger.info("Unable to connect to %s: %s", url, response.reason)
return
try:
raw_data = await response.json()
except aiohttp.client_exceptions.ContentTypeError as err:
logger.exception(err)
return
return {
"connector": connector,
"results": connector.process_search_response(
query, raw_data, min_confidence
),
}
except asyncio.TimeoutError:
logger.info("Connection timed out for url: %s", url)
except aiohttp.ClientError as err:
logger.exception(err)
async def async_connector_search(query, items, min_confidence):
"""Try a number of requests simultaneously"""
timeout = aiohttp.ClientTimeout(total=SEARCH_TIMEOUT)
async with aiohttp.ClientSession(timeout=timeout) as session:
tasks = []
for url, connector in items:
tasks.append(
asyncio.ensure_future(
get_results(session, url, min_confidence, query, connector)
)
)
results = await asyncio.gather(*tasks)
return results
def search(query, min_confidence=0.1, return_first=False):
"""find books based on arbitary keywords"""
if not query:
return []
results = []
items = []
for connector in get_connectors():
# get the search url from the connector before sending
url = connector.get_search_url(query)
try:
raise_not_valid_url(url)
except ConnectorException:
# if this URL is invalid we should skip it and move on
logger.info("Request denied to blocked domain: %s", url)
continue
items.append((url, connector))
# load as many results as we can
results = asyncio.run(async_connector_search(query, items, min_confidence))
results = [r for r in results if r]
if return_first:
# find the best result from all the responses and return that
all_results = [r for con in results for r in con["results"]]
all_results = sorted(all_results, key=lambda r: r.confidence, reverse=True)
return all_results[0] if all_results else None
# failed requests will return None, so filter those out
return results
def first_search_result(query, min_confidence=0.1):
"""search until you find a result that fits"""
# try local search first
result = book_search.search(query, min_confidence=min_confidence, return_first=True)
if result:
return result
# otherwise, try remote endpoints
return search(query, min_confidence=min_confidence, return_first=True) or None
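# Hypothetical usage (the query string is illustrative only):
#   best = first_search_result("The Fifth Season", min_confidence=0.5)
#   grouped = search("The Fifth Season")  # list of {"connector": ..., "results": [...]}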
def get_connectors():
"""load all connectors"""
for info in models.Connector.objects.filter(active=True).order_by("priority").all():
yield load_connector(info)
def get_or_create_connector(remote_id):
"""get the connector related to the object's server"""
url = urlparse(remote_id)
identifier = url.netloc
if not identifier:
raise ValueError("Invalid remote id")
try:
connector_info = models.Connector.objects.get(identifier=identifier)
except models.Connector.DoesNotExist:
connector_info = models.Connector.objects.create(
identifier=identifier,
connector_file="bookwyrm_connector",
base_url=f"https://{identifier}",
books_url=f"https://{identifier}/book",
covers_url=f"https://{identifier}/images/covers",
search_url=f"https://{identifier}/search?q=",
priority=2,
)
return load_connector(connector_info)
@app.task(queue="low_priority")
def load_more_data(connector_id, book_id):
"""background the work of getting all 10,000 editions of LoTR"""
connector_info = models.Connector.objects.get(id=connector_id)
connector = load_connector(connector_info)
book = models.Book.objects.select_subclasses().get(id=book_id)
connector.expand_book_data(book)
def load_connector(connector_info):
"""instantiate the connector class"""
connector = importlib.import_module(
f"bookwyrm.connectors.{connector_info.connector_file}"
)
return connector.Connector(connector_info.identifier)
@receiver(signals.post_save, sender="bookwyrm.FederatedServer")
# pylint: disable=unused-argument
def create_connector(sender, instance, created, *args, **kwargs):
"""create a connector to an external bookwyrm server"""
if instance.application_type == "bookwyrm":
get_or_create_connector(f"https://{instance.server_name}")
def raise_not_valid_url(url):
"""do some basic reality checks on the url"""
parsed = urlparse(url)
if not parsed.scheme in ["http", "https"]:
raise ConnectorException("Invalid scheme: ", url)
try:
ipaddress.ip_address(parsed.netloc)
raise ConnectorException("Provided url is an IP address: ", url)
except ValueError:
# it's not an IP address, which is good
pass
if models.FederatedServer.is_blocked(url):
raise ConnectorException(f"Attempting to load data from blocked url: {url}")
|
Trakttv.bundle/Contents/Libraries/Shared/trakt/core/errors.py | disrupted/Trakttv.bundle | 1,346 | 11195940 | from six.moves.urllib.parse import urlparse
ERRORS = {
400: ("Bad Request", "Request couldn't be parsed"),
401: ("Unauthorized", "OAuth must be provided"),
403: ("Forbidden", "Invalid API key or unapproved app"),
404: ("Not Found", "Method exists, but no record found"),
405: ("Method Not Found", "Method doesn't exist"),
409: ("Conflict", "Resource already created"),
412: ("Precondition Failed", "Use application/json content type"),
422: ("Unprocessible Entity", "Validation error"),
429: ("Rate Limit Exceeded", "Rate limit exceeded"),
500: ("Server Error", "Server error"),
502: ("Bad Gateway", "Server unavailable"),
503: ("Service Unavailable", "Server overloaded (try again in 30s)"),
504: ("Service Unavailable", "Server overloaded (try again in 30s)"),
520: ("Service Unavailable", "CloudFlare: Web server is returning an unknown error"),
521: ("Service Unavailable", "CloudFlare: Web server is down"),
522: ("Service Unavailable", "CloudFlare: Connection timed out"),
524: ("Service Unavailable", "CloudFlare: A timeout occurred")
}
def log_request_error(logger, response):
request = response.request
# Lookup status code in trakt error definitions
name, desc = ERRORS.get(response.status_code, ("Unknown", "Unknown"))
# Build message
if request:
method = request.method
path = urlparse(request.url).path
message = 'Request failed: "%s %s" - %s: "%%s" (%%s)' % (method, path, response.status_code)
else:
message = 'Request failed: %s: "%%s" (%%s)' % (response.status_code,)
# Log warning
logger.warn(message, desc, name, extra={
'data': {
'http.headers': {
'cf-ray': response.headers.get('cf-ray'),
'X-Request-Id': response.headers.get('X-Request-Id'),
'X-Runtime': response.headers.get('X-Runtime')
}
}
})
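# Hypothetical usage, assuming a requests response object and a module-level logger:
#   response = session.get(url)
#   if not response.ok:
#       log_request_error(logger, response)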
|
platformio/commands/remote/projectsync.py | Maniekkk/platformio-core | 4,744 | 11195941 | # Copyright (c) 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tarfile
from binascii import crc32
from os.path import getmtime, getsize, isdir, isfile, join
from twisted.python import constants # pylint: disable=import-error
from platformio.compat import hashlib_encode_data
class PROJECT_SYNC_STAGE(constants.Flags):
INIT = constants.FlagConstant()
DBINDEX = constants.FlagConstant()
DELETE = constants.FlagConstant()
UPLOAD = constants.FlagConstant()
EXTRACTED = constants.FlagConstant()
COMPLETED = constants.FlagConstant()
class ProjectSync(object):
def __init__(self, path):
self.path = path
if not isdir(self.path):
os.makedirs(self.path)
self.items = []
self._db = {}
def add_item(self, path, relpath, cb_filter=None):
self.items.append((path, relpath, cb_filter))
def get_items(self):
return self.items
def rebuild_dbindex(self):
self._db = {}
for (path, relpath, cb_filter) in self.items:
if cb_filter and not cb_filter(path):
continue
self._insert_to_db(path, relpath)
if not isdir(path):
continue
for (root, _, files) in os.walk(path, followlinks=True):
for name in files:
self._insert_to_db(
join(root, name), join(relpath, root[len(path) + 1 :], name)
)
def _insert_to_db(self, path, relpath):
if not isfile(path):
return
index_hash = "%s-%s-%s" % (relpath, getmtime(path), getsize(path))
index = crc32(hashlib_encode_data(index_hash))
self._db[index] = (path, relpath)
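# Illustrative example (hypothetical file): a file synced under the relative path
# "src/main.cpp" with mtime 1700000000.0 and size 2048 is indexed as
# crc32(hashlib_encode_data("src/main.cpp-1700000000.0-2048")), so changing its
# path, modification time, or size yields a new index entry.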
def get_dbindex(self):
return list(self._db.keys())
def delete_dbindex(self, dbindex):
for index in dbindex:
if index not in self._db:
continue
path = self._db[index][0]
if isfile(path):
os.remove(path)
del self._db[index]
self.delete_empty_folders()
return True
def delete_empty_folders(self):
deleted = False
for item in self.items:
if not isdir(item[0]):
continue
for root, dirs, files in os.walk(item[0]):
if not dirs and not files and root != item[0]:
deleted = True
os.rmdir(root)
if deleted:
return self.delete_empty_folders()
return True
def compress_items(self, fileobj, dbindex, max_size):
compressed = []
total_size = 0
tar_opts = dict(fileobj=fileobj, mode="w:gz", bufsize=0, dereference=True)
with tarfile.open(**tar_opts) as tgz:
for index in dbindex:
compressed.append(index)
if index not in self._db:
continue
path, relpath = self._db[index]
tgz.add(path, relpath)
total_size += getsize(path)
if total_size > max_size:
break
return compressed
def decompress_items(self, fileobj):
fileobj.seek(0)
with tarfile.open(fileobj=fileobj, mode="r:gz") as tgz:
tgz.extractall(self.path)
return True
|
Face Reconstruction/Fast Few-shot Face alignment by Reconstruction/datasets/w300.py | swapnilgarg7/Face-X | 175 | 11195944 | import os
import numpy as np
import torch.utils.data as td
import pandas as pd
import config
from csl_common.utils.nn import Batch
from csl_common.utils import geometry
from datasets import facedataset
def read_300W_detection(lmFilepath):
lms = []
with open(lmFilepath) as f:
for line in f:
try:
x,y = [float(e) for e in line.split()]
lms.append((x, y))
except:
pass
assert(len(lms) == 68)
landmarks = np.vstack(lms)
return landmarks
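# The .pts files shipped with 300-W typically look like the hypothetical snippet
# below; the loop above skips the header and brace lines because they do not
# parse as two floats:
#   version: 1
#   n_points: 68
#   {
#   446.000 642.000
#   ...
#   }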
class W300(facedataset.FaceDataset):
CROP_SOURCES = ['bb_detector', 'bb_ground_truth', 'lm_ground_truth']
NUM_LANDMARKS = 68
ALL_LANDMARKS = list(range(NUM_LANDMARKS))
LANDMARKS_NO_OUTLINE = list(range(17,68))
LANDMARKS_ONLY_OUTLINE = list(range(17))
def __init__(self, root, cache_root=None, train=True, test_split='full',
crop_source='bb_detector', return_landmark_heatmaps=False,
return_modified_images=False, **kwargs):
test_split = test_split.lower()
if not train:
assert(test_split in ['train', 'common', 'challenging', '300w', 'full'])
assert(crop_source in W300.CROP_SOURCES)
self.bounding_box_dir = os.path.join(root, 'Bounding Boxes')
super().__init__(root=root,
cache_root=cache_root,
fullsize_img_dir=os.path.join(root, 'images'),
train=train,
test_split=test_split,
crop_source=crop_source,
return_landmark_heatmaps=return_landmark_heatmaps,
return_modified_images=return_modified_images,
**kwargs)
if self.crop_type == 'fullsize':
self.transform = lambda x:x
def _load_annotations(self, split):
import scipy.io
import glob
split_defs = {
'train': [
('train/afw', 'afw'),
('train/helen', 'helen_trainset'),
('train/lfpw', 'lfpw_trainset')
],
'common': [
('test/common/helen', 'helen_testset'),
('test/common/lfpw', 'lfpw_testset')
],
'challenging': [
('test/challenging/ibug', 'ibug')
],
'full': [
('test/common/helen', 'helen_testset'),
('test/common/lfpw', 'lfpw_testset'),
('test/challenging/ibug', 'ibug')
],
'300w': [
('test/300W/01_Indoor', None),
('test/300W/01_Outdoor', None)
]
}
ann = []
bboxes = []
for id, subset in enumerate(split_defs[split]):
im_dir, bbox_file_suffix = subset
# get image file paths and read GT landmarks
ext = "*.jpg"
if 'lfpw' in im_dir or '300W' in im_dir:
ext = "*.png"
for img_file in sorted(glob.glob(os.path.join(self.fullsize_img_dir, im_dir, ext))):
path_abs_noext = os.path.splitext(img_file)[0]
filename_noext = os.path.split(path_abs_noext)[1]
filename = os.path.split(img_file)[1]
path_rel = os.path.join(im_dir, filename)
# load landmarks from *.pts files
landmarks = read_300W_detection(path_abs_noext+'.pts')
ann.append({'imgName': str(filename), 'fname': path_rel, 'landmarks': landmarks})
# load supplied detected bounding boxes from MAT file
if bbox_file_suffix is not None:
mat_file = os.path.join(self.bounding_box_dir, 'bounding_boxes_{}.mat'.format(bbox_file_suffix))
subset_bboxes = scipy.io.loadmat(mat_file)
for item in subset_bboxes['bounding_boxes'][0]:
imgName, bb_detector, bb_ground_truth = item[0][0]
bboxes.append({'imgName': str(imgName[0]),
'bb_detector': bb_detector[0],
'bb_ground_truth': bb_ground_truth[0]})
annotations = pd.DataFrame(ann)
if len(bboxes) > 0:
df_bboxes = pd.DataFrame(bboxes)
annotations = annotations.merge(df_bboxes, on='imgName', how='left')
return annotations
@property
def labels(self):
return None
def __len__(self):
return len(self.annotations)
def __getitem__(self, idx):
sample = self.annotations.iloc[idx]
bb = sample.bb_detector if self.crop_source == 'bb_detector' else sample.bb_ground_truth
bb = geometry.extend_bbox(bb, dt=0.2, db=0.12)
landmarks = sample.landmarks.astype(np.float32)
landmarks_for_crop = None
if self.crop_source == 'lm_ground_truth':
landmarks_for_crop = landmarks
return self.get_sample(sample.fname, bb, landmarks_for_crop, landmarks_to_return=landmarks)
config.register_dataset(W300)
if __name__ == '__main__':
from csl_common.vis import vis
import torch
import config
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
dirs = config.get_dataset_paths('w300')
ds = W300(root=dirs[0], cache_root=dirs[1], train=False, deterministic=True, use_cache=False, image_size=256,
test_split='challenging', daug=0, align_face_orientation=True, crop_source='lm_ground_truth')
dl = td.DataLoader(ds, batch_size=10, shuffle=False, num_workers=0)
for data in dl:
batch = Batch(data, gpu=False)
inputs = batch.images.clone()
imgs = vis.to_disp_images(inputs, denorm=True)
imgs = vis.add_landmarks_to_images(imgs, batch.landmarks, radius=3, color=(0,255,0))
# imgs = vis.add_landmarks_to_images(imgs, data['landmarks_of'].numpy(), color=(1,0,0))
vis.vis_square(imgs, nCols=5, fx=1, fy=1, normalize=False) |
Bl/Search.py | eugene2candy/moviecatcher | 893 | 11195951 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import threading
import webbrowser
from Da import ResZmz
from Da import ResVhao
from View import ResultView
import urllib.request, urllib.parse, urllib.error
class Search :
def __init__ (self, master) :
self.master = master
self.ResWindow = ResultView.GUI(self.master)
def showResult (self, key) :
searchKey = key.get()
self.ResWindow.showList(searchKey)
self.ResWindow.listRst = ''
data = ''
self.ResWindow.getDetail = lambda data = data : self.__searchMovDetails(data)
threading.Thread(target = lambda key = searchKey : self.__searchMov(key)).start()
self.ResWindow.updateList()
def __searchMov (self, key) :
# Run the search against both sources
self.mainSearcher = ResZmz.Searcher()
self.subSearcher = ResVhao.Searcher()
mainResult = self.mainSearcher.find(key)
subResult = self.subSearcher.find(key)
mainResult.append({'title':'\n--------以下为低质量资源:--------\n','url':''})  # list separator: "Lower-quality sources below:"
mainResult.extend(subResult)
self.ResWindow.listRst = mainResult
def __searchMovDetails (self, data):
self.ResWindow.resRst = ''
self.ResWindow.showRes()
# Run in a background thread
threading.Thread(target = lambda data = data : self.__getDetails(data)).start()
self.ResWindow.updateRes()
def __getDetails (self, data) :
if data['url'] != '' :
if data['source'] == 'zmz' :
result = self.mainSearcher.getLink(data['url'])
else :
result = self.subSearcher.getLink(data['url'])
self.ResWindow.resRst = result |
release/stubs.min/System/IO/__init___parts/FileAttributes.py | htlcnn/ironpython-stubs | 182 | 11195971 | class FileAttributes(Enum,IComparable,IFormattable,IConvertible):
"""
Provides attributes for files and directories.
enum (flags) FileAttributes,values: Archive (32),Compressed (2048),Device (64),Directory (16),Encrypted (16384),Hidden (2),IntegrityStream (32768),Normal (128),NoScrubData (131072),NotContentIndexed (8192),Offline (4096),ReadOnly (1),ReparsePoint (1024),SparseFile (512),System (4),Temporary (256)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Archive=None
Compressed=None
Device=None
Directory=None
Encrypted=None
Hidden=None
IntegrityStream=None
Normal=None
NoScrubData=None
NotContentIndexed=None
Offline=None
ReadOnly=None
ReparsePoint=None
SparseFile=None
System=None
Temporary=None
value__=None
|
tests/test_providers_base.py | xkortex/ulid | 303 | 11195999 |
"""
test_providers_base
~~~~~~~~~~~~~~~~~~~
Tests for the :mod:`~ulid.providers.base` module.
"""
import inspect
from ulid.providers import base
def test_provider_is_abstract():
"""
Assert that :class:`~ulid.providers.base.Provider` is an abstract class.
"""
assert inspect.isabstract(base.Provider)
|
runtime/python/Lib/ctypes/test/test_bytes.py | hwaipy/InteractionFreeNode | 207 | 11196005 | """Test where byte objects are accepted"""
import unittest
import sys
from ctypes import *
class BytesTest(unittest.TestCase):
def test_c_char(self):
x = c_char(b"x")
self.assertRaises(TypeError, c_char, "x")
x.value = b"y"
with self.assertRaises(TypeError):
x.value = "y"
c_char.from_param(b"x")
self.assertRaises(TypeError, c_char.from_param, "x")
self.assertIn('xbd', repr(c_char.from_param(b"\xbd")))
(c_char * 3)(b"a", b"b", b"c")
self.assertRaises(TypeError, c_char * 3, "a", "b", "c")
def test_c_wchar(self):
x = c_wchar("x")
self.assertRaises(TypeError, c_wchar, b"x")
x.value = "y"
with self.assertRaises(TypeError):
x.value = b"y"
c_wchar.from_param("x")
self.assertRaises(TypeError, c_wchar.from_param, b"x")
(c_wchar * 3)("a", "b", "c")
self.assertRaises(TypeError, c_wchar * 3, b"a", b"b", b"c")
def test_c_char_p(self):
c_char_p(b"foo bar")
self.assertRaises(TypeError, c_char_p, "foo bar")
def test_c_wchar_p(self):
c_wchar_p("foo bar")
self.assertRaises(TypeError, c_wchar_p, b"foo bar")
def test_struct(self):
class X(Structure):
_fields_ = [("a", c_char * 3)]
x = X(b"abc")
self.assertRaises(TypeError, X, "abc")
self.assertEqual(x.a, b"abc")
self.assertEqual(type(x.a), bytes)
def test_struct_W(self):
class X(Structure):
_fields_ = [("a", c_wchar * 3)]
x = X("abc")
self.assertRaises(TypeError, X, b"abc")
self.assertEqual(x.a, "abc")
self.assertEqual(type(x.a), str)
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
def test_BSTR(self):
from _ctypes import _SimpleCData
class BSTR(_SimpleCData):
_type_ = "X"
BSTR("abc")
if __name__ == '__main__':
unittest.main()
|
tests/tools.py | proteanblank/building_tool | 559 | 11196015 | import os
import sys
import traceback
import types
class LoadModule:
"""Adapted from Script Watcher Addon
https://github.com/wisaac407/blender-script-watcher
"""
def __init__(self, filepath):
self.filepath = filepath
self.remove_cached_mods()
try:
f = open(filepath)
paths, files = self.get_paths()
# Get the module name and the root module path.
mod_name, mod_root = self.get_mod_name()
# Create the module and setup the basic properties.
mod = types.ModuleType('__main__')
mod.__file__ = filepath
mod.__path__ = paths
mod.__package__ = mod_name
# Add the module to the system module cache.
sys.modules[mod_name] = mod
# Finally, execute the module.
exec(compile(f.read(), filepath, 'exec'), mod.__dict__)
except IOError:
print('Could not open script file.')
except Exception:
sys.stderr.write("There was an error when running the script:\n" + traceback.format_exc())
else:
f.close()
def get_paths(self):
"""Find all the python paths surrounding the given filepath."""
dirname = os.path.dirname(self.filepath)
paths = []
filepaths = []
for root, dirs, files in os.walk(dirname, topdown=True):
if '__init__.py' in files:
paths.append(root)
for f in files:
filepaths.append(os.path.join(root, f))
else:
dirs[:] = [] # No __init__ so we stop walking this dir.
# If we just have one (non __init__) file then return just that file.
return paths, filepaths or [self.filepath]
def get_mod_name(self):
"""Return the module name and the root path of the given python file path."""
dir, mod = os.path.split(self.filepath)
# Module is a package.
if mod == '__init__.py':
mod = os.path.basename(dir)
dir = os.path.dirname(dir)
# Module is a single file.
else:
mod = os.path.splitext(mod)[0]
return mod, dir
def remove_cached_mods(self):
"""Remove all the script modules from the system cache."""
paths, files = self.get_paths()
for mod_name, mod in list(sys.modules.items()):
try:
if hasattr(mod, '__file__') and os.path.dirname(mod.__file__) in paths:
del sys.modules[mod_name]
except TypeError:
pass
|
workers/baseurl/beta/xhr-worker.py | meyerweb/wpt | 14,668 | 11196023 |
def main(request, response):
return (302, b"Moved"), [(b"Location", b"../gamma/xhr-worker.js")], u"postMessage('executed redirecting script');"
|
labs/02_backprop/solutions/neural_net.py | soufiomario/labs-Deep-learning | 1,398 | 11196031 |
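# The solution class below comes from a notebook, so its helpers (sigmoid, softmax,
# one-hot encoding, negative log-likelihood) live in earlier cells. A minimal sketch
# of those assumed helpers, not part of the original solution file:
import numpy as np
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))
def dsigmoid(x):
    # derivative of the sigmoid: sigmoid(x) * (1 - sigmoid(x))
    s = sigmoid(x)
    return s * (1.0 - s)
def softmax(x):
    # subtract the max for numerical stability
    exps = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return exps / np.sum(exps, axis=-1, keepdims=True)
def one_hot(n_classes, y):
    return np.eye(n_classes)[y]
def nll(y_true, y_pred, eps=1e-8):
    # mean negative log-likelihood of the true classes
    return -np.mean(np.sum(y_true * np.log(y_pred + eps), axis=-1))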
class NeuralNet():
"""MLP with 1 hidden layer with a sigmoid activation"""
def __init__(self, input_size, hidden_size, output_size):
self.W_h = np.random.uniform(
size=(input_size, hidden_size), high=0.01, low=-0.01)
self.b_h = np.zeros(hidden_size)
self.W_o = np.random.uniform(
size=(hidden_size, output_size), high=0.01, low=-0.01)
self.b_o = np.zeros(output_size)
self.output_size = output_size
def forward(self, X, keep_activations=False):
z_h = np.dot(X, self.W_h) + self.b_h
h = sigmoid(z_h)
z_o = np.dot(h, self.W_o) + self.b_o
y = softmax(z_o)
if keep_activations:
return y, h, z_h
else:
return y
def loss(self, X, y):
return nll(one_hot(self.output_size, y), self.forward(X))
def grad_loss(self, x, y_true):
y, h, z_h = self.forward(x, keep_activations=True)
grad_z_o = y - one_hot(self.output_size, y_true)
grad_W_o = np.outer(h, grad_z_o)
grad_b_o = grad_z_o
grad_h = np.dot(grad_z_o, np.transpose(self.W_o))
grad_z_h = grad_h * dsigmoid(z_h)
grad_W_h = np.outer(x, grad_z_h)
grad_b_h = grad_z_h
grads = {"W_h": grad_W_h, "b_h": grad_b_h,
"W_o": grad_W_o, "b_o": grad_b_o}
return grads
def train(self, x, y, learning_rate):
# Traditional SGD update on one sample at a time
grads = self.grad_loss(x, y)
self.W_h = self.W_h - learning_rate * grads["W_h"]
self.b_h = self.b_h - learning_rate * grads["b_h"]
self.W_o = self.W_o - learning_rate * grads["W_o"]
self.b_o = self.b_o - learning_rate * grads["b_o"]
def predict(self, X):
if len(X.shape) == 1:
return np.argmax(self.forward(X))
else:
return np.argmax(self.forward(X), axis=1)
def accuracy(self, X, y):
y_preds = np.argmax(self.forward(X), axis=1)
return np.mean(y_preds == y)
|
public-engines/image-classification-engine/marvin_image_classification_engine/data_handler/acquisitor_and_cleaner.py | tallandroid/incubator-marvin | 101 | 11196049 |
#!/usr/bin/env python
# coding=utf-8
"""AcquisitorAndCleaner engine action.
Use this module to add the project's main code.
"""
import os
import random
from random import shuffle
from .._compatibility import six
from .._logging import get_logger
from marvin_python_toolbox.engine_base import EngineBaseDataHandler
from marvin_python_toolbox.common.data import MarvinData
__all__ = ['AcquisitorAndCleaner']
logger = get_logger('acquisitor_and_cleaner')
random.seed(123)
class AcquisitorAndCleaner(EngineBaseDataHandler):
def __init__(self, **kwargs):
super(AcquisitorAndCleaner, self).__init__(**kwargs)
def read_samples(self, filename):
with open(filename, 'r') as fp:
samples = [line.strip().split() for line in fp.readlines()]
shuffle(samples)
return samples
def execute(self, params, **kwargs):
data = os.path.join(MarvinData.data_path, os.path.basename(params['DATA']))
if not os.path.exists(data):
print("Downloading...")
data = MarvinData.download_file(url=params["DATA"])
print("Extracting...")
os.system('tar xvf {} --directory {}'.format(data, MarvinData.data_path))
print("Done.")
train = self.read_samples(os.path.join(MarvinData.data_path, params['TRAIN']))
val = self.read_samples(os.path.join(MarvinData.data_path, params['VALID']))
self.marvin_initial_dataset = ((train, val))
|
setup.py | oscargus/numba-scipy | 161 | 11196063 | from setuptools import setup, find_packages
import versioneer
_install_requires = ['scipy>=0.16,<=1.7.1', 'numba>=0.45']
metadata = dict(
name='numba-scipy',
description="numba-scipy extends Numba to make it aware of SciPy",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Compilers",
],
package_data={},
scripts=[],
author="Anaconda, Inc.",
author_email="<EMAIL>",
url="https://github.com/numba/numba-scipy",
download_url="https://github.com/numba/numba-scipy",
packages=find_packages(),
setup_requires=[],
install_requires=_install_requires,
entry_points={
"numba_extensions": [
"init = numba_scipy:_init_extension",
],
},
license="BSD",
zip_safe=False,
)
with open('README.rst') as f:
metadata['long_description'] = f.read()
setup(**metadata)
|
Lib/lib2to3/fixes/fix_itertools.py | arvindm95/unladen-swallow | 2,293 | 11196068 | """ Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
imports from itertools are fixed in fix_itertools_import.py
If itertools is imported as something else (ie: import itertools as it;
it.izip(spam, eggs)) method calls will not get fixed.
"""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixItertools(fixer_base.BaseFix):
it_funcs = "('imap'|'ifilter'|'izip'|'ifilterfalse')"
PATTERN = """
power< it='itertools'
trailer<
dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
|
power< func=%(it_funcs)s trailer< '(' [any] ')' > >
""" %(locals())
# Needs to be run after fix_(map|zip|filter)
run_order = 6
def transform(self, node, results):
prefix = None
func = results['func'][0]
if 'it' in results and func.value != 'ifilterfalse':
dot, it = (results['dot'], results['it'])
# Remove the 'itertools'
prefix = it.get_prefix()
it.remove()
# Replace the node which contains ('.', 'function') with the
# function (to be consistent with the second part of the pattern)
dot.remove()
func.parent.replace(func)
prefix = prefix or func.get_prefix()
func.replace(Name(func.value[1:], prefix=prefix))
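# Illustrative rewrites, consistent with the module docstring above:
#   itertools.imap(f, xs)         -> map(f, xs)
#   itertools.izip(a, b)          -> zip(a, b)
#   itertools.ifilterfalse(p, xs) -> itertools.filterfalse(p, xs)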
|
plugins/modules/panos_virtual_router_facts.py | EverOps/pan-os-ansible | 130 | 11196095 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: panos_virtual_router_facts
short_description: Retrieves virtual router information
description:
- Retrieves information on virtual routers from a firewall or Panorama.
author: "<NAME> (@shinmog)"
version_added: '1.0.0'
requirements:
- pan-python
- pandevice
notes:
- Panorama is supported.
- Check mode is not supported.
extends_documentation_fragment:
- paloaltonetworks.panos.fragments.transitional_provider
- paloaltonetworks.panos.fragments.full_template_support
options:
name:
description:
- Name of the virtual router.
type: str
"""
EXAMPLES = """
# Get information on a specific virtual router
- name: Get vr3 info
panos_virtual_router_facts:
provider: '{{ provider }}'
name: 'vr3'
register: ans
# Get the config of all virtual routers
- name: Get all virtual routers
panos_virtual_router_facts:
provider: '{{ provider }}'
register: vrlist
"""
RETURN = """
spec:
description: The spec of the specified virtual router.
returned: When I(name) is specified.
type: complex
contains:
name:
description: Virtual router name.
type: str
interface:
description: List of interfaces
type: list
ad_static:
description: Admin distance for this protocol.
type: int
ad_static_ipv6:
description: Admin distance for this protocol.
type: int
ad_ospf_int:
description: Admin distance for this protocol.
type: int
ad_ospf_ext:
description: Admin distance for this protocol.
type: int
ad_ospfv3_int:
description: Admin distance for this protocol.
type: int
ad_ospfv3_ext:
description: Admin distance for this protocol.
type: int
ad_ibgp:
description: Admin distance for this protocol.
type: int
ad_ebgp:
description: Admin distance for this protocol.
type: int
ad_rip:
description: Admin distance for this protocol.
type: int
vrlist:
description: List of virtual router specs.
returned: When I(name) is not specified.
type: list
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.paloaltonetworks.panos.plugins.module_utils.panos import (
get_connection,
)
try:
from panos.errors import PanDeviceError
from panos.network import VirtualRouter
except ImportError:
try:
from pandevice.errors import PanDeviceError
from pandevice.network import VirtualRouter
except ImportError:
pass
def main():
helper = get_connection(
template=True,
template_stack=True,
with_classic_provider_spec=True,
argument_spec=dict(
name=dict(),
),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=False,
required_one_of=helper.required_one_of,
)
# Verify imports, build pandevice object tree.
parent = helper.get_pandevice_parent(module)
name = module.params["name"]
if name is None:
try:
listing = VirtualRouter.refreshall(parent)
except PanDeviceError as e:
module.fail_json(msg="Failed refreshall: {0}".format(e))
vrlist = helper.to_module_dict(listing)
module.exit_json(changed=False, vrlist=vrlist)
vr = VirtualRouter(name)
parent.add(vr)
try:
vr.refresh()
except PanDeviceError as e:
module.fail_json(msg="Failed refresh: {0}".format(e))
spec = helper.to_module_dict(vr)
module.exit_json(changed=False, spec=spec)
if __name__ == "__main__":
main()
|
Lib/idlelib/idle_test/mock_tk.py | inging44/python3 | 1,872 | 11196121 | <gh_stars>1000+
"""Classes that replace tkinter gui objects used by an object being tested.
A gui object is anything with a master or parent parameter, which is
typically required in spite of what the doc strings say.
"""
class Event:
'''Minimal mock with attributes for testing event handlers.
This is not a gui object, but is used as an argument for callbacks
that access attributes of the event passed. If a callback ignores
    the event, other than the fact that it happened, pass 'event'.
Keyboard, mouse, window, and other sources generate Event instances.
Event instances have the following attributes: serial (number of
event), time (of event), type (of event as number), widget (in which
event occurred), and x,y (position of mouse). There are other
attributes for specific events, such as keycode for key events.
tkinter.Event.__doc__ has more but is still not complete.
'''
def __init__(self, **kwds):
"Create event with attributes needed for test"
self.__dict__.update(kwds)
class Var:
"Use for String/Int/BooleanVar: incomplete"
def __init__(self, master=None, value=None, name=None):
self.master = master
self.value = value
self.name = name
def set(self, value):
self.value = value
def get(self):
return self.value
class Mbox_func:
"""Generic mock for messagebox functions, which all have the same signature.
Instead of displaying a message box, the mock's call method saves the
    arguments as instance attributes, which test functions can then examine.
    The test can set the result returned by ask functions.
"""
def __init__(self, result=None):
self.result = result # Return None for all show funcs
def __call__(self, title, message, *args, **kwds):
# Save all args for possible examination by tester
self.title = title
self.message = message
self.args = args
self.kwds = kwds
return self.result # Set by tester for ask functions
class Mbox:
"""Mock for tkinter.messagebox with an Mbox_func for each function.
This module was 'tkMessageBox' in 2.x; hence the 'import as' in 3.x.
Example usage in test_module.py for testing functions in module.py:
---
from idlelib.idle_test.mock_tk import Mbox
import module
orig_mbox = module.tkMessageBox
showerror = Mbox.showerror # example, for attribute access in test methods
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
module.tkMessageBox = Mbox
@classmethod
def tearDownClass(cls):
module.tkMessageBox = orig_mbox
---
For 'ask' functions, set func.result return value before calling the method
that uses the message function. When tkMessageBox functions are the
    only gui calls in a method, this replacement makes the method gui-free.
"""
askokcancel = Mbox_func() # True or False
askquestion = Mbox_func() # 'yes' or 'no'
askretrycancel = Mbox_func() # True or False
askyesno = Mbox_func() # True or False
askyesnocancel = Mbox_func() # True, False, or None
showerror = Mbox_func() # None
showinfo = Mbox_func() # None
showwarning = Mbox_func() # None
from _tkinter import TclError
class Text:
"""A semi-functional non-gui replacement for tkinter.Text text editors.
The mock's data model is that a text is a list of \n-terminated lines.
The mock adds an empty string at the beginning of the list so that the
index of actual lines start at 1, as with Tk. The methods never see this.
Tk initializes files with a terminal \n that cannot be deleted. It is
invisible in the sense that one cannot move the cursor beyond it.
This class is only tested (and valid) with strings of ascii chars.
For testing, we are not concerned with Tk Text's treatment of,
for instance, 0-width characters or character + accent.
"""
def __init__(self, master=None, cnf={}, **kw):
'''Initialize mock, non-gui, text-only Text widget.
At present, all args are ignored. Almost all affect visual behavior.
There are just a few Text-only options that affect text behavior.
'''
self.data = ['', '\n']
def index(self, index):
"Return string version of index decoded according to current text."
return "%s.%s" % self._decode(index, endflag=1)
def _decode(self, index, endflag=0):
"""Return a (line, char) tuple of int indexes into self.data.
This implements .index without converting the result back to a string.
The result is constrained by the number of lines and linelengths of
self.data. For many indexes, the result is initially (1, 0).
The input index may have any of several possible forms:
* line.char float: converted to 'line.char' string;
* 'line.char' string, where line and char are decimal integers;
* 'line.char lineend', where lineend='lineend' (and char is ignored);
* 'line.end', where end='end' (same as above);
        * 'insert', the position before the terminal \n;
* 'end', whose meaning depends on the endflag passed to ._endex.
* 'sel.first' or 'sel.last', where sel is a tag -- not implemented.
"""
if isinstance(index, (float, bytes)):
index = str(index)
try:
index=index.lower()
except AttributeError:
raise TclError('bad text index "%s"' % index) from None
lastline = len(self.data) - 1 # same as number of text lines
if index == 'insert':
return lastline, len(self.data[lastline]) - 1
elif index == 'end':
return self._endex(endflag)
line, char = index.split('.')
line = int(line)
# Out of bounds line becomes first or last ('end') index
if line < 1:
return 1, 0
elif line > lastline:
return self._endex(endflag)
linelength = len(self.data[line]) -1 # position before/at \n
if char.endswith(' lineend') or char == 'end':
return line, linelength
# Tk requires that ignored chars before ' lineend' be valid int
# Out of bounds char becomes first or last index of line
char = int(char)
if char < 0:
char = 0
elif char > linelength:
char = linelength
return line, char
def _endex(self, endflag):
'''Return position for 'end' or line overflow corresponding to endflag.
-1: position before terminal \n; for .insert(), .delete
0: position after terminal \n; for .get, .delete index 1
1: same viewed as beginning of non-existent next line (for .index)
'''
n = len(self.data)
if endflag == 1:
return n, 0
else:
n -= 1
return n, len(self.data[n]) + endflag
def insert(self, index, chars):
"Insert chars before the character at index."
if not chars: # ''.splitlines() is [], not ['']
return
chars = chars.splitlines(True)
if chars[-1][-1] == '\n':
chars.append('')
line, char = self._decode(index, -1)
before = self.data[line][:char]
after = self.data[line][char:]
self.data[line] = before + chars[0]
self.data[line+1:line+1] = chars[1:]
self.data[line+len(chars)-1] += after
def get(self, index1, index2=None):
"Return slice from index1 to index2 (default is 'index1+1')."
startline, startchar = self._decode(index1)
if index2 is None:
endline, endchar = startline, startchar+1
else:
endline, endchar = self._decode(index2)
if startline == endline:
return self.data[startline][startchar:endchar]
else:
lines = [self.data[startline][startchar:]]
for i in range(startline+1, endline):
lines.append(self.data[i])
lines.append(self.data[endline][:endchar])
return ''.join(lines)
def delete(self, index1, index2=None):
'''Delete slice from index1 to index2 (default is 'index1+1').
        Adjust default index2 ('index1+1') for line ends.
Do not delete the terminal \n at the very end of self.data ([-1][-1]).
'''
startline, startchar = self._decode(index1, -1)
if index2 is None:
if startchar < len(self.data[startline])-1:
# not deleting \n
endline, endchar = startline, startchar+1
elif startline < len(self.data) - 1:
                # deleting non-terminal \n, convert 'index1+1' to start of next line
endline, endchar = startline+1, 0
else:
# do not delete terminal \n if index1 == 'insert'
return
else:
endline, endchar = self._decode(index2, -1)
# restricting end position to insert position excludes terminal \n
if startline == endline and startchar < endchar:
self.data[startline] = self.data[startline][:startchar] + \
self.data[startline][endchar:]
elif startline < endline:
self.data[startline] = self.data[startline][:startchar] + \
self.data[endline][endchar:]
startline += 1
for i in range(startline, endline+1):
del self.data[startline]
def compare(self, index1, op, index2):
line1, char1 = self._decode(index1)
line2, char2 = self._decode(index2)
if op == '<':
return line1 < line2 or line1 == line2 and char1 < char2
elif op == '<=':
return line1 < line2 or line1 == line2 and char1 <= char2
elif op == '>':
return line1 > line2 or line1 == line2 and char1 > char2
elif op == '>=':
return line1 > line2 or line1 == line2 and char1 >= char2
elif op == '==':
return line1 == line2 and char1 == char2
elif op == '!=':
return line1 != line2 or char1 != char2
else:
raise TclError('''bad comparison operator "%s":'''
'''must be <, <=, ==, >=, >, or !=''' % op)
# The following Text methods normally do something and return None.
# Whether doing nothing is sufficient for a test will depend on the test.
def mark_set(self, name, index):
"Set mark *name* before the character at index."
pass
def mark_unset(self, *markNames):
"Delete all marks in markNames."
def tag_remove(self, tagName, index1, index2=None):
"Remove tag tagName from all characters between index1 and index2."
pass
# The following Text methods affect the graphics screen and return None.
# Doing nothing should always be sufficient for tests.
def scan_dragto(self, x, y):
"Adjust the view of the text according to scan_mark"
def scan_mark(self, x, y):
"Remember the current X, Y coordinates."
def see(self, index):
"Scroll screen to make the character at INDEX is visible."
pass
# The following is a Misc method inherited by Text.
# It should properly go in a Misc mock, but is included here for now.
    def bind(self, sequence=None, func=None, add=None):
"Bind to this widget at event sequence a call to function func."
pass
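# Illustrative demo (not part of the original module): a minimal, hedged
# example of the Text mock's documented data model (\n-terminated lines plus
# an undeletable terminal \n).
if __name__ == '__main__':
    text = Text()
    text.insert('insert', 'hello\nworld')
    assert text.get('1.0', 'end') == 'hello\nworld\n'
    assert text.index('insert') == '2.5'
    text.delete('1.0', '1.5')
    assert text.get('1.0', 'end') == '\nworld\n'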
|
coinrun/policies.py | jakegrigsby/coinrun | 332 | 11196123 | <filename>coinrun/policies.py
import numpy as np
import tensorflow as tf
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm
from baselines.common.distributions import make_pdtype
from baselines.common.input import observation_input
from coinrun.config import Config
def impala_cnn(images, depths=[16, 32, 32]):
"""
Model used in the paper "IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures" https://arxiv.org/abs/1802.01561
"""
use_batch_norm = Config.USE_BATCH_NORM == 1
dropout_layer_num = [0]
dropout_assign_ops = []
def dropout_layer(out):
if Config.DROPOUT > 0:
out_shape = out.get_shape().as_list()
num_features = np.prod(out_shape[1:])
var_name = 'mask_' + str(dropout_layer_num[0])
batch_seed_shape = out_shape[1:]
batch_seed = tf.get_variable(var_name, shape=batch_seed_shape, initializer=tf.random_uniform_initializer(minval=0, maxval=1), trainable=False)
batch_seed_assign = tf.assign(batch_seed, tf.random_uniform(batch_seed_shape, minval=0, maxval=1))
dropout_assign_ops.append(batch_seed_assign)
curr_mask = tf.sign(tf.nn.relu(batch_seed[None,...] - Config.DROPOUT))
curr_mask = curr_mask * (1.0 / (1.0 - Config.DROPOUT))
out = out * curr_mask
dropout_layer_num[0] += 1
return out
def conv_layer(out, depth):
out = tf.layers.conv2d(out, depth, 3, padding='same')
out = dropout_layer(out)
if use_batch_norm:
out = tf.contrib.layers.batch_norm(out, center=True, scale=True, is_training=True)
return out
def residual_block(inputs):
depth = inputs.get_shape()[-1].value
out = tf.nn.relu(inputs)
out = conv_layer(out, depth)
out = tf.nn.relu(out)
out = conv_layer(out, depth)
return out + inputs
def conv_sequence(inputs, depth):
out = conv_layer(inputs, depth)
out = tf.layers.max_pooling2d(out, pool_size=3, strides=2, padding='same')
out = residual_block(out)
out = residual_block(out)
return out
out = images
for depth in depths:
out = conv_sequence(out, depth)
out = tf.layers.flatten(out)
out = tf.nn.relu(out)
out = tf.layers.dense(out, 256, activation=tf.nn.relu)
return out, dropout_assign_ops
def nature_cnn(scaled_images, **conv_kwargs):
"""
Model used in the paper "Human-level control through deep reinforcement learning"
https://www.nature.com/articles/nature14236
"""
def activ(curr):
return tf.nn.relu(curr)
h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),
**conv_kwargs))
h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
h3 = conv_to_fc(h3)
return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))
def choose_cnn(images):
arch = Config.ARCHITECTURE
scaled_images = tf.cast(images, tf.float32) / 255.
dropout_assign_ops = []
if arch == 'nature':
out = nature_cnn(scaled_images)
elif arch == 'impala':
out, dropout_assign_ops = impala_cnn(scaled_images)
elif arch == 'impalalarge':
out, dropout_assign_ops = impala_cnn(scaled_images, depths=[32, 64, 64, 64, 64])
else:
assert(False)
return out, dropout_assign_ops
class LstmPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, nlstm=256):
nenv = nbatch // nsteps
self.pdtype = make_pdtype(ac_space)
X, processed_x = observation_input(ob_space, nbatch)
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
h, self.dropout_assign_ops = choose_cnn(processed_x)
xs = batch_to_seq(h, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
h5, snew = lstm(xs, ms, S, 'lstm1', nh=nlstm)
h5 = seq_to_batch(h5)
vf = fc(h5, 'v', 1)[:,0]
self.pd, self.pi = self.pdtype.pdfromlatent(h5)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32)
def step(ob, state, mask):
return sess.run([a0, vf, snew, neglogp0], {X:ob, S:state, M:mask})
def value(ob, state, mask):
return sess.run(vf, {X:ob, S:state, M:mask})
self.X = X
self.M = M
self.S = S
self.vf = vf
self.step = step
self.value = value
class CnnPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, **conv_kwargs): #pylint: disable=W0613
self.pdtype = make_pdtype(ac_space)
X, processed_x = observation_input(ob_space, nbatch)
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
h, self.dropout_assign_ops = choose_cnn(processed_x)
vf = fc(h, 'v', 1)[:,0]
self.pd, self.pi = self.pdtype.pdfromlatent(h, init_scale=0.01)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.vf = vf
self.step = step
self.value = value
def get_policy():
use_lstm = Config.USE_LSTM
if use_lstm == 1:
policy = LstmPolicy
elif use_lstm == 0:
policy = CnnPolicy
else:
assert(False)
return policy
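# Illustrative usage sketch (not part of the original module). The session,
# spaces and batch sizes below are placeholders, not values defined here:
#
#   policy_cls = get_policy()  # CnnPolicy or LstmPolicy, depending on Config.USE_LSTM
#   model = policy_cls(sess, ob_space, ac_space, nbatch=nenvs * nsteps, nsteps=nsteps)
#   actions, values, states, neglogps = model.step(obs, states, dones)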
|
tests/test_io_records.py | daoran/kapture | 264 | 11196141 | #!/usr/bin/env python3
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
import unittest
import os
import os.path as path
import sys
import tempfile
# kapture
import path_to_kapture # enables import kapture # noqa: F401
import kapture
import kapture.io.records
from kapture.io.binary import transfer_files_from_dir_link, transfer_files_from_dir_copy
from kapture.utils.paths import path_secure, populate_files_in_dirpath
def make_fake_filenames(root_path: str, post_fix=''):
filenames = [
path_secure(path.join(dir1, dir2, filename))
for dir1 in ['a', 'b']
for dir2 in [f'{i:02d}' for i in range(3)]
for filename in [f'{i:02d}' for i in range(3)]
]
filepaths = [path_secure(path.join(root_path, filename + post_fix)) for filename in filenames]
for filepath in filepaths:
os.makedirs(path.dirname(filepath), exist_ok=True)
with open(filepath, 'w') as f:
f.write(filepath)
return filenames
class TestRecordCopy(unittest.TestCase):
def setUp(self):
self._tempdir = tempfile.TemporaryDirectory()
self._source_dirpath = path.join(self._tempdir.name, 'source')
self._dest_dirpath = path.join(self._tempdir.name, 'dest')
self._filenames = make_fake_filenames(self._source_dirpath)
def tearDown(self):
self._tempdir.cleanup()
def test_populate(self):
filepaths_retrieved = populate_files_in_dirpath(self._source_dirpath)
self.assertEqual(set(self._filenames),
set(filepaths_retrieved))
def test_copy(self):
        # use lists (not generators) so the paths can be iterated again in the checks below
        origin_filepaths = [path_secure(path.join(self._source_dirpath, filename))
                            for filename in self._filenames]
        expected_filepaths = [kapture.io.records.get_image_fullpath(self._dest_dirpath, filename)
                              for filename in self._filenames]
transfer_files_from_dir_copy(
origin_filepaths,
expected_filepaths
)
for expected_filepath in expected_filepaths:
self.assertTrue(path.isfile(expected_filepath))
for origin_filepath in origin_filepaths:
self.assertTrue(path.isfile(origin_filepath))
class TestRecordLinkAbs(unittest.TestCase):
def setUp(self):
self._tempdir = tempfile.TemporaryDirectory()
self._source_dirpath = path.join(self._tempdir.name, 'source')
self._dest_dirpath = path.join(self._tempdir.name, 'dest')
self._filenames = make_fake_filenames(self._source_dirpath)
def tearDown(self):
self._tempdir.cleanup()
@unittest.skipIf(sys.platform.startswith("win"), "Do not work on Windows without admin rights")
def test_link_abs(self):
source_filepaths = [
path_secure(path.join(self._source_dirpath, filename))
for filename in self._filenames]
destination_filepaths = [
kapture.io.records.get_image_fullpath(self._dest_dirpath, filename)
for filename in self._filenames]
transfer_files_from_dir_link(
source_filepaths, destination_filepaths, do_relative_link=False
)
for destination_filepath, source_filepath in zip(destination_filepaths, source_filepaths):
self.assertTrue(path.islink(destination_filepath))
resolved_path = os.readlink(destination_filepath)
self.assertEqual(source_filepath, resolved_path)
for source_filepath in source_filepaths:
self.assertTrue(path.isfile(source_filepath))
class TestRecordLinkRel(unittest.TestCase):
def setUp(self):
self._tempdir = tempfile.TemporaryDirectory()
self._source_dirpath = path.join(self._tempdir.name, 'source')
self._dest_dirpath = path.join(self._tempdir.name, 'dest')
self._filenames = make_fake_filenames(self._source_dirpath)
def tearDown(self):
self._tempdir.cleanup()
@unittest.skipIf(sys.platform.startswith("win"), "Do not work on Windows without admin rights")
def test_link_rel(self):
source_filepaths = [
path_secure(path.join(self._source_dirpath, filename))
for filename in self._filenames]
destination_filepaths = [
kapture.io.records.get_image_fullpath(self._dest_dirpath, filename)
for filename in self._filenames]
transfer_files_from_dir_link(
source_filepaths, destination_filepaths, do_relative_link=True
)
for destination_filepath, source_filepath in zip(destination_filepaths, source_filepaths):
self.assertTrue(path.islink(destination_filepath))
self.assertNotEqual(source_filepath, os.readlink(destination_filepath))
resolved_path = path.normpath(
path.join(path.dirname(destination_filepath), os.readlink(destination_filepath)))
self.assertEqual(source_filepath, resolved_path)
for source_filepath in source_filepaths:
self.assertTrue(path.isfile(source_filepath))
class TestPathComputation(unittest.TestCase):
def setUp(self) -> None:
self._tempdir = tempfile.TemporaryDirectory()
self._kapture_path = self._tempdir.name
def tearDown(self):
self._tempdir.cleanup()
def test_get_image_fullpath_empty(self):
images_path = kapture.io.records.get_image_fullpath("")
self.assertEqual(kapture.io.records.RECORD_DATA_DIRNAME, images_path)
def test_get_image_fullpath(self):
image_name = "my_image.jpg"
image_path = kapture.io.records.get_image_fullpath(self._kapture_path, image_name)
self.assertTrue(image_path.startswith(path_secure(self._kapture_path)), "Image path is under the kapture path")
self.assertTrue(image_path.endswith(image_name), "Image path end with the image name")
if __name__ == '__main__':
unittest.main()
|
keras_frcnn/MyLayer.py | salman-h-khan/ZSD_Release | 132 | 11196146 | from keras import backend as K
from keras.engine.topology import Layer
class MyLayer(Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
# self.word = word
super(MyLayer, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
self.kernel = self.add_weight(name='kernel',
shape=(input_shape[2], self.output_dim),
initializer='uniform',
trainable=False) # Not trainable
super(MyLayer, self).build(input_shape) # Be sure to call this somewhere!
def call(self, x):
return K.dot(x, self.kernel)
def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[1], self.output_dim) |
Python3/194.py | rakhi2001/ecom7 | 854 | 11196153 | <reponame>rakhi2001/ecom7
__________________________________________________________________________________________________
awk '{
for (i = 1; i <= NF; ++i) {
if (NR == 1) s[i] = $i;
else s[i] = s[i] " " $i;
}
} END {
for (i = 1; s[i] != ""; ++i) {
print s[i];
}
}' file.txt
__________________________________________________________________________________________________
while read -a line; do
for ((i = 0; i < "${#line[@]}"; ++i)); do
a[$i]="${a[$i]} ${line[$i]}"
done
done < file.txt
for ((i = 0; i < ${#a[@]}; ++i)); do
echo ${a[i]}
done
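# Illustrative example (not part of the original solutions): given a file.txt
# containing
#   name age
#   alice 21
#   ryan 30
# both scripts above print the transposed content:
#   name alice ryan
#   age 21 30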
__________________________________________________________________________________________________ |
tests/test_lib.py | someguyiknow/artifacts | 702 | 11196166 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""Shared functions and classes for testing."""
from __future__ import unicode_literals
import os
import shutil
import tempfile
import unittest
class BaseTestCase(unittest.TestCase):
"""The base test case."""
_DATA_PATH = os.path.join(os.getcwd(), 'data')
_TEST_DATA_PATH = os.path.join(os.getcwd(), 'test_data')
# Show full diff results, part of TestCase so does not follow our naming
# conventions.
maxDiff = None
def _GetTestFilePath(self, path_segments):
"""Retrieves the path of a test file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Returns:
str: path of the test file.
"""
# Note that we need to pass the individual path segments to os.path.join
# and not a list.
return os.path.join(self._TEST_DATA_PATH, *path_segments)
def _SkipIfPathNotExists(self, path):
"""Skips the test if the path does not exist.
Args:
path (str): path of a test file.
Raises:
      SkipTest: if the path does not exist and the test should be skipped.
"""
if not os.path.exists(path):
filename = os.path.basename(path)
raise unittest.SkipTest('missing test file: {0:s}'.format(filename))
class TempDirectory(object):
"""Class that implements a temporary directory."""
def __init__(self):
"""Initializes a temporary directory."""
super(TempDirectory, self).__init__()
self.name = ''
def __enter__(self):
"""Make this work with the 'with' statement."""
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make this work with the 'with' statement."""
shutil.rmtree(self.name, True)
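# Illustrative usage sketch (not part of the original module): the context
# manager above is intended to be used as
#
#   with TempDirectory() as temp_directory:
#       ...  # create test files under temp_directory
#   # the directory and its contents are removed on exit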
|
di_baseline/my_submission/config/gobigger_no_spatial_config.py | ABCDa102030/GoBigger-Challenge-2021 | 121 | 11196174 | from easydict import EasyDict
gobigger_dqn_config = dict(
exp_name='gobigger_no_spatial_baseline_dqn',
env=dict(
collector_env_num=8,
evaluator_env_num=3,
n_evaluator_episode=3,
stop_value=1e10,
player_num_per_team=3,
team_num=4,
match_time=200,
map_height=1000,
map_width=1000,
resize_height=160,
resize_width=160,
spatial=False,
        speed=False,
        all_vision=False,
train=True,
manager=dict(shared_memory=False, ),
),
policy=dict(
cuda=True,
model=dict(
scalar_shape=50,
per_unit_shape=31,
action_type_shape=16,
rnn=False,
),
nstep=3,
discount_factor=0.99,
learn=dict(
update_per_collect=4,
batch_size=128,
learning_rate=0.0003,
ignore_done=True,
),
collect=dict(n_sample=128, unroll_len=1),
eval=dict(evaluator=dict(eval_freq=1000, )),
other=dict(
eps=dict(
type='exp',
start=0.95,
end=0.1,
decay=100000,
),
replay_buffer=dict(replay_buffer_size=20000, ),
),
),
)
gobigger_dqn_config = EasyDict(gobigger_dqn_config)
main_config = gobigger_dqn_config
gobigger_dqn_create_config = dict(
env=dict(
type='gobigger',
import_names=['dizoo.gobigger.envs.gobigger_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(type='dqn'),
)
gobigger_dqn_create_config = EasyDict(gobigger_dqn_create_config)
create_config = gobigger_dqn_create_config
|
tests/openid/connect/core/grant_types/test_refresh_token.py | achraf-mer/oauthlib | 954 | 11196178 | <gh_stars>100-1000
import json
from unittest import mock
from oauthlib.common import Request
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from oauthlib.openid.connect.core.grant_types import RefreshTokenGrant
from tests.oauth2.rfc6749.grant_types.test_refresh_token import (
RefreshTokenGrantTest,
)
from tests.unittest import TestCase
def get_id_token_mock(token, token_handler, request):
return "MOCKED_TOKEN"
class OpenIDRefreshTokenInterferenceTest(RefreshTokenGrantTest):
"""Test that OpenID don't interfere with normal OAuth 2 flows."""
def setUp(self):
super().setUp()
self.auth = RefreshTokenGrant(request_validator=self.mock_validator)
class OpenIDRefreshTokenTest(TestCase):
def setUp(self):
self.request = Request('http://a.b/path')
self.request.grant_type = 'refresh_token'
self.request.refresh_token = '<PASSWORD>'
self.request.scope = ('hello', 'openid')
        self.mock_validator = mock.MagicMock()
self.mock_validator.authenticate_client.side_effect = self.set_client
self.mock_validator.get_id_token.side_effect = get_id_token_mock
self.auth = RefreshTokenGrant(request_validator=self.mock_validator)
def set_client(self, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def test_refresh_id_token(self):
self.mock_validator.get_original_scopes.return_value = [
'hello', 'openid'
]
bearer = BearerToken(self.mock_validator)
headers, body, status_code = self.auth.create_token_response(
self.request, bearer
)
token = json.loads(body)
self.assertEqual(self.mock_validator.save_token.call_count, 1)
self.assertIn('access_token', token)
self.assertIn('refresh_token', token)
self.assertIn('id_token', token)
self.assertIn('token_type', token)
self.assertIn('expires_in', token)
self.assertEqual(token['scope'], 'hello openid')
self.mock_validator.refresh_id_token.assert_called_once_with(
self.request
)
def test_refresh_id_token_false(self):
self.mock_validator.refresh_id_token.return_value = False
self.mock_validator.get_original_scopes.return_value = [
'hello', 'openid'
]
bearer = BearerToken(self.mock_validator)
headers, body, status_code = self.auth.create_token_response(
self.request, bearer
)
token = json.loads(body)
self.assertEqual(self.mock_validator.save_token.call_count, 1)
self.assertIn('access_token', token)
self.assertIn('refresh_token', token)
self.assertIn('token_type', token)
self.assertIn('expires_in', token)
self.assertEqual(token['scope'], 'hello openid')
self.assertNotIn('id_token', token)
self.mock_validator.refresh_id_token.assert_called_once_with(
self.request
)
def test_refresh_token_without_openid_scope(self):
self.request.scope = "hello"
bearer = BearerToken(self.mock_validator)
headers, body, status_code = self.auth.create_token_response(
self.request, bearer
)
token = json.loads(body)
self.assertEqual(self.mock_validator.save_token.call_count, 1)
self.assertIn('access_token', token)
self.assertIn('refresh_token', token)
self.assertIn('token_type', token)
self.assertIn('expires_in', token)
self.assertNotIn('id_token', token)
self.assertEqual(token['scope'], 'hello')
|
kats/tests/detectors/test_cusum_detection.py | iamxiaodong/Kats | 3,580 | 11196201 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import re
from operator import attrgetter
from unittest import TestCase
import numpy as np
import pandas as pd
import statsmodels
from kats.consts import TimeSeriesData
from kats.detectors.cusum_detection import (
CUSUMDetector,
MultiCUSUMDetector,
VectorizedCUSUMDetector,
)
from parameterized.parameterized import parameterized
from scipy.stats import chi2 # @manual
from sklearn.datasets import make_spd_matrix
statsmodels_ver = float(
re.findall("([0-9]+\\.[0-9]+)\\..*", statsmodels.__version__)[0]
)
class CUSUMDetectorTest(TestCase):
def setUp(self) -> None:
np.random.seed(10)
# increasing with variance detection setup
df_increase = pd.DataFrame(
{
"increase": np.concatenate(
[np.random.normal(1, 0.2, 30), np.random.normal(1.5, 0.2, 30)]
)
}
)
df_increase["time"] = pd.Series(pd.date_range("2019-01-01", "2019-03-01"))
inc_timeseries = TimeSeriesData(df_increase)
self.inc_detector = CUSUMDetector(inc_timeseries)
self.inc_change_points = self.inc_detector.detector()
self.inc_metadata = self.inc_change_points[0]
# decreasing detection setup
df_decrease = pd.DataFrame(
{
"decrease": np.concatenate(
[np.random.normal(1, 0.2, 50), np.random.normal(0.5, 0.2, 10)]
)
}
)
df_decrease["time"] = pd.Series(pd.date_range("2019-01-01", "2019-03-01"))
dec_timeseries = TimeSeriesData(df_decrease)
self.dec_detector = CUSUMDetector(dec_timeseries)
self.dec_change_points = self.dec_detector.detector()
self.dec_metadata = self.dec_change_points[0]
# seasonality setup
self.periodicity = 48
self.total_cycles = 3
harmonics = 2
noise_std = 3
seasonal_term = CUSUMDetectorTest.simulate_seasonal_term(
self.periodicity,
self.total_cycles,
noise_std=noise_std,
harmonics=harmonics,
)
seasonal_term = seasonal_term / seasonal_term.std() * 2
residual = np.random.normal(0, 1, self.periodicity * self.total_cycles)
self.seasonal_data = seasonal_term + residual
# seasonality with increase trend setup
trend_term = np.logspace(0, 1, self.periodicity * self.total_cycles)
data = self.seasonal_data + trend_term
data -= np.min(data)
df_seasonality = pd.DataFrame(
{
"time": pd.date_range(
"2020-01-01",
periods=self.periodicity * self.total_cycles,
freq="30T",
),
"seasonality": data,
}
)
timeseries = TimeSeriesData(df_seasonality)
self.season_inc_trend_detector = CUSUMDetector(timeseries)
self.season_inc_trend_change_points = self.season_inc_trend_detector.detector(
interest_window=[
self.periodicity * (self.total_cycles - 1),
self.periodicity * self.total_cycles - 1,
],
magnitude_quantile=1,
change_directions=["increase", "decrease"],
delta_std_ratio=0,
)
self.season_metadata = self.season_inc_trend_change_points[0]
# test on step change with no variance
df_increase_no_var = pd.DataFrame(
{
"increase": np.concatenate(
[np.random.normal(1, 0, 30), np.random.normal(2, 0, 30)]
)
}
)
df_increase_no_var["time"] = pd.Series(
pd.date_range("2019-01-01", "2019-03-01")
)
no_var_timeseries = TimeSeriesData(df_increase_no_var)
self.no_var_detector = CUSUMDetector(no_var_timeseries)
self.no_var_change_points = self.no_var_detector.detector()
# no seasonality setup
data = self.seasonal_data
data -= np.min(data)
df_seasonality = pd.DataFrame(
{
"time": pd.date_range(
"2020-01-01",
periods=self.periodicity * self.total_cycles,
freq="30T",
),
"seasonality": data,
}
)
timeseries = TimeSeriesData(df_seasonality)
self.no_season_detector = CUSUMDetector(timeseries)
self.no_season_change_points = self.no_season_detector.detector(
interest_window=[
self.periodicity * (self.total_cycles - 1),
self.periodicity * self.total_cycles - 1,
],
magnitude_quantile=1,
change_directions=["increase"],
delta_std_ratio=0,
)
# no regression setup
df_noregress = pd.DataFrame({"no_change": np.random.normal(1, 0.2, 60)})
df_noregress["time"] = pd.Series(pd.date_range("2019-01-01", "2019-03-01"))
timeseries = TimeSeriesData(df_noregress)
self.no_reg_detector = CUSUMDetector(timeseries)
self.no_reg_change_points = self.no_reg_detector.detector(start_point=20)
@parameterized.expand(
[
["inc_change_points", 1],
["dec_change_points", 1],
["season_inc_trend_change_points", 1],
["no_var_change_points", 1],
["no_reg_change_points", 0],
["no_season_change_points", 0],
]
)
def test_cp_len(self, cp_name, expected) -> None:
self.assertEqual(len(attrgetter(cp_name)(self)), expected)
@parameterized.expand(
[
["inc_metadata", 29],
["dec_metadata", 49],
]
)
def test_cp_index(self, metadata_name, expected) -> None:
self.assertLessEqual(
abs(attrgetter(metadata_name)(self).cp_index - expected), 1
)
@parameterized.expand(
[
["inc_metadata", "increase"],
["dec_metadata", "decrease"],
]
)
def test_direction(self, metadata_name, expected) -> None:
self.assertEqual(attrgetter(metadata_name)(self).direction, expected)
def test_increasing_mu(self) -> None:
self.assertLess(self.inc_metadata.mu0, self.inc_metadata.mu1)
def test_increasing_correct_delta(self) -> None:
self.assertEqual(
self.inc_metadata.delta, self.inc_metadata.mu1 - self.inc_metadata.mu0
)
def test_increasing_regression(self) -> None:
self.assertTrue(self.inc_metadata.regression_detected)
@parameterized.expand(
[
["season_metadata.p_value_int", "season_metadata.llr_int"],
["inc_metadata.p_value", "inc_metadata.llr"],
]
)
def test_p_val(self, pval_name, llr_name) -> None:
self.assertEqual(
attrgetter(pval_name)(self),
1 - chi2.cdf(attrgetter(llr_name)(self), 2),
)
def test_increasing_p_val_nan(self) -> None:
self.assertTrue(np.isnan(self.inc_metadata.p_value_int))
def test_increasing_llr_int(self) -> None:
self.assertEqual(self.inc_metadata.llr_int, np.inf)
def test_increasing_stable_changepoint(self) -> None:
self.assertTrue(self.inc_metadata.stable_changepoint)
@parameterized.expand(
[
["inc_detector", "inc_change_points"],
["dec_detector", "dec_change_points"],
["season_inc_trend_detector", "season_inc_trend_change_points"],
["no_var_detector", "no_var_change_points"],
["no_reg_detector", "no_reg_change_points"],
["no_season_detector", "no_season_change_points"],
]
)
def test_plot(self, detector_name, cp_name) -> None:
attrgetter(detector_name)(self).plot(attrgetter(cp_name)(self))
@staticmethod
def simulate_seasonal_term(
periodicity, total_cycles, noise_std=1.0, harmonics=None
):
duration = periodicity * total_cycles
assert duration == int(duration)
duration = int(duration)
harmonics = harmonics if harmonics else int(np.floor(periodicity / 2))
lambda_p = 2 * np.pi / float(periodicity)
gamma_jt = noise_std * np.random.randn((harmonics))
gamma_star_jt = noise_std * np.random.randn((harmonics))
total_timesteps = 100 * duration # Pad for burn in
series = np.zeros(total_timesteps)
for t in range(total_timesteps):
gamma_jtp1 = np.zeros_like(gamma_jt)
gamma_star_jtp1 = np.zeros_like(gamma_star_jt)
for j in range(1, harmonics + 1):
cos_j = np.cos(lambda_p * j)
sin_j = np.sin(lambda_p * j)
gamma_jtp1[j - 1] = (
gamma_jt[j - 1] * cos_j
+ gamma_star_jt[j - 1] * sin_j
+ noise_std * np.random.randn()
)
gamma_star_jtp1[j - 1] = (
-gamma_jt[j - 1] * sin_j
+ gamma_star_jt[j - 1] * cos_j
+ noise_std * np.random.randn()
)
series[t] = np.sum(gamma_jtp1)
gamma_jt = gamma_jtp1
gamma_star_jt = gamma_star_jtp1
wanted_series = series[-duration:] # Discard burn in
return wanted_series
def test_seasonality_with_increasing_trend_cp_index(self) -> None:
self.assertGreaterEqual(
self.season_metadata.cp_index, self.periodicity * (self.total_cycles - 1)
)
def test_logging_multivariate_error(self) -> None:
# test multivariate error
np.random.seed(10)
df_multi_var = pd.DataFrame(
{
"no_change": np.random.normal(1, 0.2, 60),
"no_change2": np.random.normal(1, 0.2, 60),
}
)
df_multi_var["time"] = pd.Series(pd.date_range("2019-01-01", "2019-03-01"))
with self.assertRaises(ValueError):
timeseries = TimeSeriesData(df_multi_var)
CUSUMDetector(timeseries)
@parameterized.expand(
[
["WARNING", 0.9],
["DEBUG", None],
]
)
def test_logging_neg_magnitude(self, level, mag_q) -> None:
# test logging setup - negative in magnitude
np.random.seed(10)
df_neg = pd.DataFrame({"no_change": -np.random.normal(1, 0.2, 60)})
df_neg["time"] = pd.Series(pd.date_range("2019-01-01", "2019-03-01"))
timeseries = TimeSeriesData(df_neg)
logging_detector = CUSUMDetector(timeseries)
with self.assertLogs(level=level):
logging_detector.detector(
magnitude_quantile=mag_q, interest_window=[40, 60]
)
def test_ts_without_name(self) -> None:
n = 10
time = pd.Series(pd.date_range(start="2018-01-01", periods=n, freq="D"))
value = pd.Series(np.arange(n))
ts = TimeSeriesData(time=time, value=value)
detector = CUSUMDetector(ts)
change_points = detector.detector()
detector.plot(change_points)
class MultiCUSUMDetectorTest(TestCase):
def setUp(self) -> None:
# increasing setup
self.D = 10
random_state = 10
np.random.seed(random_state)
mean1 = np.ones(self.D)
mean2 = mean1 * 2
sigma = make_spd_matrix(self.D, random_state=random_state)
df_increase = pd.DataFrame(
np.concatenate(
[
np.random.multivariate_normal(mean1, sigma, 60),
np.random.multivariate_normal(mean2, sigma, 30),
]
)
)
df_increase["time"] = pd.Series(pd.date_range("2019-01-01", "2019-04-01"))
timeseries_increase = TimeSeriesData(df_increase)
self.inc_change_points = MultiCUSUMDetector(timeseries_increase).detector()
self.inc_metadata = self.inc_change_points[0]
# decreasing setup
df_decrease = pd.DataFrame(
np.concatenate(
[
np.random.multivariate_normal(mean2, sigma, 60),
np.random.multivariate_normal(mean1, sigma, 30),
]
)
)
df_decrease["time"] = pd.Series(pd.date_range("2019-01-01", "2019-04-01"))
timeseries_decrease = TimeSeriesData(df_decrease)
self.dec_change_points = MultiCUSUMDetector(timeseries_decrease).detector()
self.dec_metadata = self.dec_change_points[0]
@parameterized.expand(
[
["inc_change_points"],
["dec_change_points"],
]
)
def test_cp_len(self, cp_name) -> None:
self.assertEqual(len(attrgetter(cp_name)(self)), 1)
@parameterized.expand(
[
["inc_metadata"],
["dec_metadata"],
]
)
def test_cp_index(self, cp_name) -> None:
self.assertLessEqual(abs(attrgetter(cp_name)(self).cp_index - 59), 1)
@parameterized.expand(
[
["inc_metadata.mu0", "inc_metadata.mu1"],
["dec_metadata.mu1", "dec_metadata.mu0"],
]
)
def test_mu(self, m1_name, m2_name) -> None:
for m1, m2 in zip(attrgetter(m1_name)(self), attrgetter(m2_name)(self)):
self.assertLess(m1, m2)
@parameterized.expand(
[
["inc_metadata", "inc_metadata.mu0", "inc_metadata.mu1"],
["dec_metadata", "dec_metadata.mu0", "dec_metadata.mu1"],
]
)
def test_correct_delta(self, metadata_name, mu0_name, mu1_name) -> None:
for d, diff in zip(
attrgetter(metadata_name)(self).delta,
attrgetter(mu1_name)(self) - attrgetter(mu0_name)(self),
):
self.assertEqual(d, diff)
@parameterized.expand(
[
["inc_metadata"],
["dec_metadata"],
]
)
def test_regression(self, metadata_name) -> None:
self.assertTrue(attrgetter(metadata_name)(self).regression_detected)
@parameterized.expand(
[
["inc_metadata"],
["dec_metadata"],
]
)
def test_p_val(self, metadata_name) -> None:
self.assertEqual(
attrgetter(metadata_name)(self).p_value,
1 - chi2.cdf(attrgetter(metadata_name)(self).llr, self.D + 1),
)
@parameterized.expand(
[
["inc_metadata"],
["dec_metadata"],
]
)
def test_gaussian_increase_p_val_nan(self, metadata_name) -> None:
self.assertTrue(np.isnan(attrgetter(metadata_name)(self).p_value_int))
@parameterized.expand(
[
["inc_metadata"],
["dec_metadata"],
]
)
def test_gaussian_increase_llr_int(self, metadata_name) -> None:
self.assertEqual(attrgetter(metadata_name)(self).llr_int, np.inf)
@parameterized.expand(
[
["inc_metadata"],
["dec_metadata"],
]
)
def test_gaussian_increase_stable_changepoint(self, metadata_name) -> None:
self.assertTrue(attrgetter(metadata_name)(self).stable_changepoint)
def test_no_changepoint(self) -> None:
D = 10
random_state = 10
np.random.seed(random_state)
mean = np.ones(D)
sigma = make_spd_matrix(D, random_state=random_state)
# Use the same mean for the entire series and there should be no changepoint
df_no_change = pd.DataFrame(np.random.multivariate_normal(mean, sigma, 90))
df_no_change["time"] = pd.Series(pd.date_range("2019-01-01", "2019-04-01"))
timeseries_no_change = TimeSeriesData(df_no_change)
change_points = MultiCUSUMDetector(timeseries_no_change).detector()
self.assertEqual(len(change_points), 0)
class VectorizedCUSUMDetectorTest(TestCase):
def setUp(self) -> None:
np.random.seed(10)
# increasing with variance detection setup
df = pd.DataFrame(
{
"increase": np.concatenate(
[np.random.normal(1, 0.2, 30), np.random.normal(1.5, 0.2, 30)]
),
"decrease": np.concatenate(
[np.random.normal(1, 0.2, 50), np.random.normal(0.5, 0.2, 10)]
),
}
)
df["time"] = pd.Series(pd.date_range("2019-01-01", "2019-03-01"))
self.inc_change_points = CUSUMDetector(
TimeSeriesData(df[["increase", "time"]])
).detector()
self.dec_change_points = CUSUMDetector(
TimeSeriesData(df[["decrease", "time"]])
).detector()
timeseries = TimeSeriesData(df)
change_points_vectorized_ = VectorizedCUSUMDetector(timeseries).detector_()
# take the change points in all columns with the corresponding directions
change_points_vectorized = [[], []]
for i in range(len(change_points_vectorized_)):
for change_points_ts in change_points_vectorized_[i]:
if change_points_ts.direction == df.columns.values[i]:
change_points_vectorized[i].append(change_points_ts)
# change points for the first column in the matrix
self.inc_change_points_vectorized = change_points_vectorized[0]
# change points for the second column in the matrix
self.dec_change_points_vectorized = change_points_vectorized[1]
def test_vectorized_results(self) -> None:
# check if vectorized CUSUM produces the same results with the original CUSUM
self.assertEqual(
self.inc_change_points[0].start_time,
self.inc_change_points_vectorized[0].start_time,
)
self.assertEqual(
self.dec_change_points[0].start_time,
self.dec_change_points_vectorized[0].start_time,
)
|
data structures/linked list/python/Stack_with_Singly_Linked_List.py | gggrafff/Algorithms | 715 | 11196206 | <reponame>gggrafff/Algorithms<gh_stars>100-1000
class Node:
def __init__(self,data=None,next_node=None):
self.data = data
self.next_node = next_node
def get_data(self):
return self.data
def get_next(self):
return self.next_node
def set_next(self,new_node):
self.next_node = new_node
class Stack:
def __init__(self,head=None):
self.head = head
def push(self,data):
new_item = Node(data)
current = self.head
if current is None:
self.head = new_item
else:
while current.get_next():
current = current.get_next()
current.set_next(new_item)
def pop(self):
current = self.head
prev = None
if current is not None:
while current.get_next():
prev = current
current = current.get_next()
if prev:
prev.set_next(current.get_next())
else:
self.head = current.get_next()
else:
print("Empty stack!")
def print_stack(self):
current = self.head
temp = []
while current:
temp.append(current.get_data())
current = current.get_next()
return temp
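# Illustrative demo (not part of the original snippet). Note that this
# implementation pushes and pops at the tail of the list, and print_stack()
# returns the items as a Python list rather than printing them.
if __name__ == '__main__':
    stack = Stack()
    stack.push(1)
    stack.push(2)
    stack.push(3)
    assert stack.print_stack() == [1, 2, 3]
    stack.pop()  # removes the most recently pushed item (3)
    assert stack.print_stack() == [1, 2]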
|
setup.py | EasonC13/iota.py | 347 | 11196210 | #!/usr/bin/env python
from codecs import StreamReader, open
from distutils.version import LooseVersion
import setuptools
##
# Because of the way PyOTA declares its dependencies, it requires a
# more recent version of setuptools.
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
if LooseVersion(setuptools.__version__) < LooseVersion('24.2'):
import sys
sys.exit('Installation failed: Upgrade setuptools to version 24.2 or later')
##
# Load long description for PyPI.
with open('docs/README.rst', 'r', 'utf-8') as f: # type: StreamReader
long_description = f.read()
##
# Declare test dependencies separately, so that they can be installed
# either automatically (``python setup.py test``) or manually
# (``pip install -e .[test-runner]``).
tests_require = [
'aiounittest',
'nose',
]
##
# Off we go!
setuptools.setup(
name='PyOTA',
description='IOTA API library for Python',
url='https://github.com/iotaledger/iota.py',
version='3.1.0b1',
long_description=long_description,
packages=setuptools.find_packages('.', exclude=(
'examples', 'examples.*',
'test', 'test.*',
)),
include_package_data=True,
# http://python-packaging.readthedocs.io/en/latest/command-line-scripts.html#the-console-scripts-entry-point
entry_points={
'console_scripts': [
'pyota-cli=iota.bin.repl:main',
],
},
# Tell setuptools which python versions to support. Will include metadata
# in the built sdist and wheel that tells pypi to tell pip about supported
# python versions.
# 'python_requires' works from setuptools 24.2.0 (previous versions ignore
# it with a warning), pip understands it from 9.0.0.
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
python_requires='>=3.6, <4',
# filters is no longer maintained and does not support Python 3.7
# phx-filters is a fork that supports 3.7 and 3.8 but not 2.7
install_requires=[
'httpx',
'phx-filters',
'pysha3',
],
extras_require={
'ccurl': ['pyota-ccurl'],
'docs-builder': ['sphinx >= 2.4.2', 'sphinx_rtd_theme >= 0.4.3'],
'pow': ['pyota-pow >= 1.0.2'],
# tox is able to run the tests in parallel since version 3.7
'test-runner': ['tox >= 3.7'] + tests_require,
},
test_suite='test',
test_loader='nose.loader:TestLoader',
tests_require=tests_require,
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords=(
'iota,tangle,iot,internet of things,api,library,cryptocurrency,'
'balanced ternary'
),
author='<NAME>',
author_email='<EMAIL>',
)
|
tests/unit/recommenders/tuning/test_nni_utils.py | enowy/Recommenders | 10,147 | 11196221 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import json
import os
import sys
from tempfile import TemporaryDirectory
from unittest.mock import patch
import pytest
from recommenders.tuning.nni.nni_utils import (
get_experiment_status,
check_experiment_status,
check_stopped,
check_metrics_written,
get_trials,
NNI_STATUS_URL,
NNI_TRIAL_JOBS_URL,
)
class MockResponse:
# Class that mocks requests.models.Response
def __init__(self, content, error):
self._content = content
self._error = error
def json(self):
return {"status": self._content, "errors": [self._error]}
def mocked_status_get(url, content, error):
assert url.startswith(NNI_STATUS_URL)
return MockResponse(content, error)
class MockResponseTrials:
# Class that mocks requests.models.Response
def __init__(self, content):
self._content = content
def json(self):
return self._content
def mocked_trials_get(url, content):
assert url.startswith(NNI_TRIAL_JOBS_URL)
return MockResponseTrials(content)
def mock_exception():
raise Exception()
@pytest.mark.skipif(sys.platform == "win32", reason="nni not installable on windows")
def test_get_experiment_status():
content = "some_status"
error = ""
with patch(
"requests.get", side_effect=lambda url: mocked_status_get(url, content, error)
):
nni_status = get_experiment_status(NNI_STATUS_URL)
assert nni_status["status"] == "some_status"
assert nni_status["errors"] == [""]
@pytest.mark.skipif(sys.platform == "win32", reason="nni not installable on windows")
def test_check_experiment_status_done():
content = "DONE"
error = ""
with patch(
"requests.get", side_effect=lambda url: mocked_status_get(url, content, error)
):
check_experiment_status(wait=0.1, max_retries=1)
@pytest.mark.skipif(sys.platform == "win32", reason="nni not installable on windows")
def test_check_experiment_status_tuner_no_more_trial():
content = "TUNER_NO_MORE_TRIAL"
error = ""
with patch(
"requests.get", side_effect=lambda url: mocked_status_get(url, content, error)
):
check_experiment_status(wait=0.1, max_retries=1)
@pytest.mark.skipif(sys.platform == "win32", reason="nni not installable on windows")
def test_check_experiment_status_running():
content = "RUNNING"
error = ""
with pytest.raises(TimeoutError) as excinfo:
with patch(
"requests.get",
side_effect=lambda url: mocked_status_get(url, content, error),
):
check_experiment_status(wait=0.1, max_retries=1)
assert "check_experiment_status() timed out" == str(excinfo.value)
@pytest.mark.skipif(sys.platform == "win32", reason="nni not installable on windows")
def test_check_experiment_status_no_more_trial():
content = "NO_MORE_TRIAL"
error = ""
with pytest.raises(TimeoutError) as excinfo:
with patch(
"requests.get",
side_effect=lambda url: mocked_status_get(url, content, error),
):
check_experiment_status(wait=0.1, max_retries=1)
assert "check_experiment_status() timed out" == str(excinfo.value)
@pytest.mark.skipif(sys.platform == "win32", reason="nni not installable on windows")
def test_check_experiment_status_failed():
content = "some_failed_status"
error = "NNI_ERROR"
with pytest.raises(RuntimeError) as excinfo:
with patch(
"requests.get",
side_effect=lambda url: mocked_status_get(url, content, error),
):
check_experiment_status(wait=0.1, max_retries=1)
assert (
"NNI experiment failed to complete with status some_failed_status - NNI_ERROR"
== str(excinfo.value)
)
@pytest.mark.skipif(sys.platform == "win32", reason="nni not installable on windows")
def test_check_stopped_timeout():
content = "some_status"
error = ""
with pytest.raises(TimeoutError) as excinfo:
with patch(
"requests.get",
side_effect=lambda url: mocked_status_get(url, content, error),
):
check_stopped(wait=0.1, max_retries=1)
assert "check_stopped() timed out" == str(excinfo.value)
@pytest.mark.skipif(sys.platform == "win32", reason="nni not installable on windows")
def test_check_stopped():
with patch("requests.get", side_effect=mock_exception):
check_stopped(wait=0.1, max_retries=1)
@pytest.mark.skipif(sys.platform == "win32", reason="nni not installable on windows")
def test_check_metrics_written():
content = [{"finalMetricData": None}, {"finalMetricData": None}]
with patch("requests.get", side_effect=lambda url: mocked_trials_get(url, content)):
check_metrics_written(wait=0.1, max_retries=1)
@pytest.mark.skipif(sys.platform == "win32", reason="nni not installable on windows")
def test_check_metrics_written_timeout():
content = [{"logPath": "/p"}, {"logPath": "/q"}]
with pytest.raises(TimeoutError) as excinfo:
with patch(
"requests.get", side_effect=lambda url: mocked_trials_get(url, content)
):
check_metrics_written(wait=0.1, max_retries=1)
assert "check_metrics_written() timed out" == str(excinfo.value)
@pytest.mark.skipif(sys.platform == "win32", reason="nni not installable on windows")
def test_get_trials():
with TemporaryDirectory() as tmp_dir1, TemporaryDirectory() as tmp_dir2:
mock_trials = [
{
"finalMetricData": [
{"data": '"{\\"rmse\\": 0.8, \\"default\\": 0.3}"'}
],
"logPath": "file://localhost:{}".format(tmp_dir1),
},
{
"finalMetricData": [
{"data": '"{\\"rmse\\": 0.9, \\"default\\": 0.2}"'}
],
"logPath": "file://localhost:{}".format(tmp_dir2),
},
]
metrics1 = {"rmse": 0.8, "precision_at_k": 0.3}
with open(os.path.join(tmp_dir1, "metrics.json"), "w") as f:
json.dump(metrics1, f)
params1 = {
"parameter_id": 1,
"parameter_source": "algorithm",
"parameters": {"n_factors": 100, "reg": 0.1},
}
with open(os.path.join(tmp_dir1, "parameter.cfg"), "w") as f:
json.dump(params1, f)
metrics2 = {"rmse": 0.9, "precision_at_k": 0.2}
with open(os.path.join(tmp_dir2, "metrics.json"), "w") as f:
json.dump(metrics2, f)
params2 = {
"parameter_id": 2,
"parameter_source": "algorithm",
"parameters": {"n_factors": 50, "reg": 0.02},
}
with open(os.path.join(tmp_dir2, "parameter.cfg"), "w") as f:
json.dump(params2, f)
with patch(
"requests.get", side_effect=lambda url: mocked_trials_get(url, mock_trials)
):
trials, best_metrics, best_params, best_trial_path = get_trials(
optimize_mode="maximize"
)
expected_trials = [
({"rmse": 0.8, "default": 0.3}, tmp_dir1),
({"rmse": 0.9, "default": 0.2}, tmp_dir2),
]
assert trials == expected_trials
assert best_metrics == metrics1
assert best_params == params1
assert best_trial_path == tmp_dir1
|
anno_json_image_urls.py | CasiaFan/Dataset_to_VOC_converter | 194 | 11196234 | <filename>anno_json_image_urls.py
import json
import cytoolz
import argparse, os, re
def extract_urls(args):
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
content = json.load(open(args.anno_file, 'r'))
merge_info_list = list(map(cytoolz.merge, cytoolz.join('id', content['images'], 'image_id', content['annotations'])))
if args.type == 'instance':
outfiles = {category['id']: os.path.join(args.output_dir, re.sub(" ", "_", category['name'])+"_image_urls.txt") for category in content['categories']}
for info in merge_info_list:
print("Saving file name: ", info['file_name'])
with open(outfiles[info['category_id']], "a") as f:
f.write(os.path.splitext(info['file_name'])[0]+" "+info['coco_url']+"\n")
f.close()
print("Exporting coco image urls for instance done!")
else:
outfile = os.path.join(args.output_dir, "person_keypoints_imag_urls.txt")
url_dict = {info['file_name']: info['coco_url'] for info in merge_info_list}
with open(outfile, "w") as f:
for name, url in url_dict.items():
f.write(os.path.splitext(name)[0]+" "+url+"\n")
f.close()
print("Exporting coco image urls for keypoints done!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--anno_file", help="annotation file for object instance/keypoint")
parser.add_argument("--type", type=str, help="object instance or keypoint", choices=['instance', 'keypoint'])
parser.add_argument("--output_dir", help="output directory for image urls in json annotation file")
args = parser.parse_args()
extract_urls(args) |
tests/test_container_view.py | zhgcao/pyvmomi | 1,894 | 11196235 | # VMware vSphere Python SDK
# Copyright (c) 2008-2015 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tests
from pyVim import connect
from pyVmomi import vim
class ContainerViewTests(tests.VCRTestBase):
@tests.VCRTestBase.my_vcr.use_cassette('basic_container_view.yaml',
cassette_library_dir=tests.fixtures_path,
record_mode='once')
def test_basic_container_view(self):
# see: http://python3porting.com/noconv.html
si = connect.SmartConnect(host='vcsa',
user='my_user',
pwd='<PASSWORD>')
content = si.RetrieveContent()
datacenter_object_view = content.viewManager.CreateContainerView(
content.rootFolder, [vim.Datacenter], True)
for datacenter in datacenter_object_view.view:
datastores = datacenter.datastore
# NOTE (hartsocks): the object handle here is a managed object
# reference, until we ask for more details, no other detail is
# transmitted. Our sample fixture is quite small.
self.assertEqual(1, len(datastores))
datacenter_object_view.Destroy()
|
tests/functional/kvpy/pdel.py | efeslab/hse | 558 | 11196249 | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2021 Micron Technology, Inc. All rights reserved.
from contextlib import ExitStack
from hse2 import hse
from utility import lifecycle, cli
def add_keys(kvs: hse.Kvs, pfx: str, start: int, end: int):
for k_id in range(start, end):
key = f"{pfx}-{k_id:0>10}"
kvs.put(key.encode(), b"val")
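# Key layout illustration: add_keys(kvs, "AA", 0, 3) puts "AA-0000000000",
# "AA-0000000001" and "AA-0000000002", each with the value b"val".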
def verify_keys(kvs: hse.Kvs, pfx: str, start: int, end: int):
with kvs.cursor(filt=pfx.encode()) as cur:
assert sum(1 for _ in cur.items()) == end - start
with kvs.cursor(filt=pfx.encode()) as cur:
k_id = start
for (k, _) in cur.items():
expected = f"{pfx}-{k_id:0>10}".encode()
assert k == expected
assert k_id < end
k_id = k_id + 1
hse.init(cli.CONFIG)
try:
with ExitStack() as stack:
kvdb_ctx = lifecycle.KvdbContext().rparams("durability.enabled=false")
kvdb = stack.enter_context(kvdb_ctx)
kvs_ctx = lifecycle.KvsContext(kvdb, "pdel").cparams("prefix.length=2")
kvs = stack.enter_context(kvs_ctx)
num_keys = 50 * 1000
kvs.prefix_delete(b"AA")
add_keys(kvs=kvs, pfx="AA", start=0, end=num_keys)
add_keys(kvs=kvs, pfx="AB", start=0, end=num_keys)
add_keys(kvs=kvs, pfx="AC", start=0, end=num_keys)
add_keys(kvs=kvs, pfx="AD", start=0, end=num_keys)
kvdb.sync()
kvs.prefix_delete(b"AB")
add_keys(kvs=kvs, pfx="AA", start=num_keys, end=2 * num_keys)
add_keys(kvs=kvs, pfx="AB", start=num_keys, end=2 * num_keys)
add_keys(kvs=kvs, pfx="AC", start=num_keys, end=2 * num_keys)
add_keys(kvs=kvs, pfx="AD", start=num_keys, end=2 * num_keys)
kvs.prefix_delete(b"AC")
verify_keys(kvs=kvs, pfx="AA", start=0, end=2 * num_keys)
verify_keys(kvs=kvs, pfx="AB", start=num_keys, end=2 * num_keys)
verify_keys(kvs=kvs, pfx="AC", start=0, end=0)
verify_keys(kvs=kvs, pfx="AD", start=0, end=2 * num_keys)
finally:
hse.fini()
|
recipes/Python/115421_Date_difference/recipe-115421.py | tdiprima/code | 2,023 | 11196285 | <reponame>tdiprima/code
# cal.py
#
# This code has been released to the Public Domain.
#
# finds the number of days between two particular dates
#
from string import *
FALSE,TRUE = range(2)
# Standard number of days for each month.
months = (31,28,31,30,31,30,31,31,30,31,30,31)
JAN,FEB,MAR,APR,MAY,JUN,JUL,AUG,SEP,OCT,NOV,DEC = range(len(months))
def leapyear(year):
return year % 4 == 0 and year % 100 != 0 or year % 400 == 0
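# Examples: leapyear(2000) -> True (divisible by 400), leapyear(1900) -> False
# (century not divisible by 400), leapyear(1996) -> True.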
def main():
days=sum=0
month = atoi(raw_input("Enter Month 1: "))
day = atoi(raw_input("Enter Day 1: "))
year = atoi(raw_input("Enter Year 1: "))
emonth = atoi(raw_input("Enter Month 2: "))
eday = atoi(raw_input("Enter Day 2: "))
eyear = atoi(raw_input("Enter Year 2: "))
month = month - 1
emonth = emonth - 1
if month == JAN:
if leapyear(year):
days = days + (366 - day)
else:
days = days + (365 - day)
else:
i = 0
while i < month:
sum = sum + months[i]
i = i + 1
sum = sum + day
if leapyear(year):
days = days + (366 - sum)
else:
days = days + (365 - sum)
print "Days first year ==",days
print "Number of years between ==",eyear - year
i = year + 1
while i < eyear:
if leapyear(i):
days = days + 366
else:
days = days + 365
print "in year",i
i = i + 1
print "Total days not including last year ==",days
if emonth == JAN:
days = days + eday
else:
i = 0
while i < emonth:
days = days + months[i]
i = i + 1
        days = days + eday
    if leapyear(eyear) and emonth > FEB:
days = days + 1
print "Final total days ==",days
if __name__ == '__main__':
main()
|
train.py | Finspire13/CMCS-Temporal-Action-Localization | 136 | 11196297 | <filename>train.py
import matlab.engine # Must import matlab.engine first
import os
import torch
import numpy as np
import argparse
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from logger import Logger
from model import BackboneNet
from dataset import SingleVideoDataset
from utils import get_dataset, eval_thumos_recog, load_config_file
import pdb
device = torch.device('cuda')
def get_diversity_loss(scores):
assert (len(scores) > 1)
softmax_scores = [F.softmax(i, dim=2) for i in scores]
S1 = torch.stack(softmax_scores).permute(1, 3, 0, 2)
S2 = torch.stack(softmax_scores).permute(1, 3, 2, 0)
    S1_norm = S1.norm(p=2, dim=3, keepdim=True)  # + 1e-6 careful here
S2_norm = S2.norm(p=2, dim=2, keepdim=True) #
R = torch.matmul(S1, S2) / (torch.matmul(S1_norm, S2_norm) + 1e-6)
I = torch.eye(len(scores)).to(device)
I = I.repeat((R.shape[0], R.shape[1], 1, 1))
pair_num = len(scores) * (len(scores) - 1)
loss_div = F.relu(R - I).sum(-1).sum(-1) / pair_num
loss_div = loss_div.mean()
return loss_div
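# Reading of get_diversity_loss above (shapes are inferred, not asserted by the
# code): with K = len(scores) streams each shaped (batch, time, classes), R is a
# (batch, classes, K, K) matrix of pairwise cosine similarities between the
# streams' temporal score vectors; subtracting the identity and averaging the
# ReLU'd entries penalises streams that concentrate on the same snippets.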
def get_norm_regularization(scores):
video_len = scores[0].shape[1]
assert (video_len > 0)
S_raw = torch.stack(scores).permute(1, 3, 0, 2)
S_raw_norm = S_raw.norm(p=1, dim=3) / video_len
deviations = S_raw_norm - S_raw_norm.mean(dim=2, keepdim=True).repeat(
1, 1, S_raw_norm.shape[2])
loss_norm = torch.abs(deviations).mean()
return loss_norm
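# Reading of get_norm_regularization above: S_raw_norm is each stream's per-class
# L1 score norm divided by the video length, and the loss is the mean absolute
# deviation from the cross-stream average, discouraging any one stream from
# dominating in magnitude.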
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--config-file', type=str)
parser.add_argument('--train-subset-name', type=str)
parser.add_argument('--test-subset-name', type=str)
parser.add_argument('--test-log', dest='test_log', action='store_true')
parser.add_argument('--no-test-log', dest='test_log', action='store_false')
parser.set_defaults(test_log=True)
args = parser.parse_args()
print(args.config_file)
print(args.train_subset_name)
print(args.test_subset_name)
print(args.test_log)
all_params = load_config_file(args.config_file)
locals().update(all_params)
def test(model, loader, modality):
assert (modality in ['both', 'rgb', 'flow'])
pred_score_dict = {}
label_dict = {}
correct = 0
total_cnt = 0
total_loss = {
'cls': 0,
'div': 0,
'norm': 0,
'sum': 0,
}
criterion = nn.CrossEntropyLoss(reduction='elementwise_mean')
with torch.no_grad():
model.eval()
for _, data in enumerate(loader): # No shuffle
video_name = data['video_name'][0]
label = data['label'].to(device)
weight = data['weight'].to(device).float()
if label.item() == action_class_num:
continue
else:
total_cnt += 1
if modality == 'both':
rgb = data['rgb'].to(device).squeeze(0)
flow = data['flow'].to(device).squeeze(0)
model_input = torch.cat([rgb, flow], dim=2)
elif modality == 'rgb':
model_input = data['rgb'].to(device).squeeze(0)
else:
model_input = data['flow'].to(device).squeeze(0)
model_input = model_input.transpose(2, 1)
_, _, out, scores, _ = model(model_input)
out = out.mean(0, keepdim=True)
loss_cls = criterion(out, label) * weight
total_loss['cls'] += loss_cls.item()
if diversity_reg:
loss_div = get_diversity_loss(scores) * weight
loss_div = loss_div * diversity_weight
loss_norm = get_norm_regularization(scores) * weight
loss_norm = loss_norm * diversity_weight
total_loss['div'] += loss_div.item()
total_loss['norm'] += loss_norm.item()
out = out[:, :action_class_num] # Remove bg
pred = torch.argmax(out, dim=1)
correct += (pred.item() == label.item())
###############
video_key = ''.join(video_name.split('-')
[:-1]) # remove content after the last -
pred_score_dict[video_key] = out.cpu().numpy()
if video_key not in label_dict.keys():
label_dict[video_key] = np.zeros((1, action_class_num))
label_dict[video_key][0, label.item()] = 1
###############
accuracy = correct / total_cnt
total_loss[
'sum'] = total_loss['cls'] + total_loss['div'] + total_loss['norm']
avg_loss = {k: v / total_cnt for k, v in total_loss.items()}
##############
pred_score_matrix = []
label_matrix = []
for k, v in pred_score_dict.items():
pred_score_matrix.append(v)
label_matrix.append(label_dict[k])
_, mean_ap = eval_thumos_recog(
np.concatenate(pred_score_matrix, axis=0),
np.concatenate(label_matrix, axis=0), action_class_num)
return accuracy, avg_loss, mean_ap
def train(train_train_loader, train_test_loader, test_test_loader, modality,
naming):
assert (modality in ['both', 'rgb', 'flow'])
log_dir = os.path.join('logs', naming, modality)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
logger = Logger(log_dir)
save_dir = os.path.join('models', naming)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if modality == 'both':
model = BackboneNet(in_features=feature_dim * 2,
**model_params).to(device)
else:
model = BackboneNet(in_features=feature_dim,
**model_params).to(device)
optimizer = optim.Adam(model.parameters(),
lr=learning_rate,
weight_decay=weight_decay)
if learning_rate_decay:
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer, milestones=[max_step_num // 2], gamma=0.1)
optimizer.zero_grad()
criterion = nn.CrossEntropyLoss(reduction='elementwise_mean')
update_step_idx = 0
single_video_idx = 0
loss_recorder = {
'cls': 0,
'div': 0,
'norm': 0,
'sum': 0,
}
while update_step_idx < max_step_num:
# Train loop
for _, data in enumerate(train_train_loader):
model.train()
single_video_idx += 1
label = data['label'].to(device)
weight = data['weight'].to(device).float()
if modality == 'both':
rgb = data['rgb'].to(device)
flow = data['flow'].to(device)
model_input = torch.cat([rgb, flow], dim=2)
elif modality == 'rgb':
model_input = data['rgb'].to(device)
else:
model_input = data['flow'].to(device)
model_input = model_input.transpose(2, 1)
_, _, out, scores, _ = model(model_input)
loss_cls = criterion(out, label) * weight
if diversity_reg:
loss_div = get_diversity_loss(scores) * weight
loss_div = loss_div * diversity_weight
loss_norm = get_norm_regularization(scores) * weight
loss_norm = loss_norm * diversity_weight
loss = loss_cls + loss_div + loss_norm
loss_recorder['div'] += loss_div.item()
loss_recorder['norm'] += loss_norm.item()
else:
loss = loss_cls
loss_recorder['cls'] += loss_cls.item()
loss_recorder['sum'] += loss.item()
loss.backward()
# Test and Update
if single_video_idx % batch_size == 0:
# Test
if update_step_idx % log_freq == 0:
train_acc, train_loss, train_map = test(
model, train_test_loader, modality)
logger.scalar_summary('Train Accuracy', train_acc,
update_step_idx)
logger.scalar_summary('Train map', train_map,
update_step_idx)
for k in train_loss.keys():
logger.scalar_summary('Train Loss {}'.format(k),
train_loss[k],
update_step_idx)
if args.test_log:
test_acc, test_loss, test_map = test(
model, test_test_loader, modality)
logger.scalar_summary('Test Accuracy', test_acc,
update_step_idx)
logger.scalar_summary('Test map', test_map,
update_step_idx)
for k in test_loss.keys():
logger.scalar_summary('Test Loss {}'.format(k),
test_loss[k],
update_step_idx)
# Batch Update
update_step_idx += 1
for k, v in loss_recorder.items():
print('Step {}: Loss_{}-{}'.format(
update_step_idx, k, v / batch_size))
logger.scalar_summary('Loss_{}_ps'.format(k),
v / batch_size, update_step_idx)
loss_recorder[k] = 0
optimizer.step()
optimizer.zero_grad()
if learning_rate_decay:
scheduler.step()
if update_step_idx in check_points:
torch.save(
model.state_dict(),
os.path.join(
save_dir,
'model-{}-{}'.format(modality,
update_step_idx)))
if update_step_idx >= max_step_num:
break
train_dataset_dict = get_dataset(dataset_name=dataset_name,
subset=args.train_subset_name,
file_paths=file_paths,
sample_rate=sample_rate,
base_sample_rate=base_sample_rate,
action_class_num=action_class_num,
modality='both',
feature_type=feature_type,
feature_oversample=feature_oversample,
temporal_aug=True,
load_background=with_bg)
train_train_dataset = SingleVideoDataset(
train_dataset_dict,
single_label=True,
random_select=True,
max_len=training_max_len) # To be checked
train_test_dataset = SingleVideoDataset(train_dataset_dict,
single_label=True,
random_select=False,
max_len=None)
train_train_loader = torch.utils.data.DataLoader(train_train_dataset,
batch_size=1,
pin_memory=True,
shuffle=True)
train_test_loader = torch.utils.data.DataLoader(train_test_dataset,
batch_size=1,
pin_memory=True,
shuffle=False)
if args.test_log:
test_dataset_dict = get_dataset(dataset_name=dataset_name,
subset=args.test_subset_name,
file_paths=file_paths,
sample_rate=sample_rate,
base_sample_rate=base_sample_rate,
action_class_num=action_class_num,
modality='both',
feature_type=feature_type,
feature_oversample=feature_oversample,
temporal_aug=True,
load_background=False)
test_test_dataset = SingleVideoDataset(test_dataset_dict,
single_label=True,
random_select=False,
max_len=None)
test_test_loader = torch.utils.data.DataLoader(test_test_dataset,
batch_size=1,
pin_memory=True,
shuffle=False)
else:
test_test_loader = None
for run_idx in range(train_run_num):
naming = '{}-run-{}'.format(experiment_naming, run_idx)
train(train_train_loader, train_test_loader, test_test_loader, 'rgb',
naming)
train(train_train_loader, train_test_loader, test_test_loader, 'flow',
naming)
train(train_train_loader, train_test_loader, test_test_loader, 'both',
naming)
|
adb/systrace/catapult/common/py_vulcanize/third_party/rcssmin/setup.py | mohanedmoh/TBS | 2,151 | 11196307 | <reponame>mohanedmoh/TBS
#!/usr/bin/env python
# -*- coding: ascii -*-
#
# Copyright 2006 - 2013
# <NAME> or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys as _sys
from _setup import run
def setup(args=None, _manifest=0):
""" Main setup function """
from _setup.ext import Extension
if 'java' in _sys.platform.lower():
# no c extension for jython
ext = None
else:
ext=[Extension('_rcssmin', sources=['rcssmin.c'])]
return run(script_args=args, ext=ext, manifest_only=_manifest)
def manifest():
""" Create List of packaged files """
return setup((), _manifest=1)
if __name__ == '__main__':
setup()
|
mistral/utils/__init__.py | shubhamdang/mistral | 205 | 11196309 | <reponame>shubhamdang/mistral
# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - Huawei Technologies Co. Ltd
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import os
import shutil
import tempfile
import threading
from oslo_concurrency import processutils
from oslo_serialization import jsonutils
from mistral_lib.utils import inspect_utils
from mistral import exceptions as exc
from mistral import expressions as expr
# Thread local storage.
_th_loc_storage = threading.local()
@contextlib.contextmanager
def tempdir(**kwargs):
argdict = kwargs.copy()
if 'dir' not in argdict:
argdict['dir'] = '/tmp/'
tmpdir = tempfile.mkdtemp(**argdict)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
raise exc.DataAccessException(
"Failed to delete temp dir %(dir)s (reason: %(reason)s)" %
{'dir': tmpdir, 'reason': e}
)
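# Illustrative usage of tempdir() (a sketch; the prefix and file name are
# placeholders, not taken from the original module):
#
#     with tempdir(prefix='mistral-') as path:
#         with open(os.path.join(path, 'scratch.txt'), 'w') as f:
#             f.write('temporary data')
#     # the directory and its contents are removed when the block exits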
def save_text_to(text, file_path, overwrite=False):
if os.path.exists(file_path) and not overwrite:
        raise exc.DataAccessException(
            "Cannot save data to file. File %s already exists." % file_path
        )
with open(file_path, 'w') as f:
f.write(text)
def generate_key_pair(key_length=2048):
"""Create RSA key pair with specified number of bits in key.
Returns tuple of private and public keys.
"""
with tempdir() as tmpdir:
keyfile = os.path.join(tmpdir, 'tempkey')
args = [
'ssh-keygen',
'-q', # quiet
'-N', '', # w/o passphrase
'-t', 'rsa', # create key of rsa type
'-f', keyfile, # filename of the key file
'-C', 'Generated-by-Mistral' # key comment
]
if key_length is not None:
args.extend(['-b', key_length])
processutils.execute(*args)
if not os.path.exists(keyfile):
raise exc.DataAccessException(
"Private key file hasn't been created"
)
private_key = open(keyfile).read()
public_key_path = keyfile + '.pub'
if not os.path.exists(public_key_path):
raise exc.DataAccessException(
"Public key file hasn't been created"
)
public_key = open(public_key_path).read()
return private_key, public_key
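# Illustrative usage (a sketch; requires the ssh-keygen binary on PATH):
#
#     private_key, public_key = generate_key_pair(key_length=4096)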
def to_json_str(obj):
"""Serializes an object into a JSON string.
:param obj: Object to serialize.
:return: JSON string.
"""
if obj is None:
return None
def _fallback(value):
if inspect.isgenerator(value):
result = list(value)
            # The result of the generator call may again be non-primitive,
# so we need to call "to_primitive" again with the same fallback
# function. Note that the endless recursion here is not a problem
# because "to_primitive" limits the depth for custom classes,
# if they are present in the object graph being traversed.
return jsonutils.to_primitive(
result,
convert_instances=True,
fallback=_fallback
)
return value
# We need to convert the root of the given object graph into
# a primitive by hand so that we also enable conversion of
# object of custom classes into primitives. Otherwise, they are
# ignored by the "json" lib.
return jsonutils.dumps(
jsonutils.to_primitive(obj, convert_instances=True, fallback=_fallback)
)
def from_json_str(json_str):
"""Reconstructs an object from a JSON string.
:param json_str: A JSON string.
:return: Deserialized object.
"""
if json_str is None:
return None
return jsonutils.loads(json_str)
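# Illustrative round trip (sketch; the exact string formatting is representative):
#
#     s = to_json_str({'a': 1, 'b': [2, 3]})   # '{"a": 1, "b": [2, 3]}'
#     assert from_json_str(s) == {'a': 1, 'b': [2, 3]}
#     assert from_json_str(None) is None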
def evaluate_object_fields(obj, ctx):
"""Evaluates all expressions recursively contained in the object fields.
Some of the given object fields may be strings or data structures that
contain YAQL/Jinja expressions. The method evaluates them and updates
the corresponding object fields with the evaluated values.
:param obj: The object to inspect.
:param ctx: Expression context.
"""
fields = inspect_utils.get_public_fields(obj)
evaluated_fields = expr.evaluate_recursively(fields, ctx)
for k, v in evaluated_fields.items():
setattr(obj, k, v)
|
cartoonify/app/object_detection/builders/box_coder_builder_test.py | theendsofinvention/cartoonify | 1,991 | 11196315 | <reponame>theendsofinvention/cartoonify
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_coder_builder."""
import tensorflow as tf
from google.protobuf import text_format
from app.object_detection.box_coders import faster_rcnn_box_coder
from app.object_detection.box_coders import keypoint_box_coder
from app.object_detection.box_coders import mean_stddev_box_coder
from app.object_detection.box_coders import square_box_coder
from app.object_detection.builders import box_coder_builder
from app.object_detection.protos import box_coder_pb2
class BoxCoderBuilderTest(tf.test.TestCase):
def test_build_faster_rcnn_box_coder_with_defaults(self):
box_coder_text_proto = """
faster_rcnn_box_coder {
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertIsInstance(box_coder_object,
faster_rcnn_box_coder.FasterRcnnBoxCoder)
self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0])
def test_build_faster_rcnn_box_coder_with_non_default_parameters(self):
box_coder_text_proto = """
faster_rcnn_box_coder {
y_scale: 6.0
x_scale: 3.0
height_scale: 7.0
width_scale: 8.0
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertIsInstance(box_coder_object,
faster_rcnn_box_coder.FasterRcnnBoxCoder)
self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0])
def test_build_keypoint_box_coder_with_defaults(self):
box_coder_text_proto = """
keypoint_box_coder {
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder)
self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0])
def test_build_keypoint_box_coder_with_non_default_parameters(self):
box_coder_text_proto = """
keypoint_box_coder {
num_keypoints: 6
y_scale: 6.0
x_scale: 3.0
height_scale: 7.0
width_scale: 8.0
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder)
self.assertEqual(box_coder_object._num_keypoints, 6)
self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0])
def test_build_mean_stddev_box_coder(self):
box_coder_text_proto = """
mean_stddev_box_coder {
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertTrue(
isinstance(box_coder_object,
mean_stddev_box_coder.MeanStddevBoxCoder))
def test_build_square_box_coder_with_defaults(self):
box_coder_text_proto = """
square_box_coder {
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertTrue(
isinstance(box_coder_object, square_box_coder.SquareBoxCoder))
self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0])
def test_build_square_box_coder_with_non_default_parameters(self):
box_coder_text_proto = """
square_box_coder {
y_scale: 6.0
x_scale: 3.0
length_scale: 7.0
}
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
box_coder_object = box_coder_builder.build(box_coder_proto)
self.assertTrue(
isinstance(box_coder_object, square_box_coder.SquareBoxCoder))
self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0])
def test_raise_error_on_empty_box_coder(self):
box_coder_text_proto = """
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
with self.assertRaises(ValueError):
box_coder_builder.build(box_coder_proto)
if __name__ == '__main__':
tf.test.main()
|
scripts/mask.py | aminya/despacer | 110 | 11196345 | <filename>scripts/mask.py
print("(1<<16) * 16 = ", (1<<16)*16)
for i in range(1<<15):
solution = []
lastbit = 0
for bit in range(16):
if ((i & (1<<bit)) == 0):
solution.append(bit)
lastbit = bit
  while len(solution) < 16: solution.append(lastbit)
s = ""
for j in range(16):
s+=hex(solution[j])+","
print(s)
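# Illustration of one table row (a reading of the loop above, not emitted by the
# script): for a hypothetical mask i = 0b0000000000000101, bits 0 and 2 are set,
# so positions 0 and 2 are skipped; the kept positions 1,3,4,...,15 are emitted
# first and the row is padded with the last kept index (0xf) up to 16 entries.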
|
test/win/gyptest-cl-warning-as-error.py | chlorm-forks/gyp | 2,151 | 11196363 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure warning-as-error is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('warning-as-error.gyp', chdir=CHDIR)
# The source file contains a warning, so if WarnAsError is false (or
# default, which is also false), then the build should succeed, otherwise it
# must fail.
test.build('warning-as-error.gyp', 'test_warn_as_error_false', chdir=CHDIR)
test.build('warning-as-error.gyp', 'test_warn_as_error_unset', chdir=CHDIR)
test.build('warning-as-error.gyp', 'test_warn_as_error_true', chdir=CHDIR,
status=1)
test.pass_test()
|
python/tvm/relay/op/strategy/hexagon.py | shengxinhu/tvm | 4,640 | 11196380 | <gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of Hexagon operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
from tvm import topi
from .generic import *
from .. import op as _op
# --- Op strategy registration
@batch_matmul_strategy.register("hexagon")
def batch_matmul_strategy_hexagon(attrs, inputs, out_type, target):
"""batch_matmul strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_batch_matmul(topi.nn.batch_matmul),
wrap_topi_schedule(topi.hexagon.schedule_batch_matmul),
name="batch_matmul.hexagon",
)
return strategy
@concatenate_strategy.register("hexagon")
def concatenate_strategy_hexagon(attrs, inputs, out_type, target):
"""concatenate strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_concat(topi.concatenate),
wrap_topi_schedule(topi.hexagon.schedule_injective),
name="concatenate.hexagon",
)
return strategy
@conv2d_strategy.register("hexagon")
def conv2d_strategy_hexagon(attrs, inputs, out_type, target):
"""Conv2d strategy for Hexagon"""
strategy = _op.OpStrategy()
data_layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
groups = attrs.groups
data, kernel = inputs
layout = attrs.data_layout
if groups == 1:
if data_layout == "NHWC" and kernel_layout == "HWIO":
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nhwc),
wrap_topi_schedule(topi.hexagon.schedule_conv2d_nhwc),
name="conv2d_nhwc.hexagon",
)
elif data_layout == "NCHW" and kernel_layout == "OIHW":
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nchw),
wrap_topi_schedule(topi.hexagon.schedule_conv2d_nchw),
name="conv2d_nchw.hexagon",
)
else:
raise RuntimeError(
f"Unsupported layouts: data_layout:{data_layout}, kernel_layout:{kernel_layout}, "
f"groups:{attrs.groups}"
)
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.hexagon.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.hexagon.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
raise RuntimeError(f"Unsupported group_conv2d layout {layout}")
return strategy
@dense_strategy.register("hexagon")
def dense_strategy_hexagon(attrs, inputs, out_type, target):
"""Dense strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_dense(topi.nn.dense),
wrap_topi_schedule(topi.hexagon.schedule_dense),
name="dense.hexagon",
)
return strategy
@softmax_strategy.register("hexagon")
def softmax_strategy_hexagon(attrs, inputs, out_type, target):
"""Softmax strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.hexagon.schedule_softmax),
name="softmax.hexagon",
)
return strategy
@conv2d_transpose_strategy.register("hexagon")
def conv2d_transpose_strategy_hexagon(attrs, inputs, out_type, target):
"""conv2d_transpose hexagon strategy"""
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCHW", "only support nchw for now"
assert dilation == (1, 1), "not support dilate now"
strategy = _op.OpStrategy()
if groups == 1:
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.nn.conv2d_transpose_nchw),
wrap_topi_schedule(topi.hexagon.schedule_conv2d_transpose_nchw),
name="conv2d_transpose_nchw.generic",
)
else:
raise RuntimeError("Unsupported conv2d_transpose layout {}".format(layout))
return strategy
# --- Op schedule registration
@schedule_adaptive_pool.register("hexagon")
def schedule_adaptive_pool_hexagon(attrs, outs, target):
"""Schedule adaptive pool ops for Hexagon"""
with target:
return topi.hexagon.schedule_adaptive_pool(outs)
@schedule_injective.register("hexagon")
def schedule_injective_hexagon(attrs, outs, target):
"""Schedule injective ops for Hexagon"""
with target:
return topi.hexagon.schedule_injective(outs)
@schedule_concatenate.register("hexagon")
def schedule_concatenate_hexagon(attrs, outs, target):
"""Schedule concatenate ops for Hexagon"""
with target:
return topi.hexagon.schedule_injective(outs)
@schedule_pool.register("hexagon")
def schedule_pool_hexagon(attrs, outs, target):
"""Schedule pool ops for Hexagon"""
with target:
return topi.hexagon.schedule_pool(outs)
@schedule_reduce.register("hexagon")
def schedule_reduce_hexagon(attrs, outs, target):
"""Schedule reduction ops for Hexagon"""
with target:
return topi.hexagon.schedule_reduce(outs)
|
AutoDL_sample_code_submission/at_speech/policy_space/decision_making.py | dianjixz/AutoDL | 1,044 | 11196384 | <filename>AutoDL_sample_code_submission/at_speech/policy_space/decision_making.py
from at_speech.policy_space.meta_learning import ModelSelectLearner
from at_speech.policy_space.ensemble_learning import EnsembleLearner
from at_toolkit import AdlSpeechDMetadata
from at_speech.at_speech_cons import CLS_LR_LIBLINEAER, CLS_LR_SAG, CLS_TR34
class DecisionMaker(object):
def __init__(self, aspeech_metadata: AdlSpeechDMetadata):
self.aspeech_metadata = aspeech_metadata
self.meta_model_select_learner = ModelSelectLearner()
self.ensemble_learner = EnsembleLearner(self.aspeech_metadata)
self.aspeech_metadata_minix_report_flag = False
def learn_train_minisamples_report(self, train_minis_report:dict):
self.aspeech_metadata.init_train_minisamples_report(train_minis_report)
self.aspeech_metadata_minix_report_flag = True
def decide_if_start_val(self):
self.IF_START_VAL = False
def decide_if_ensemble_pred(self):
self.IF_ENSEMBLE_PRED = False
def decide_model_select(self, train_pip_id):
return self.meta_model_select_learner.predict_train_cls_select(train_pip_id)
def decide_g_valid_num(self) -> int:
return self.ensemble_learner.predict_g_valid_num()
def decide_if_split_val(self, token_size):
return self.ensemble_learner.predict_if_split_val(token_size)
def decide_tfds2np_array(self):
assert self.aspeech_metadata_minix_report_flag is True, "Error:Meta mini_samples_report flag is False"
if self.aspeech_metadata.train_minisamples_report.get("x_seqlen_mean") > 200000:
return [0.1, 0.2, 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
elif self.aspeech_metadata.class_num >= 40:
return [0.1, 0.2, 0.4, 0.3]
else:
return [0.1, 0.2, 0.4, 0.3]
def infer_model_select_def(self):
model_select_def = None
if self.aspeech_metadata.class_num < 10:
model_select_def = {
0: CLS_LR_LIBLINEAER,
1: CLS_LR_LIBLINEAER,
2: CLS_TR34,
}
else:
model_select_def = {
0: CLS_LR_LIBLINEAER,
1: CLS_LR_LIBLINEAER,
2: CLS_LR_SAG,
3: CLS_TR34,
}
self.meta_model_select_learner.model_select_def = model_select_def
def infer_tr34_trainpip_warmup(self):
if self.aspeech_metadata.class_num <= 10:
return 2
elif 10 < self.aspeech_metadata.class_num <= 37:
return 8
else:
return 11
def infer_tr34_hps_epoch(self):
if self.aspeech_metadata.class_num <= 10:
first_epoch = 8
else:
first_epoch = 14
left_epoch = 1
return {"first_epoch": first_epoch, "left_epoch":left_epoch}
def infer_tr34_hps_samplenum(self):
tr34_hps_sample_info = dict()
if self.aspeech_metadata.class_num > 37 or self.aspeech_metadata.train_num > 1000:
tr34_hps_sample_info["SAMP_MAX_NUM"] = 300
tr34_hps_sample_info["SAMP_MIN_NUM"] = 300
else:
tr34_hps_sample_info["SAMP_MAX_NUM"] = 200
tr34_hps_sample_info["SAMP_MIN_NUM"] = 200
return tr34_hps_sample_info
|
braintree/payment_method_nonce_gateway.py | futureironman/braintree_python | 182 | 11196400 | import braintree
from braintree.payment_method_nonce import PaymentMethodNonce
from braintree.error_result import ErrorResult
from braintree.exceptions.not_found_error import NotFoundError
from braintree.resource import Resource
from braintree.resource_collection import ResourceCollection
from braintree.successful_result import SuccessfulResult
class PaymentMethodNonceGateway(object):
def __init__(self, gateway):
self.gateway = gateway
self.config = gateway.config
def create(self, payment_method_token, params = {"payment_method_nonce": {}}):
try:
schema = [{"payment_method_nonce": ["merchant_account_id", "authentication_insight", {"authentication_insight_options": ["amount", "recurring_customer_consent", "recurring_max_amount"]}]}]
Resource.verify_keys(params, schema)
response = self.config.http().post(self.config.base_merchant_path() + "/payment_methods/" + payment_method_token + "/nonces", params)
if "api_error_response" in response:
return ErrorResult(self.gateway, response["api_error_response"])
else:
payment_method_nonce = self._parse_payment_method_nonce(response)
return SuccessfulResult({"payment_method_nonce": payment_method_nonce})
except NotFoundError:
raise NotFoundError("payment method with token " + repr(payment_method_token) + " not found")
def find(self, payment_method_nonce):
try:
if payment_method_nonce is None or payment_method_nonce.strip() == "":
raise NotFoundError()
response = self.config.http().get(self.config.base_merchant_path() + "/payment_method_nonces/" + payment_method_nonce)
return self._parse_payment_method_nonce(response)
except NotFoundError:
raise NotFoundError("payment method nonce with id " + repr(payment_method_nonce) + " not found")
def _parse_payment_method_nonce(self, response):
return PaymentMethodNonce(self.gateway, response["payment_method_nonce"])
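# Illustrative usage through a configured gateway object (a sketch; the token
# string is a placeholder and result handling is abbreviated):
#
#     result = gateway.payment_method_nonce.create("payment_method_token")
#     if result.is_success:
#         nonce = result.payment_method_nonce.nonce
#         found = gateway.payment_method_nonce.find(nonce)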
|
tests/test_config.py | hawkeone/PlexTraktSync | 631 | 11196413 | <reponame>hawkeone/PlexTraktSync
#!/usr/bin/env python3 -m pytest
from os import environ
from plex_trakt_sync.factory import factory
def test_config():
config = factory.config()
config.save()
config.initialized = False
assert config["PLEX_TOKEN"] is None
config.save()
assert config["PLEX_TOKEN"] is None
environ["PLEX_TOKEN"] = "Foo"
config.initialized = False
assert config["PLEX_TOKEN"] == "Foo"
try:
del environ["PLEX_TOKEN"]
except KeyError:
pass
config.initialized = False
assert config["PLEX_TOKEN"] is None
environ["PLEX_TOKEN"] = "-"
config.initialized = False
assert config["PLEX_TOKEN"] is None
environ["PLEX_TOKEN"] = "None"
config.initialized = False
assert config["PLEX_TOKEN"] is None
|
admin/management/views.py | gaybro8777/osf.io | 628 | 11196491 | from django.views.generic import TemplateView, View
from django.contrib import messages
from django.http import HttpResponse
from django.contrib.auth.mixins import PermissionRequiredMixin
from osf.management.commands.manage_switch_flags import manage_waffle
from osf.management.commands.update_registration_schemas import update_registration_schemas
from scripts.find_spammy_content import manage_spammy_content
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from osf.models import Preprint, Node, Registration
class ManagementCommands(TemplateView):
'''Basic form to trigger various management commands'''
template_name = 'management/commands.html'
object_type = 'management'
class ManagementCommandPermissionView(View, PermissionRequiredMixin):
permission_required = 'osf.view_management'
class WaffleFlag(ManagementCommandPermissionView):
def post(self, request, *args, **kwargs):
manage_waffle()
messages.success(request, 'Waffle flags have been successfully updated.')
return redirect(reverse('management:commands'))
class UpdateRegistrationSchemas(ManagementCommandPermissionView):
def post(self, request, *args, **kwargs):
update_registration_schemas()
messages.success(request, 'Registration schemas have been successfully updated.')
return redirect(reverse('management:commands'))
class GetSpamDataCSV(ManagementCommandPermissionView):
def post(self, request, *args, **kwargs):
days = int(request.POST.get('days_get', 0))
models = []
if request.POST.get('preprint_get', None):
models.append(Preprint)
if request.POST.get('node_get', None):
models.append(Node)
if request.POST.get('registration_get', None):
models.append(Registration)
regex = request.POST.get('regex_get', None)
if not days:
messages.error(request, 'A number of days over 0 must be specified. Check your inputs and try again')
return redirect(reverse('management:commands'))
if not models:
messages.error(request, 'At least one model must be specified. Check your inputs and try again')
return redirect(reverse('management:commands'))
if not regex:
messages.error(request, 'A regular expression input must be specified. Check your inputs and try again')
return redirect(reverse('management:commands'))
response = HttpResponse(content_type='text/csv')
manage_spammy_content(regex, days, models, response_object=response)
filename = 'spam_document.csv'
response['Content-Disposition'] = f'attachment; filename={filename}'
return response
class BanSpamByRegex(ManagementCommandPermissionView):
def post(self, request, *args, **kwargs):
days = int(request.POST.get('days_ban', 0))
models = []
if request.POST.get('preprint_ban', None):
models.append(Preprint)
if request.POST.get('node_ban', None):
models.append(Node)
if request.POST.get('registration_ban', None):
models.append(Registration)
regex = request.POST.get('regex_ban', None)
if not days:
messages.error(request, 'A number of days over 0 must be specified. Check your inputs and try again')
return redirect(reverse('management:commands'))
if not models:
messages.error(request, 'At least one model must be specified. Check your inputs and try again')
return redirect(reverse('management:commands'))
if not regex:
messages.error(request, 'A regular expression input must be specified. Check your inputs and try again')
return redirect(reverse('management:commands'))
spam_ban_count = manage_spammy_content(regex, days, models, ban=True)
messages.success(request, f'{spam_ban_count} users have been banned')
return redirect(reverse('management:commands'))
|
eliot/tests/test_output.py | pombredanne/eliot-1 | 598 | 11196495 | """
Tests for L{eliot._output}.
"""
from sys import stdout
from unittest import TestCase, skipUnless
# Make sure to use StringIO that only accepts unicode:
from io import BytesIO, StringIO
import json as pyjson
from tempfile import mktemp
from time import time
from uuid import UUID
from threading import Thread
try:
import numpy as np
except ImportError:
np = None
from zope.interface.verify import verifyClass
from .._output import (
MemoryLogger,
ILogger,
Destinations,
Logger,
bytesjson as json,
to_file,
FileDestination,
_safe_unicode_dictionary,
)
from .._action import start_action
from .._validation import ValidationError, Field, _MessageSerializer
from .._traceback import write_traceback
from ..testing import assertContainsFields
from .common import CustomObject, CustomJSONEncoder
class MemoryLoggerTests(TestCase):
"""
Tests for L{MemoryLogger}.
"""
def test_interface(self):
"""
L{MemoryLogger} implements L{ILogger}.
"""
verifyClass(ILogger, MemoryLogger)
def test_write(self):
"""
Dictionaries written with L{MemoryLogger.write} are stored on a list.
"""
logger = MemoryLogger()
logger.write({"a": "b"})
logger.write({"c": 1})
self.assertEqual(logger.messages, [{"a": "b"}, {"c": 1}])
logger.validate()
def test_notStringFieldKeys(self):
"""
Field keys must be unicode or bytes; if not L{MemoryLogger.validate}
raises a C{TypeError}.
"""
logger = MemoryLogger()
logger.write({123: "b"})
self.assertRaises(TypeError, logger.validate)
def test_bytesMustBeUTF8(self):
"""
Field keys can be bytes, but only if they're UTF-8 encoded Unicode.
"""
logger = MemoryLogger()
logger.write({"\u1234".encode("utf-16"): "b"})
self.assertRaises(UnicodeDecodeError, logger.validate)
def test_serializer(self):
"""
L{MemoryLogger.validate} calls the given serializer's C{validate()}
method with the message, as does L{MemoryLogger.write}.
"""
class FakeValidator(list):
def validate(self, message):
self.append(message)
def serialize(self, obj):
return obj
validator = FakeValidator()
logger = MemoryLogger()
message = {"message_type": "mymessage", "X": 1}
logger.write(message, validator)
self.assertEqual(validator, [message])
logger.validate()
self.assertEqual(validator, [message, message])
def test_failedValidation(self):
"""
L{MemoryLogger.validate} will allow exceptions raised by the serializer
to pass through.
"""
serializer = _MessageSerializer(
[Field.forValue("message_type", "mymessage", "The type")]
)
logger = MemoryLogger()
logger.write({"message_type": "wrongtype"}, serializer)
self.assertRaises(ValidationError, logger.validate)
def test_JSON(self):
"""
L{MemoryLogger.validate} will encode the output of serialization to
JSON.
"""
serializer = _MessageSerializer(
[
Field.forValue("message_type", "type", "The type"),
Field("foo", lambda value: object(), "The type"),
]
)
logger = MemoryLogger()
logger.write(
{"message_type": "type", "foo": "will become object()"}, serializer
)
self.assertRaises(TypeError, logger.validate)
@skipUnless(np, "NumPy is not installed.")
def test_EliotJSONEncoder(self):
"""
L{MemoryLogger.validate} uses the EliotJSONEncoder by default to do
encoding testing.
"""
logger = MemoryLogger()
logger.write({"message_type": "type", "foo": np.uint64(12)}, None)
logger.validate()
def test_JSON_custom_encoder(self):
"""
L{MemoryLogger.validate} will use a custom JSON encoder if one was given.
"""
logger = MemoryLogger(encoder=CustomJSONEncoder)
logger.write(
{"message_type": "type", "custom": CustomObject()},
None,
)
logger.validate()
def test_serialize(self):
"""
L{MemoryLogger.serialize} returns a list of serialized versions of the
logged messages.
"""
serializer = _MessageSerializer(
[
Field.forValue("message_type", "mymessage", "The type"),
Field("length", len, "The length"),
]
)
messages = [
{"message_type": "mymessage", "length": "abc"},
{"message_type": "mymessage", "length": "abcd"},
]
logger = MemoryLogger()
for message in messages:
logger.write(message, serializer)
self.assertEqual(
logger.serialize(),
[
{"message_type": "mymessage", "length": 3},
{"message_type": "mymessage", "length": 4},
],
)
def test_serializeCopies(self):
"""
L{MemoryLogger.serialize} does not mutate the original logged messages.
"""
serializer = _MessageSerializer(
[
Field.forValue("message_type", "mymessage", "The type"),
Field("length", len, "The length"),
]
)
message = {"message_type": "mymessage", "length": "abc"}
logger = MemoryLogger()
logger.write(message, serializer)
logger.serialize()
self.assertEqual(logger.messages[0]["length"], "abc")
def write_traceback(self, logger, exception):
"""
Write an exception as a traceback to the logger.
"""
try:
raise exception
except:
write_traceback(logger)
def test_tracebacksCauseTestFailure(self):
"""
Logging a traceback to L{MemoryLogger} will add its exception to
L{MemoryLogger.tracebackMessages}.
"""
logger = MemoryLogger()
exception = Exception()
self.write_traceback(logger, exception)
self.assertEqual(logger.tracebackMessages[0]["reason"], exception)
def test_flushTracebacksNoTestFailure(self):
"""
Any tracebacks cleared by L{MemoryLogger.flushTracebacks} (as specified
by exception type) are removed from
L{MemoryLogger.tracebackMessages}.
"""
logger = MemoryLogger()
exception = RuntimeError()
self.write_traceback(logger, exception)
logger.flushTracebacks(RuntimeError)
self.assertEqual(logger.tracebackMessages, [])
def test_flushTracebacksReturnsExceptions(self):
"""
L{MemoryLogger.flushTracebacks} returns the traceback messages.
"""
exceptions = [ZeroDivisionError(), ZeroDivisionError()]
logger = MemoryLogger()
logger.write({"x": 1})
for exc in exceptions:
self.write_traceback(logger, exc)
logger.write({"x": 1})
flushed = logger.flushTracebacks(ZeroDivisionError)
self.assertEqual(flushed, logger.messages[1:3])
def test_flushTracebacksUnflushedTestFailure(self):
"""
Any tracebacks uncleared by L{MemoryLogger.flushTracebacks} (because
they are of a different type) are still listed in
L{MemoryLogger.tracebackMessages}.
"""
logger = MemoryLogger()
exception = RuntimeError()
self.write_traceback(logger, exception)
logger.flushTracebacks(KeyError)
self.assertEqual(logger.tracebackMessages[0]["reason"], exception)
def test_flushTracebacksUnflushedUnreturned(self):
"""
Any tracebacks uncleared by L{MemoryLogger.flushTracebacks} (because
they are of a different type) are not returned.
"""
logger = MemoryLogger()
exception = RuntimeError()
self.write_traceback(logger, exception)
self.assertEqual(logger.flushTracebacks(KeyError), [])
def test_reset(self):
"""
L{MemoryLogger.reset} clears all logged messages and tracebacks.
"""
logger = MemoryLogger()
logger.write({"key": "value"}, None)
logger.reset()
self.assertEqual(
(logger.messages, logger.serializers, logger.tracebackMessages),
([], [], []),
)
def test_threadSafeWrite(self):
"""
L{MemoryLogger.write} can be called from multiple threads concurrently.
"""
# Some threads will log some messages
thread_count = 10
# A lot of messages. This will keep the threads running long enough
# to give them a chance to (try to) interfere with each other.
write_count = 10000
# They'll all use the same MemoryLogger instance.
logger = MemoryLogger()
# Each thread will have its own message and serializer that it writes
# to the log over and over again.
def write(msg, serializer):
for i in range(write_count):
logger.write(msg, serializer)
# Generate a single distinct message for each thread to log.
msgs = list({"i": i} for i in range(thread_count))
# Generate a single distinct serializer for each thread to log.
serializers = list(object() for i in range(thread_count))
# Pair them all up. This gives us a simple invariant we can check
# later on.
write_args = zip(msgs, serializers)
# Create the threads.
threads = list(Thread(target=write, args=args) for args in write_args)
# Run them all. Note threads early in this list will start writing to
# the log before later threads in the list even get a chance to start.
# That's part of why we have each thread write so many messages.
for t in threads:
t.start()
# Wait for them all to finish.
for t in threads:
t.join()
# Check that we got the correct number of messages in the log.
expected_count = thread_count * write_count
self.assertEqual(len(logger.messages), expected_count)
self.assertEqual(len(logger.serializers), expected_count)
# Check the simple invariant we created above. Every logged message
# must be paired with the correct serializer, where "correct" is
# defined by ``write_args`` above.
for position, (msg, serializer) in enumerate(
zip(logger.messages, logger.serializers)
):
# The indexes must match because the objects are paired using
# zip() above.
msg_index = msgs.index(msg)
serializer_index = serializers.index(serializer)
self.assertEqual(
msg_index,
serializer_index,
"Found message #{} with serializer #{} at position {}".format(
msg_index, serializer_index, position
),
)
class MyException(Exception):
"""
Custom exception.
"""
class BadDestination(list):
"""
A destination that throws an exception the first time it is called.
"""
called = 0
def __call__(self, msg):
if not self.called:
self.called = True
raise MyException("ono")
self.append(msg)
class DestinationsTests(TestCase):
"""
Tests for L{Destinations}.
"""
def test_send(self):
"""
L{Destinations.send} calls all destinations added with
L{Destinations.add} with the given dictionary.
"""
destinations = Destinations()
message = {"hoorj": "blargh"}
dest = []
dest2 = []
dest3 = []
destinations.add(dest.append, dest2.append)
destinations.add(dest3.append)
destinations.send(message)
self.assertEqual(dest, [message])
self.assertEqual(dest2, [message])
self.assertEqual(dest3, [message])
def test_destination_exception_multiple_destinations(self):
"""
If one destination throws an exception, other destinations still
get the message.
"""
destinations = Destinations()
dest = []
dest2 = BadDestination()
dest3 = []
destinations.add(dest.append)
destinations.add(dest2)
destinations.add(dest3.append)
message = {"hello": 123}
destinations.send(message)
self.assertIn(message, dest)
self.assertIn(message, dest3)
def test_destination_exception_continue(self):
"""
If a destination throws an exception, future messages are still
sent to it.
"""
destinations = Destinations()
dest = BadDestination()
destinations.add(dest)
msg1 = {"hello": 123}
msg2 = {"world": 456}
destinations.send(msg1)
self.assertNotIn(msg1, dest)
destinations.send(msg2)
self.assertIn(msg2, dest)
def test_remove(self):
"""
A destination removed with L{Destinations.remove} will no longer
        receive messages from L{Destinations.add} calls.
"""
destinations = Destinations()
message = {"hello": 123}
dest = []
destinations.add(dest.append)
destinations.remove(dest.append)
destinations.send(message)
self.assertEqual(dest, [])
def test_removeNonExistent(self):
"""
        Removing a destination that has not previously been added will result
in a C{ValueError} being thrown.
"""
destinations = Destinations()
self.assertRaises(ValueError, destinations.remove, [].append)
def test_addGlobalFields(self):
"""
L{Destinations.addGlobalFields} adds the given fields and values to
the messages being passed in.
"""
destinations = Destinations()
dest = []
destinations.add(dest.append)
destinations.addGlobalFields(x=123, y="hello")
destinations.send({"z": 456})
self.assertEqual(dest, [{"x": 123, "y": "hello", "z": 456}])
def test_addGlobalFieldsCumulative(self):
"""
L{Destinations.addGlobalFields} adds the given fields to those set by
previous calls.
"""
destinations = Destinations()
dest = []
destinations.add(dest.append)
destinations.addGlobalFields(x=123, y="hello")
destinations.addGlobalFields(x=456, z=456)
destinations.send({"msg": "X"})
self.assertEqual(dest, [{"x": 456, "y": "hello", "z": 456, "msg": "X"}])
def test_buffering(self):
"""
        Before any destinations are set, up to 1000 messages are buffered, and
        then delivered to the first registered destinations.
"""
destinations = Destinations()
messages = [{"k": i} for i in range(1050)]
for m in messages:
destinations.send(m)
dest, dest2 = [], []
destinations.add(dest.append, dest2.append)
self.assertEqual((dest, dest2), (messages[-1000:], messages[-1000:]))
def test_buffering_second_batch(self):
"""
        The second batch of added destinations doesn't get the buffered messages.
"""
destinations = Destinations()
message = {"m": 1}
message2 = {"m": 2}
destinations.send(message)
dest = []
dest2 = []
destinations.add(dest.append)
destinations.add(dest2.append)
destinations.send(message2)
self.assertEqual((dest, dest2), ([message, message2], [message2]))
def test_global_fields_buffering(self):
"""
Global fields are added to buffered messages, when possible.
"""
destinations = Destinations()
message = {"m": 1}
destinations.send(message)
destinations.addGlobalFields(k=123)
dest = []
destinations.add(dest.append)
self.assertEqual(dest, [{"m": 1, "k": 123}])
def makeLogger():
"""
Return a tuple (L{Logger} instance, C{list} of written messages).
"""
logger = Logger()
logger._destinations = Destinations()
written = []
logger._destinations.add(written.append)
return logger, written
class LoggerTests(TestCase):
"""
Tests for L{Logger}.
"""
def test_interface(self):
"""
L{Logger} implements L{ILogger}.
"""
verifyClass(ILogger, Logger)
def test_global(self):
"""
A global L{Destinations} is used by the L{Logger} class.
"""
self.assertIsInstance(Logger._destinations, Destinations)
def test_write(self):
"""
L{Logger.write} sends the given dictionary L{Destinations} object.
"""
logger, written = makeLogger()
d = {"hello": 1}
logger.write(d)
self.assertEqual(written, [d])
def test_serializer(self):
"""
If a L{_MessageSerializer} is passed to L{Logger.write}, it is used to
serialize the message before it is passed to the destination.
"""
logger, written = makeLogger()
serializer = _MessageSerializer(
[
Field.forValue("message_type", "mymessage", "The type"),
Field("length", len, "The length of a thing"),
]
)
logger.write({"message_type": "mymessage", "length": "thething"}, serializer)
self.assertEqual(written, [{"message_type": "mymessage", "length": 8}])
def test_passedInDictionaryUnmodified(self):
"""
The dictionary passed in to L{Logger.write} is not modified.
"""
logger, written = makeLogger()
serializer = _MessageSerializer(
[
Field.forValue("message_type", "mymessage", "The type"),
Field("length", len, "The length of a thing"),
]
)
d = {"message_type": "mymessage", "length": "thething"}
original = d.copy()
logger.write(d, serializer)
self.assertEqual(d, original)
def test_safe_unicode_dictionary(self):
"""
L{_safe_unicode_dictionary} converts the given dictionary's
values and keys to unicode using C{safeunicode}.
"""
class badobject(object):
def __repr__(self):
raise TypeError()
dictionary = {badobject(): 123, 123: badobject()}
badMessage = "eliot: unknown, unicode() raised exception"
self.assertEqual(
eval(_safe_unicode_dictionary(dictionary)),
{badMessage: "123", "123": badMessage},
)
def test_safe_unicode_dictionary_fallback(self):
"""
If converting the dictionary failed for some reason,
L{_safe_unicode_dictionary} runs C{repr} on the object.
"""
self.assertEqual(_safe_unicode_dictionary(None), "None")
def test_safe_unicode_dictionary_fallback_failure(self):
"""
If all else fails, L{_safe_unicode_dictionary} just gives up.
"""
class badobject(object):
def __repr__(self):
raise TypeError()
self.assertEqual(
_safe_unicode_dictionary(badobject()),
"eliot: unknown, unicode() raised exception",
)
def test_serializationErrorTraceback(self):
"""
If serialization fails in L{Logger.write}, a traceback is logged,
along with a C{eliot:serialization_failure} message for debugging
purposes.
"""
logger, written = makeLogger()
def raiser(i):
raise RuntimeError("oops")
serializer = _MessageSerializer(
[
Field.forValue("message_type", "mymessage", "The type"),
Field("fail", raiser, "Serialization fail"),
]
)
message = {"message_type": "mymessage", "fail": "will"}
logger.write(message, serializer)
self.assertEqual(len(written), 2)
tracebackMessage = written[0]
assertContainsFields(
self,
tracebackMessage,
{
"exception": "%s.RuntimeError" % (RuntimeError.__module__,),
"message_type": "eliot:traceback",
},
)
self.assertIn("RuntimeError: oops", tracebackMessage["traceback"])
# Calling _safe_unicode_dictionary multiple times leads to
# inconsistent results due to hash ordering, so compare contents:
assertContainsFields(
self, written[1], {"message_type": "eliot:serialization_failure"}
)
self.assertEqual(
eval(written[1]["message"]),
dict((repr(key), repr(value)) for (key, value) in message.items()),
)
def test_destination_exception_caught(self):
"""
If a destination throws an exception, an appropriate error is
logged.
"""
logger = Logger()
logger._destinations = Destinations()
dest = BadDestination()
logger._destinations.add(dest)
message = {"hello": 123}
logger.write({"hello": 123})
assertContainsFields(
self,
dest[0],
{
"message_type": "eliot:destination_failure",
"message": _safe_unicode_dictionary(message),
"reason": "ono",
"exception": "eliot.tests.test_output.MyException",
},
)
def test_destination_multiple_exceptions_caught(self):
"""
If multiple destinations throw an exception, an appropriate error is
logged for each.
"""
logger = Logger()
logger._destinations = Destinations()
logger._destinations.add(BadDestination())
logger._destinations.add(lambda msg: 1 / 0)
messages = []
logger._destinations.add(messages.append)
try:
1 / 0
except ZeroDivisionError as e:
zero_divide = str(e)
zero_type = ZeroDivisionError.__module__ + ".ZeroDivisionError"
message = {"hello": 123}
logger.write({"hello": 123})
def remove(key):
return [message.pop(key) for message in messages[1:]]
# Make sure we have task_level & task_uuid in exception messages.
task_levels = remove("task_level")
task_uuids = remove("task_uuid")
timestamps = remove("timestamp")
self.assertEqual(
(
abs(timestamps[0] + timestamps[1] - 2 * time()) < 1,
task_levels == [[1], [1]],
len([UUID(uuid) for uuid in task_uuids]) == 2,
messages,
),
(
True,
True,
True,
[
message,
{
"message_type": "eliot:destination_failure",
"message": _safe_unicode_dictionary(message),
"reason": "ono",
"exception": "eliot.tests.test_output.MyException",
},
{
"message_type": "eliot:destination_failure",
"message": _safe_unicode_dictionary(message),
"reason": zero_divide,
"exception": zero_type,
},
],
),
)
def test_destination_exception_caught_twice(self):
"""
If a destination throws an exception, and the logged error about
it also causes an exception, then just drop that exception on the
floor, since there's nothing we can do with it.
"""
logger = Logger()
logger._destinations = Destinations()
def always_raise(message):
raise ZeroDivisionError()
logger._destinations.add(always_raise)
# Just a message. No exception raised; since everything is dropped no
# other assertions to be made.
logger.write({"hello": 123})
# With an action. No exception raised; since everything is dropped no
# other assertions to be made.
with start_action(logger, "sys:do"):
logger.write({"hello": 123})
class PEP8Tests(TestCase):
"""
Tests for PEP 8 method compatibility.
"""
def test_flush_tracebacks(self):
"""
L{MemoryLogger.flush_tracebacks} is the same as
L{MemoryLogger.flushTracebacks}
"""
self.assertEqual(MemoryLogger.flush_tracebacks, MemoryLogger.flushTracebacks)
class ToFileTests(TestCase):
"""
Tests for L{to_file}.
"""
def test_to_file_adds_destination(self):
"""
L{to_file} adds a L{FileDestination} destination with the given file.
"""
f = stdout
to_file(f)
expected = FileDestination(file=f)
self.addCleanup(Logger._destinations.remove, expected)
self.assertIn(expected, Logger._destinations._destinations)
def test_to_file_custom_encoder(self):
"""
L{to_file} accepts a custom encoder, and sets it on the resulting
L{FileDestination}.
"""
f = stdout
encoder = object()
to_file(f, encoder=encoder)
expected = FileDestination(file=f, encoder=encoder)
self.addCleanup(Logger._destinations.remove, expected)
self.assertIn(expected, Logger._destinations._destinations)
def test_bytes_values(self):
"""
        DEPRECATED: On Python 3, L{FileDestination} will encode bytes as if they were
        UTF-8 encoded strings when writing to BytesIO only.
"""
message = {"x": b"abc"}
bytes_f = BytesIO()
destination = FileDestination(file=bytes_f)
destination(message)
self.assertEqual(
[json.loads(line) for line in bytes_f.getvalue().splitlines()],
[{"x": "abc"}],
)
@skipUnless(np, "NumPy is not installed.")
def test_default_encoder_is_EliotJSONEncoder(self):
"""The default encoder if none are specified is EliotJSONEncoder."""
message = {"x": np.int64(3)}
f = StringIO()
destination = FileDestination(file=f)
destination(message)
self.assertEqual(
[json.loads(line) for line in f.getvalue().splitlines()], [{"x": 3}]
)
def test_filedestination_writes_json_bytes(self):
"""
L{FileDestination} writes JSON-encoded messages to a file that accepts
bytes.
"""
message1 = {"x": 123}
message2 = {"y": None, "x": "abc"}
bytes_f = BytesIO()
destination = FileDestination(file=bytes_f)
destination(message1)
destination(message2)
self.assertEqual(
[json.loads(line) for line in bytes_f.getvalue().splitlines()],
[message1, message2],
)
def test_filedestination_custom_encoder(self):
"""
        L{FileDestination} can use a custom encoder.
"""
custom = object()
class CustomEncoder(pyjson.JSONEncoder):
def default(self, o):
if o is custom:
return "CUSTOM!"
else:
return pyjson.JSONEncoder.default(self, o)
message = {"x": 123, "z": custom}
f = BytesIO()
destination = FileDestination(file=f, encoder=CustomEncoder)
destination(message)
self.assertEqual(
json.loads(f.getvalue().splitlines()[0]), {"x": 123, "z": "CUSTOM!"}
)
def test_filedestination_flushes(self):
"""
L{FileDestination} flushes after every write, to ensure logs get
written out even if the local buffer hasn't filled up.
"""
path = mktemp()
# File with large buffer:
f = open(path, "wb", 1024 * 1024 * 10)
# and a small message that won't fill the buffer:
message1 = {"x": 123}
destination = FileDestination(file=f)
destination(message1)
# Message got written even though buffer wasn't filled:
self.assertEqual(
[json.loads(line) for line in open(path, "rb").read().splitlines()],
[message1],
)
def test_filedestination_writes_json_unicode(self):
"""
L{FileDestination} writes JSON-encoded messages to file that only
accepts Unicode.
"""
message = {"x": "\u1234"}
unicode_f = StringIO()
destination = FileDestination(file=unicode_f)
destination(message)
self.assertEqual(pyjson.loads(unicode_f.getvalue()), message)
def test_filedestination_unwriteable_file(self):
"""
L{FileDestination} raises a runtime error if the given file isn't writeable.
"""
path = mktemp()
open(path, "w").close()
f = open(path, "r")
with self.assertRaises(RuntimeError):
FileDestination(f)
|
Chapter 11/demo_corr.py | tanS30/Building-Machine-Learning-Systems-With-Python-Second-Edition | 1,490 | 11196541 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by <NAME> and <NAME>
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
from matplotlib import pylab
import numpy as np
import scipy
from scipy.stats import norm, pearsonr
from utils import CHART_DIR
def _plot_correlation_func(x, y):
r, p = pearsonr(x, y)
title = "Cor($X_1$, $X_2$) = %.3f" % r
pylab.scatter(x, y)
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
f1 = scipy.poly1d(scipy.polyfit(x, y, 1))
pylab.plot(x, f1(x), "r--", linewidth=2)
# pylab.xticks([w*7*24 for w in [0,1,2,3,4]], ['week %i'%(w+1) for w in
# [0,1,2,3,4]])
def plot_correlation_demo():
np.random.seed(0) # to reproduce the data later on
pylab.clf()
pylab.figure(num=None, figsize=(8, 8))
x = np.arange(0, 10, 0.2)
pylab.subplot(221)
y = 0.5 * x + norm.rvs(1, scale=.01, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(222)
y = 0.5 * x + norm.rvs(1, scale=.1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(223)
y = 0.5 * x + norm.rvs(1, scale=1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(224)
y = norm.rvs(1, scale=10, size=len(x))
_plot_correlation_func(x, y)
pylab.autoscale(tight=True)
pylab.grid(True)
filename = "corr_demo_1.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
pylab.clf()
pylab.figure(num=None, figsize=(8, 8))
x = np.arange(-5, 5, 0.2)
pylab.subplot(221)
y = 0.5 * x ** 2 + norm.rvs(1, scale=.01, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(222)
y = 0.5 * x ** 2 + norm.rvs(1, scale=.1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(223)
y = 0.5 * x ** 2 + norm.rvs(1, scale=1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(224)
y = 0.5 * x ** 2 + norm.rvs(1, scale=10, size=len(x))
_plot_correlation_func(x, y)
pylab.autoscale(tight=True)
pylab.grid(True)
filename = "corr_demo_2.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_correlation_demo()
|
recipes/mpark-variant/all/conanfile.py | rockandsalt/conan-center-index | 562 | 11196544 |
from conans import ConanFile, tools
import os
class VariantConan(ConanFile):
name = "mpark-variant"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/mpark/variant"
description = "C++17 std::variant for C++11/14/17"
license = "BSL-1.0"
topics = ("conan", "variant", "mpark-variant")
settings = "compiler"
@property
def _source_subfolder(self):
return "source_subfolder"
def configure(self):
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, "11")
def package_id(self):
self.info.header_only()
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = "variant-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def package(self):
self.copy(pattern="LICENSE.md", dst="licenses", src=self._source_subfolder)
self.copy("*", dst="include", src=os.path.join(self._source_subfolder, "include"))
def package_info(self):
# TODO: CMake imported target shouldn't be namespaced (waiting https://github.com/conan-io/conan/issues/7615 to be implemented)
self.cpp_info.names["cmake_find_package"] = "mpark_variant"
self.cpp_info.names["cmake_find_package_multi"] = "mpark_variant"
|
lib/codereview/codereview.py | hongwozai/go-src-1.4.3 | 152 | 11196573 |
# coding=utf-8
# (The line above is necessary so that I can use 世界 in the
# *comment* below without Python getting all bent out of shape.)
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Mercurial interface to codereview.appspot.com.
To configure, set the following options in
your repository's .hg/hgrc file.
[extensions]
codereview = /path/to/codereview.py
[codereview]
server = codereview.appspot.com
The server should be running Rietveld; see http://code.google.com/p/rietveld/.
In addition to the new commands, this extension introduces
the file pattern syntax @nnnnnn, where nnnnnn is a change list
number, to mean the files included in that change list, which
must be associated with the current client.
For example, if change 123456 contains the files x.go and y.go,
"hg diff @123456" is equivalent to"hg diff x.go y.go".
'''
import sys
if __name__ == "__main__":
print >>sys.stderr, "This is a Mercurial extension and should not be invoked directly."
sys.exit(2)
# We require Python 2.6 for the json package.
if sys.version < '2.6':
print >>sys.stderr, "The codereview extension requires Python 2.6 or newer."
print >>sys.stderr, "You are running Python " + sys.version
sys.exit(2)
import json
import os
import re
import stat
import subprocess
import threading
import time
from mercurial import commands as hg_commands
from mercurial import util as hg_util
# bind Plan 9 preferred dotfile location
if os.sys.platform == 'plan9':
try:
import plan9
n = plan9.bind(os.path.expanduser("~/lib"), os.path.expanduser("~"), plan9.MBEFORE|plan9.MCREATE)
except ImportError:
pass
defaultcc = None
codereview_disabled = None
real_rollback = None
releaseBranch = None
server = "codereview.appspot.com"
server_url_base = None
testing = None
#######################################################################
# Normally I would split this into multiple files, but it simplifies
# import path headaches to keep it all in one file. Sorry.
# The different parts of the file are separated by banners like this one.
#######################################################################
# Helpers
def RelativePath(path, cwd):
n = len(cwd)
if path.startswith(cwd) and path[n] == '/':
return path[n+1:]
return path
def Sub(l1, l2):
return [l for l in l1 if l not in l2]
def Add(l1, l2):
l = l1 + Sub(l2, l1)
l.sort()
return l
def Intersect(l1, l2):
return [l for l in l1 if l in l2]
#######################################################################
# RE: UNICODE STRING HANDLING
#
# Python distinguishes between the str (string of bytes)
# and unicode (string of code points) types. Most operations
# work on either one just fine, but some (like regexp matching)
# require unicode, and others (like write) require str.
#
# As befits the language, Python hides the distinction between
# unicode and str by converting between them silently, but
# *only* if all the bytes/code points involved are 7-bit ASCII.
# This means that if you're not careful, your program works
# fine on "hello, world" and fails on "hello, 世界". And of course,
# the obvious way to be careful - use static types - is unavailable.
# So the only way is trial and error to find where to put explicit
# conversions.
#
# Because more functions do implicit conversion to str (string of bytes)
# than do implicit conversion to unicode (string of code points),
# the convention in this module is to represent all text as str,
# converting to unicode only when calling a unicode-only function
# and then converting back to str as soon as possible.
def typecheck(s, t):
if type(s) != t:
raise hg_util.Abort("type check failed: %s has type %s != %s" % (repr(s), type(s), t))
# If we have to pass unicode instead of str, ustr does that conversion clearly.
def ustr(s):
typecheck(s, str)
return s.decode("utf-8")
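# Illustrative sketch (not part of the original extension): the module-wide
# str/unicode convention described above on a concrete value. The byte string
# below is a made-up CL description; it is decoded to unicode only for
# unicode-only work and encoded straight back to str.
def _example_str_unicode_roundtrip():
	desc = "hello, \xe4\xb8\x96\xe7\x95\x8c"  # "hello, 世界" as UTF-8 bytes (str)
	u = ustr(desc)  # unicode, for regexps and other unicode-only APIs
	return u.encode("utf-8") == desc  # True: back to str as soon as possible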
# Even with those, Mercurial still sometimes turns unicode into str
# and then tries to use it as ascii. Change Mercurial's default.
def set_mercurial_encoding_to_utf8():
from mercurial import encoding
encoding.encoding = 'utf-8'
set_mercurial_encoding_to_utf8()
# Even with those we still run into problems.
# I tried to do things by the book but could not convince
# Mercurial to let me check in a change with UTF-8 in the
# CL description or author field, no matter how many conversions
# between str and unicode I inserted and despite changing the
# default encoding. I'm tired of this game, so set the default
# encoding for all of Python to 'utf-8', not 'ascii'.
def default_to_utf8():
import sys
stdout, __stdout__ = sys.stdout, sys.__stdout__
reload(sys) # site.py deleted setdefaultencoding; get it back
sys.stdout, sys.__stdout__ = stdout, __stdout__
sys.setdefaultencoding('utf-8')
default_to_utf8()
#######################################################################
# Status printer for long-running commands
global_status = None
def set_status(s):
if verbosity > 0:
print >>sys.stderr, time.asctime(), s
global global_status
global_status = s
class StatusThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
# pause a reasonable amount of time before
# starting to display status messages, so that
# most hg commands won't ever see them.
time.sleep(30)
# now show status every 15 seconds
while True:
time.sleep(15 - time.time() % 15)
s = global_status
if s is None:
continue
if s == "":
s = "(unknown status)"
print >>sys.stderr, time.asctime(), s
def start_status_thread():
t = StatusThread()
t.setDaemon(True) # allowed to exit if t is still running
t.start()
#######################################################################
# Change list parsing.
#
# Change lists are stored in .hg/codereview/cl.nnnnnn
# where nnnnnn is the number assigned by the code review server.
# Most data about a change list is stored on the code review server
# too: the description, reviewer, and cc list are all stored there.
# The only thing in the cl.nnnnnn file is the list of relevant files.
# Also, the existence of the cl.nnnnnn file marks this repository
# as the one where the change list lives.
emptydiff = """Index: ~rietveld~placeholder~
===================================================================
diff --git a/~rietveld~placeholder~ b/~rietveld~placeholder~
new file mode 100644
"""
class CL(object):
def __init__(self, name):
typecheck(name, str)
self.name = name
self.desc = ''
self.files = []
self.reviewer = []
self.cc = []
self.url = ''
self.local = False
self.web = False
self.copied_from = None # None means current user
self.mailed = False
self.private = False
self.lgtm = []
def DiskText(self):
cl = self
s = ""
if cl.copied_from:
s += "Author: " + cl.copied_from + "\n\n"
if cl.private:
s += "Private: " + str(self.private) + "\n"
s += "Mailed: " + str(self.mailed) + "\n"
s += "Description:\n"
s += Indent(cl.desc, "\t")
s += "Files:\n"
for f in cl.files:
s += "\t" + f + "\n"
typecheck(s, str)
return s
def EditorText(self):
cl = self
s = _change_prolog
s += "\n"
if cl.copied_from:
s += "Author: " + cl.copied_from + "\n"
if cl.url != '':
s += 'URL: ' + cl.url + ' # cannot edit\n\n'
if cl.private:
s += "Private: True\n"
s += "Reviewer: " + JoinComma(cl.reviewer) + "\n"
s += "CC: " + JoinComma(cl.cc) + "\n"
s += "\n"
s += "Description:\n"
if cl.desc == '':
s += "\t<enter description here>\n"
else:
s += Indent(cl.desc, "\t")
s += "\n"
if cl.local or cl.name == "new":
s += "Files:\n"
for f in cl.files:
s += "\t" + f + "\n"
s += "\n"
typecheck(s, str)
return s
def PendingText(self, quick=False):
cl = self
s = cl.name + ":" + "\n"
s += Indent(cl.desc, "\t")
s += "\n"
if cl.copied_from:
s += "\tAuthor: " + cl.copied_from + "\n"
if not quick:
s += "\tReviewer: " + JoinComma(cl.reviewer) + "\n"
for (who, line, _) in cl.lgtm:
s += "\t\t" + who + ": " + line + "\n"
s += "\tCC: " + JoinComma(cl.cc) + "\n"
s += "\tFiles:\n"
for f in cl.files:
s += "\t\t" + f + "\n"
typecheck(s, str)
return s
def Flush(self, ui, repo):
if self.name == "new":
self.Upload(ui, repo, gofmt_just_warn=True, creating=True)
dir = CodeReviewDir(ui, repo)
path = dir + '/cl.' + self.name
f = open(path+'!', "w")
f.write(self.DiskText())
f.close()
if sys.platform == "win32" and os.path.isfile(path):
os.remove(path)
os.rename(path+'!', path)
if self.web and not self.copied_from:
EditDesc(self.name, desc=self.desc,
reviewers=JoinComma(self.reviewer), cc=JoinComma(self.cc),
private=self.private)
def Delete(self, ui, repo):
dir = CodeReviewDir(ui, repo)
os.unlink(dir + "/cl." + self.name)
def Subject(self, ui, repo):
s = line1(self.desc)
if len(s) > 60:
s = s[0:55] + "..."
if self.name != "new":
s = "code review %s: %s" % (self.name, s)
typecheck(s, str)
s = branch_prefix(ui, repo) + s
# Rietveld does a hard reject on any subject > 100 chars. Be sure.
if len(s) >= 100:
s = s[0:95] + "..."
return s
def Upload(self, ui, repo, send_mail=False, gofmt=True, gofmt_just_warn=False, creating=False, quiet=False):
if not self.files and not creating:
ui.warn("no files in change list\n")
if ui.configbool("codereview", "force_gofmt", True) and gofmt:
CheckFormat(ui, repo, self.files, just_warn=gofmt_just_warn)
set_status("uploading CL metadata + diffs")
os.chdir(repo.root)
form_fields = [
("content_upload", "1"),
("reviewers", JoinComma(self.reviewer)),
("cc", JoinComma(self.cc)),
("description", self.desc),
("base_hashes", ""),
]
if self.name != "new":
form_fields.append(("issue", self.name))
vcs = None
# We do not include files when creating the issue,
# because we want the patch sets to record the repository
# and base revision they are diffs against. We use the patch
# set message for that purpose, but there is no message with
# the first patch set. Instead the message gets used as the
# new CL's overall subject. So omit the diffs when creating
# and then we'll run an immediate upload.
# This has the effect that every CL begins with an empty "Patch set 1".
if self.files and not creating:
vcs = MercurialVCS(upload_options, ui, repo)
data = vcs.GenerateDiff(self.files)
files = vcs.GetBaseFiles(data)
if len(data) > MAX_UPLOAD_SIZE:
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
else:
uploaded_diff_file = [("data", "data.diff", emptydiff)]
if vcs and self.name != "new":
form_fields.append(("subject", "diff -r " + vcs.base_rev + " " + ui.expandpath("default")))
else:
# First upload sets the subject for the CL itself.
form_fields.append(("subject", self.Subject(ui, repo)))
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = MySend("/upload", body, content_type=ctype)
patchset = None
msg = response_body
lines = msg.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
print >>sys.stderr, "Server says there is nothing to upload (probably wrong):\n" + msg
if response_body.startswith("Issue updated.") and quiet:
pass
else:
ui.status(msg + "\n")
set_status("uploaded CL metadata + diffs")
if not response_body.startswith("Issue created.") and not response_body.startswith("Issue updated."):
raise hg_util.Abort("failed to update issue: " + response_body)
issue = msg[msg.rfind("/")+1:]
self.name = issue
if not self.url:
self.url = server_url_base + self.name
if not uploaded_diff_file:
set_status("uploading patches")
patches = UploadSeparatePatches(issue, rpc, patchset, data, upload_options)
if vcs:
set_status("uploading base files")
vcs.UploadBaseFiles(issue, rpc, patches, patchset, upload_options, files)
if patchset != "1":
MySend("/" + issue + "/upload_complete/" + patchset, payload="")
if send_mail:
set_status("sending mail")
MySend("/" + issue + "/mail", payload="")
self.web = True
set_status("flushing changes to disk")
self.Flush(ui, repo)
return
def Mail(self, ui, repo):
pmsg = "Hello " + JoinComma(self.reviewer)
if self.cc:
pmsg += " (cc: %s)" % (', '.join(self.cc),)
pmsg += ",\n"
pmsg += "\n"
repourl = ui.expandpath("default")
if not self.mailed:
pmsg += "I'd like you to review this change to"
branch = repo[None].branch()
if workbranch(branch) and branch != "default":
pmsg += " the " + branch + " branch of"
pmsg += "\n" + repourl + "\n"
else:
pmsg += "Please take another look.\n"
typecheck(pmsg, str)
PostMessage(ui, self.name, pmsg, subject=self.Subject(ui, repo))
self.mailed = True
self.Flush(ui, repo)
def GoodCLName(name):
typecheck(name, str)
return re.match("^[0-9]+$", name)
def ParseCL(text, name):
typecheck(text, str)
typecheck(name, str)
sname = None
lineno = 0
sections = {
'Author': '',
'Description': '',
'Files': '',
'URL': '',
'Reviewer': '',
'CC': '',
'Mailed': '',
'Private': '',
}
for line in text.split('\n'):
lineno += 1
line = line.rstrip()
if line != '' and line[0] == '#':
continue
if line == '' or line[0] == ' ' or line[0] == '\t':
if sname == None and line != '':
return None, lineno, 'text outside section'
if sname != None:
sections[sname] += line + '\n'
continue
p = line.find(':')
if p >= 0:
s, val = line[:p].strip(), line[p+1:].strip()
if s in sections:
sname = s
if val != '':
sections[sname] += val + '\n'
continue
return None, lineno, 'malformed section header'
for k in sections:
sections[k] = StripCommon(sections[k]).rstrip()
cl = CL(name)
if sections['Author']:
cl.copied_from = sections['Author']
cl.desc = sections['Description']
for line in sections['Files'].split('\n'):
i = line.find('#')
if i >= 0:
line = line[0:i].rstrip()
line = line.strip()
if line == '':
continue
cl.files.append(line)
cl.reviewer = SplitCommaSpace(sections['Reviewer'])
cl.cc = SplitCommaSpace(sections['CC'])
cl.url = sections['URL']
if sections['Mailed'] != 'False':
# Odd default, but avoids spurious mailings when
# reading old CLs that do not have a Mailed: line.
# CLs created with this update will always have
# Mailed: False on disk.
cl.mailed = True
if sections['Private'] in ('True', 'true', 'Yes', 'yes'):
cl.private = True
if cl.desc == '<enter description here>':
cl.desc = ''
return cl, 0, ''
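# Illustrative sketch (not part of the original extension): round-trips a
# minimal CL through DiskText and ParseCL, the same pair that Flush and
# LoadCL use for the .hg/codereview/cl.nnnnnn files described above.
# The CL number, description, and file name are made up.
def _example_cl_roundtrip():
	cl = CL("123456")
	cl.desc = "math: add IsInf, IsNaN"
	cl.files = ["src/pkg/math/bits.go"]
	cl2, lineno, err = ParseCL(cl.DiskText(), "123456")
	return err == '' and cl2.files == cl.files  # True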
def SplitCommaSpace(s):
typecheck(s, str)
s = s.strip()
if s == "":
return []
return re.split(", *", s)
def CutDomain(s):
typecheck(s, str)
i = s.find('@')
if i >= 0:
s = s[0:i]
return s
def JoinComma(l):
seen = {}
uniq = []
for s in l:
typecheck(s, str)
if s not in seen:
seen[s] = True
uniq.append(s)
return ", ".join(uniq)
def ExceptionDetail():
s = str(sys.exc_info()[0])
if s.startswith("<type '") and s.endswith("'>"):
s = s[7:-2]
elif s.startswith("<class '") and s.endswith("'>"):
s = s[8:-2]
arg = str(sys.exc_info()[1])
if len(arg) > 0:
s += ": " + arg
return s
def IsLocalCL(ui, repo, name):
return GoodCLName(name) and os.access(CodeReviewDir(ui, repo) + "/cl." + name, 0)
# Load CL from disk and/or the web.
def LoadCL(ui, repo, name, web=True):
typecheck(name, str)
set_status("loading CL " + name)
if not GoodCLName(name):
return None, "invalid CL name"
dir = CodeReviewDir(ui, repo)
path = dir + "cl." + name
if os.access(path, 0):
ff = open(path)
text = ff.read()
ff.close()
cl, lineno, err = ParseCL(text, name)
if err != "":
return None, "malformed CL data: "+err
cl.local = True
else:
cl = CL(name)
if web:
set_status("getting issue metadata from web")
d = JSONGet(ui, "/api/" + name + "?messages=true")
set_status(None)
if d is None:
return None, "cannot load CL %s from server" % (name,)
if 'owner_email' not in d or 'issue' not in d or str(d['issue']) != name:
return None, "malformed response loading CL data from code review server"
cl.dict = d
cl.reviewer = d.get('reviewers', [])
cl.cc = d.get('cc', [])
if cl.local and cl.copied_from and cl.desc:
# local copy of CL written by someone else
# and we saved a description. use that one,
# so that committers can edit the description
# before doing hg submit.
pass
else:
cl.desc = d.get('description', "")
cl.url = server_url_base + name
cl.web = True
cl.private = d.get('private', False) != False
cl.lgtm = []
for m in d.get('messages', []):
if m.get('approval', False) == True or m.get('disapproval', False) == True:
who = re.sub('@.*', '', m.get('sender', ''))
text = re.sub("\n(.|\n)*", '', m.get('text', ''))
cl.lgtm.append((who, text, m.get('approval', False)))
set_status("loaded CL " + name)
return cl, ''
class LoadCLThread(threading.Thread):
def __init__(self, ui, repo, dir, f, web):
threading.Thread.__init__(self)
self.ui = ui
self.repo = repo
self.dir = dir
self.f = f
self.web = web
self.cl = None
def run(self):
cl, err = LoadCL(self.ui, self.repo, self.f[3:], web=self.web)
if err != '':
self.ui.warn("loading "+self.dir+self.f+": " + err + "\n")
return
self.cl = cl
# Load all the CLs from this repository.
def LoadAllCL(ui, repo, web=True):
dir = CodeReviewDir(ui, repo)
m = {}
files = [f for f in os.listdir(dir) if f.startswith('cl.')]
if not files:
return m
active = []
first = True
for f in files:
t = LoadCLThread(ui, repo, dir, f, web)
t.start()
if web and first:
# first request: wait in case it needs to authenticate
# otherwise we get lots of user/password prompts
# running in parallel.
t.join()
if t.cl:
m[t.cl.name] = t.cl
first = False
else:
active.append(t)
for t in active:
t.join()
if t.cl:
m[t.cl.name] = t.cl
return m
# Find repository root. On error, ui.warn and return None
def RepoDir(ui, repo):
url = repo.url();
if not url.startswith('file:'):
ui.warn("repository %s is not in local file system\n" % (url,))
return None
url = url[5:]
if url.endswith('/'):
url = url[:-1]
typecheck(url, str)
return url
# Find (or make) code review directory. On error, ui.warn and return None
def CodeReviewDir(ui, repo):
dir = RepoDir(ui, repo)
if dir == None:
return None
dir += '/.hg/codereview/'
if not os.path.isdir(dir):
try:
os.mkdir(dir, 0700)
except:
ui.warn('cannot mkdir %s: %s\n' % (dir, ExceptionDetail()))
return None
typecheck(dir, str)
return dir
# Turn leading tabs into spaces, so that the common white space
# prefix doesn't get confused when people's editors write out
# some lines with spaces, some with tabs. Only a heuristic
# (some editors don't use 8 spaces either) but a useful one.
def TabsToSpaces(line):
i = 0
while i < len(line) and line[i] == '\t':
i += 1
return ' '*(8*i) + line[i:]
# Strip maximal common leading white space prefix from text
def StripCommon(text):
typecheck(text, str)
ws = None
for line in text.split('\n'):
line = line.rstrip()
if line == '':
continue
line = TabsToSpaces(line)
white = line[:len(line)-len(line.lstrip())]
if ws == None:
ws = white
else:
common = ''
for i in range(min(len(white), len(ws))+1):
if white[0:i] == ws[0:i]:
common = white[0:i]
ws = common
if ws == '':
break
if ws == None:
return text
t = ''
for line in text.split('\n'):
line = line.rstrip()
line = TabsToSpaces(line)
if line.startswith(ws):
line = line[len(ws):]
if line == '' and t == '':
continue
t += line + '\n'
while len(t) >= 2 and t[-2:] == '\n\n':
t = t[:-1]
typecheck(t, str)
return t
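# Illustrative sketch (not part of the original extension): StripCommon on a
# made-up two-line description. TabsToSpaces widens each leading tab to 8
# spaces first, so the shared one-level indent is removed and the deeper
# second line keeps an 8-space indent.
def _example_strip_common():
	text = "\tmath: add IsInf, IsNaN\n\t\tdetails follow\n"
	return StripCommon(text)  # "math: add IsInf, IsNaN\n        details follow\n"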
# Indent text with indent.
def Indent(text, indent):
typecheck(text, str)
typecheck(indent, str)
t = ''
for line in text.split('\n'):
t += indent + line + '\n'
typecheck(t, str)
return t
# Return the first line of l
def line1(text):
typecheck(text, str)
return text.split('\n')[0]
_change_prolog = """# Change list.
# Lines beginning with # are ignored.
# Multi-line values should be indented.
"""
desc_re = '^(.+: |(tag )?(release|weekly)\.|fix build|undo CL)'
desc_msg = '''Your CL description appears not to use the standard form.
The first line of your change description is conventionally a
one-line summary of the change, prefixed by the primary affected package,
and is used as the subject for code review mail; the rest of the description
elaborates.
Examples:
encoding/rot13: new package
math: add IsInf, IsNaN
net: fix cname in LookupHost
unicode: update to Unicode 5.0.2
'''
def promptyesno(ui, msg):
if hgversion >= "2.7":
return ui.promptchoice(msg + " $$ &yes $$ &no", 0) == 0
else:
return ui.promptchoice(msg, ["&yes", "&no"], 0) == 0
def promptremove(ui, repo, f):
if promptyesno(ui, "hg remove %s (y/n)?" % (f,)):
if hg_commands.remove(ui, repo, 'path:'+f) != 0:
ui.warn("error removing %s" % (f,))
def promptadd(ui, repo, f):
if promptyesno(ui, "hg add %s (y/n)?" % (f,)):
if hg_commands.add(ui, repo, 'path:'+f) != 0:
ui.warn("error adding %s" % (f,))
def EditCL(ui, repo, cl):
set_status(None) # do not show status
s = cl.EditorText()
while True:
s = ui.edit(s, ui.username())
# We can't trust Mercurial + Python not to die before making the change,
# so, by popular demand, just scribble the most recent CL edit into
# $(hg root)/last-change so that if Mercurial does die, people
# can look there for their work.
try:
f = open(repo.root+"/last-change", "w")
f.write(s)
f.close()
except:
pass
clx, line, err = ParseCL(s, cl.name)
if err != '':
if not promptyesno(ui, "error parsing change list: line %d: %s\nre-edit (y/n)?" % (line, err)):
return "change list not modified"
continue
# Check description.
if clx.desc == '':
if promptyesno(ui, "change list should have a description\nre-edit (y/n)?"):
continue
elif re.search('<enter reason for undo>', clx.desc):
if promptyesno(ui, "change list description omits reason for undo\nre-edit (y/n)?"):
continue
elif not re.match(desc_re, clx.desc.split('\n')[0]):
if promptyesno(ui, desc_msg + "re-edit (y/n)?"):
continue
# Check file list for files that need to be hg added or hg removed
# or simply aren't understood.
pats = ['path:'+f for f in clx.files]
changed = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
deleted = hg_matchPattern(ui, repo, *pats, deleted=True)
unknown = hg_matchPattern(ui, repo, *pats, unknown=True)
ignored = hg_matchPattern(ui, repo, *pats, ignored=True)
clean = hg_matchPattern(ui, repo, *pats, clean=True)
files = []
for f in clx.files:
if f in changed:
files.append(f)
continue
if f in deleted:
promptremove(ui, repo, f)
files.append(f)
continue
if f in unknown:
promptadd(ui, repo, f)
files.append(f)
continue
if f in ignored:
ui.warn("error: %s is excluded by .hgignore; omitting\n" % (f,))
continue
if f in clean:
ui.warn("warning: %s is listed in the CL but unchanged\n" % (f,))
files.append(f)
continue
p = repo.root + '/' + f
if os.path.isfile(p):
ui.warn("warning: %s is a file but not known to hg\n" % (f,))
files.append(f)
continue
if os.path.isdir(p):
ui.warn("error: %s is a directory, not a file; omitting\n" % (f,))
continue
ui.warn("error: %s does not exist; omitting\n" % (f,))
clx.files = files
cl.desc = clx.desc
cl.reviewer = clx.reviewer
cl.cc = clx.cc
cl.files = clx.files
cl.private = clx.private
break
return ""
# For use by submit, etc. (NOT by change)
# Get change list number or list of files from command line.
# If files are given, make a new change list.
def CommandLineCL(ui, repo, pats, opts, op="verb", defaultcc=None):
if len(pats) > 0 and GoodCLName(pats[0]):
if len(pats) != 1:
return None, "cannot specify change number and file names"
if opts.get('message'):
return None, "cannot use -m with existing CL"
cl, err = LoadCL(ui, repo, pats[0], web=True)
if err != "":
return None, err
else:
cl = CL("new")
cl.local = True
cl.files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
if not cl.files:
return None, "no files changed (use hg %s <number> to use existing CL)" % op
if opts.get('reviewer'):
cl.reviewer = Add(cl.reviewer, SplitCommaSpace(opts.get('reviewer')))
if opts.get('cc'):
cl.cc = Add(cl.cc, SplitCommaSpace(opts.get('cc')))
if defaultcc and not cl.private:
cl.cc = Add(cl.cc, defaultcc)
if cl.name == "new":
if opts.get('message'):
cl.desc = opts.get('message')
else:
err = EditCL(ui, repo, cl)
if err != '':
return None, err
return cl, ""
#######################################################################
# Change list file management
# Return list of changed files in repository that match pats.
# The patterns came from the command line, so we warn
# if they have no effect or cannot be understood.
def ChangedFiles(ui, repo, pats, taken=None):
taken = taken or {}
# Run each pattern separately so that we can warn about
# patterns that didn't do anything useful.
for p in pats:
for f in hg_matchPattern(ui, repo, p, unknown=True):
promptadd(ui, repo, f)
for f in hg_matchPattern(ui, repo, p, removed=True):
promptremove(ui, repo, f)
files = hg_matchPattern(ui, repo, p, modified=True, added=True, removed=True)
for f in files:
if f in taken:
ui.warn("warning: %s already in CL %s\n" % (f, taken[f].name))
if not files:
ui.warn("warning: %s did not match any modified files\n" % (p,))
# Again, all at once (eliminates duplicates)
l = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
l.sort()
if taken:
l = Sub(l, taken.keys())
return l
# Return list of changed files in repository that match pats and still exist.
def ChangedExistingFiles(ui, repo, pats, opts):
l = hg_matchPattern(ui, repo, *pats, modified=True, added=True)
l.sort()
return l
# Return list of files claimed by existing CLs
def Taken(ui, repo):
all = LoadAllCL(ui, repo, web=False)
taken = {}
for _, cl in all.items():
for f in cl.files:
taken[f] = cl
return taken
# Return list of changed files that are not claimed by other CLs
def DefaultFiles(ui, repo, pats):
return ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
#######################################################################
# File format checking.
def CheckFormat(ui, repo, files, just_warn=False):
set_status("running gofmt")
CheckGofmt(ui, repo, files, just_warn)
CheckTabfmt(ui, repo, files, just_warn)
# Check that gofmt run on the list of files does not change them
def CheckGofmt(ui, repo, files, just_warn):
files = gofmt_required(files)
if not files:
return
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
files = [f for f in files if os.access(f, 0)]
if not files:
return
try:
cmd = subprocess.Popen(["gofmt", "-l"] + files, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=sys.platform != "win32")
cmd.stdin.close()
except:
raise hg_util.Abort("gofmt: " + ExceptionDetail())
data = cmd.stdout.read()
errors = cmd.stderr.read()
cmd.wait()
set_status("done with gofmt")
if len(errors) > 0:
ui.warn("gofmt errors:\n" + errors.rstrip() + "\n")
return
if len(data) > 0:
msg = "gofmt needs to format these files (run hg gofmt):\n" + Indent(data, "\t").rstrip()
if just_warn:
ui.warn("warning: " + msg + "\n")
else:
raise hg_util.Abort(msg)
return
# Check that *.[chys] files indent using tabs.
def CheckTabfmt(ui, repo, files, just_warn):
files = [f for f in files if f.startswith('src/') and re.search(r"\.[chys]$", f) and not re.search(r"\.tab\.[ch]$", f)]
if not files:
return
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
files = [f for f in files if os.access(f, 0)]
badfiles = []
for f in files:
try:
for line in open(f, 'r'):
# Four leading spaces is enough to complain about,
# except that some Plan 9 code uses four spaces as the label indent,
# so allow that.
if line.startswith(' ') and not re.match(' [A-Za-z0-9_]+:', line):
badfiles.append(f)
break
except:
# ignore cannot open file, etc.
pass
if len(badfiles) > 0:
msg = "these files use spaces for indentation (use tabs instead):\n\t" + "\n\t".join(badfiles)
if just_warn:
ui.warn("warning: " + msg + "\n")
else:
raise hg_util.Abort(msg)
return
#######################################################################
# CONTRIBUTORS file parsing
contributorsCache = None
contributorsURL = None
def ReadContributors(ui, repo):
global contributorsCache
if contributorsCache is not None:
return contributorsCache
try:
if contributorsURL is not None:
opening = contributorsURL
f = urllib2.urlopen(contributorsURL)
else:
opening = repo.root + '/CONTRIBUTORS'
f = open(repo.root + '/CONTRIBUTORS', 'r')
except:
ui.write("warning: cannot open %s: %s\n" % (opening, ExceptionDetail()))
return {}
contributors = {}
for line in f:
# CONTRIBUTORS is a list of lines like:
# Person <email>
# Person <email> <alt-email>
# The first email address is the one used in commit logs.
if line.startswith('#'):
continue
m = re.match(r"([^<>]+\S)\s+(<[^<>\s]+>)((\s+<[^<>\s]+>)*)\s*$", line)
if m:
name = m.group(1)
email = m.group(2)[1:-1]
contributors[email.lower()] = (name, email)
for extra in m.group(3).split():
contributors[extra[1:-1].lower()] = (name, email)
contributorsCache = contributors
return contributors
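# Illustrative sketch (not part of the original extension): one CONTRIBUTORS
# line in the format described above, run through the same regexp that
# ReadContributors uses. The name and addresses are made up.
def _example_contributors_line():
	line = "Grace Example <grace@example.com> <g@alt.example.org>"
	m = re.match(r"([^<>]+\S)\s+(<[^<>\s]+>)((\s+<[^<>\s]+>)*)\s*$", line)
	# Both addresses would be filed under ("Grace Example", "grace@example.com").
	return m.group(1), m.group(2)[1:-1]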
def CheckContributor(ui, repo, user=None):
set_status("checking CONTRIBUTORS file")
user, userline = FindContributor(ui, repo, user, warn=False)
if not userline:
raise hg_util.Abort("cannot find %s in CONTRIBUTORS" % (user,))
return userline
def FindContributor(ui, repo, user=None, warn=True):
if not user:
user = ui.config("ui", "username")
if not user:
raise hg_util.Abort("[ui] username is not configured in .hgrc")
user = user.lower()
m = re.match(r".*<(.*)>", user)
if m:
user = m.group(1)
contributors = ReadContributors(ui, repo)
if user not in contributors:
if warn:
ui.warn("warning: cannot find %s in CONTRIBUTORS\n" % (user,))
return user, None
user, email = contributors[user]
return email, "%s <%s>" % (user, email)
#######################################################################
# Mercurial helper functions.
# Read http://mercurial.selenic.com/wiki/MercurialApi before writing any of these.
# We use the ui.pushbuffer/ui.popbuffer + hg_commands.xxx tricks for all interaction
# with Mercurial. It has proved the most stable as they make changes.
hgversion = hg_util.version()
# We require Mercurial 1.9 and suggest Mercurial 2.1.
# The details of the scmutil package changed then,
# so allowing earlier versions would require extra band-aids below.
# Ubuntu 11.10 ships with Mercurial 1.9.1 as the default version.
hg_required = "1.9"
hg_suggested = "2.1"
old_message = """
The code review extension requires Mercurial """+hg_required+""" or newer.
You are using Mercurial """+hgversion+""".
To install a new Mercurial, visit http://mercurial.selenic.com/downloads/.
"""
linux_message = """
You may need to clear your current Mercurial installation by running:
sudo apt-get remove mercurial mercurial-common
sudo rm -rf /etc/mercurial
"""
if hgversion < hg_required:
msg = old_message
if os.access("/etc/mercurial", 0):
msg += linux_message
raise hg_util.Abort(msg)
from mercurial.hg import clean as hg_clean
from mercurial import cmdutil as hg_cmdutil
from mercurial import error as hg_error
from mercurial import match as hg_match
from mercurial import node as hg_node
class uiwrap(object):
def __init__(self, ui):
self.ui = ui
ui.pushbuffer()
self.oldQuiet = ui.quiet
ui.quiet = True
self.oldVerbose = ui.verbose
ui.verbose = False
def output(self):
ui = self.ui
ui.quiet = self.oldQuiet
ui.verbose = self.oldVerbose
return ui.popbuffer()
def to_slash(path):
if sys.platform == "win32":
return path.replace('\\', '/')
return path
def hg_matchPattern(ui, repo, *pats, **opts):
w = uiwrap(ui)
hg_commands.status(ui, repo, *pats, **opts)
text = w.output()
ret = []
prefix = to_slash(os.path.realpath(repo.root))+'/'
for line in text.split('\n'):
f = line.split()
if len(f) > 1:
if len(pats) > 0:
# Given patterns, Mercurial shows relative to cwd
p = to_slash(os.path.realpath(f[1]))
if not p.startswith(prefix):
print >>sys.stderr, "File %s not in repo root %s.\n" % (p, prefix)
else:
ret.append(p[len(prefix):])
else:
# Without patterns, Mercurial shows relative to root (what we want)
ret.append(to_slash(f[1]))
return ret
def hg_heads(ui, repo):
w = uiwrap(ui)
hg_commands.heads(ui, repo)
return w.output()
noise = [
"",
"resolving manifests",
"searching for changes",
"couldn't find merge tool hgmerge",
"adding changesets",
"adding manifests",
"adding file changes",
"all local heads known remotely",
]
def isNoise(line):
line = str(line)
for x in noise:
if line == x:
return True
return False
def hg_incoming(ui, repo):
w = uiwrap(ui)
ret = hg_commands.incoming(ui, repo, force=False, bundle="")
if ret and ret != 1:
raise hg_util.Abort(ret)
return w.output()
def hg_log(ui, repo, **opts):
for k in ['date', 'keyword', 'rev', 'user']:
if not opts.has_key(k):
opts[k] = ""
w = uiwrap(ui)
ret = hg_commands.log(ui, repo, **opts)
if ret:
raise hg_util.Abort(ret)
return w.output()
def hg_outgoing(ui, repo, **opts):
w = uiwrap(ui)
ret = hg_commands.outgoing(ui, repo, **opts)
if ret and ret != 1:
raise hg_util.Abort(ret)
return w.output()
def hg_pull(ui, repo, **opts):
w = uiwrap(ui)
ui.quiet = False
ui.verbose = True # for file list
err = hg_commands.pull(ui, repo, **opts)
for line in w.output().split('\n'):
if isNoise(line):
continue
if line.startswith('moving '):
line = 'mv ' + line[len('moving '):]
if line.startswith('getting ') and line.find(' to ') >= 0:
line = 'mv ' + line[len('getting '):]
if line.startswith('getting '):
line = '+ ' + line[len('getting '):]
if line.startswith('removing '):
line = '- ' + line[len('removing '):]
ui.write(line + '\n')
return err
def hg_update(ui, repo, **opts):
w = uiwrap(ui)
ui.quiet = False
ui.verbose = True # for file list
err = hg_commands.update(ui, repo, **opts)
for line in w.output().split('\n'):
if isNoise(line):
continue
if line.startswith('moving '):
line = 'mv ' + line[len('moving '):]
if line.startswith('getting ') and line.find(' to ') >= 0:
line = 'mv ' + line[len('getting '):]
if line.startswith('getting '):
line = '+ ' + line[len('getting '):]
if line.startswith('removing '):
line = '- ' + line[len('removing '):]
ui.write(line + '\n')
return err
def hg_push(ui, repo, **opts):
w = uiwrap(ui)
ui.quiet = False
ui.verbose = True
err = hg_commands.push(ui, repo, **opts)
for line in w.output().split('\n'):
if not isNoise(line):
ui.write(line + '\n')
return err
def hg_commit(ui, repo, *pats, **opts):
return hg_commands.commit(ui, repo, *pats, **opts)
#######################################################################
# Mercurial precommit hook to disable commit except through this interface.
commit_okay = False
def precommithook(ui, repo, **opts):
if hgversion >= "2.1":
from mercurial import phases
if repo.ui.config('phases', 'new-commit') >= phases.secret:
return False
if commit_okay:
return False # False means okay.
ui.write("\ncodereview extension enabled; use mail, upload, or submit instead of commit\n\n")
return True
#######################################################################
# @clnumber file pattern support
# We replace scmutil.match with the MatchAt wrapper to add the @clnumber pattern.
match_repo = None
match_ui = None
match_orig = None
def InstallMatch(ui, repo):
global match_repo
global match_ui
global match_orig
match_ui = ui
match_repo = repo
from mercurial import scmutil
match_orig = scmutil.match
scmutil.match = MatchAt
def MatchAt(ctx, pats=None, opts=None, globbed=False, default='relpath'):
taken = []
files = []
pats = pats or []
opts = opts or {}
for p in pats:
if p.startswith('@'):
taken.append(p)
clname = p[1:]
if clname == "default":
files = DefaultFiles(match_ui, match_repo, [])
else:
if not GoodCLName(clname):
raise hg_util.Abort("invalid CL name " + clname)
cl, err = LoadCL(match_repo.ui, match_repo, clname, web=False)
if err != '':
raise hg_util.Abort("loading CL " + clname + ": " + err)
if not cl.files:
raise hg_util.Abort("no files in CL " + clname)
files = Add(files, cl.files)
pats = Sub(pats, taken) + ['path:'+f for f in files]
# work-around for http://selenic.com/hg/rev/785bbc8634f8
if not hasattr(ctx, 'match'):
ctx = ctx[None]
return match_orig(ctx, pats=pats, opts=opts, globbed=globbed, default=default)
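# Illustrative sketch (not part of the original extension): the rewriting that
# MatchAt performs on an @clnumber pattern, shown without a repository.
# CL 123456 and its file list are made up; in MatchAt the list comes from LoadCL.
def _example_expand_cl_pattern():
	pats = ["@123456", "path:doc/install.html"]
	cl_files = ["x.go", "y.go"]
	taken = [p for p in pats if p.startswith('@')]
	# ['path:doc/install.html', 'path:x.go', 'path:y.go']
	return Sub(pats, taken) + ['path:' + f for f in cl_files]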
#######################################################################
# Commands added by code review extension.
def hgcommand(f):
return f
#######################################################################
# hg change
@hgcommand
def change(ui, repo, *pats, **opts):
"""create, edit or delete a change list
Create, edit or delete a change list.
A change list is a group of files to be reviewed and submitted together,
plus a textual description of the change.
Change lists are referred to by simple alphanumeric names.
Changes must be reviewed before they can be submitted.
In the absence of options, the change command opens the
change list for editing in the default editor.
Deleting a change with the -d or -D flag does not affect
the contents of the files listed in that change. To revert
the files listed in a change, use
hg revert @123456
before running hg change -d 123456.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
dirty = {}
if len(pats) > 0 and GoodCLName(pats[0]):
name = pats[0]
if len(pats) != 1:
raise hg_util.Abort("cannot specify CL name and file patterns")
pats = pats[1:]
cl, err = LoadCL(ui, repo, name, web=True)
if err != '':
raise hg_util.Abort(err)
if not cl.local and (opts["stdin"] or not opts["stdout"]):
raise hg_util.Abort("cannot change non-local CL " + name)
else:
name = "new"
cl = CL("new")
if not workbranch(repo[None].branch()):
raise hg_util.Abort("cannot create CL outside default branch; switch with 'hg update default'")
dirty[cl] = True
files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
if opts["delete"] or opts["deletelocal"]:
if opts["delete"] and opts["deletelocal"]:
raise hg_util.Abort("cannot use -d and -D together")
flag = "-d"
if opts["deletelocal"]:
flag = "-D"
if name == "new":
raise hg_util.Abort("cannot use "+flag+" with file patterns")
if opts["stdin"] or opts["stdout"]:
raise hg_util.Abort("cannot use "+flag+" with -i or -o")
if not cl.local:
raise hg_util.Abort("cannot change non-local CL " + name)
if opts["delete"]:
if cl.copied_from:
raise hg_util.Abort("original author must delete CL; hg change -D will remove locally")
PostMessage(ui, cl.name, "*** Abandoned ***", send_mail=cl.mailed)
EditDesc(cl.name, closed=True, private=cl.private)
cl.Delete(ui, repo)
return
if opts["stdin"]:
s = sys.stdin.read()
clx, line, err = ParseCL(s, name)
if err != '':
raise hg_util.Abort("error parsing change list: line %d: %s" % (line, err))
if clx.desc is not None:
cl.desc = clx.desc;
dirty[cl] = True
if clx.reviewer is not None:
cl.reviewer = clx.reviewer
dirty[cl] = True
if clx.cc is not None:
cl.cc = clx.cc
dirty[cl] = True
if clx.files is not None:
cl.files = clx.files
dirty[cl] = True
if clx.private != cl.private:
cl.private = clx.private
dirty[cl] = True
if not opts["stdin"] and not opts["stdout"]:
if name == "new":
cl.files = files
err = EditCL(ui, repo, cl)
if err != "":
raise hg_util.Abort(err)
dirty[cl] = True
for d, _ in dirty.items():
name = d.name
d.Flush(ui, repo)
if name == "new":
d.Upload(ui, repo, quiet=True)
if opts["stdout"]:
ui.write(cl.EditorText())
elif opts["pending"]:
ui.write(cl.PendingText())
elif name == "new":
if ui.quiet:
ui.write(cl.name)
else:
ui.write("CL created: " + cl.url + "\n")
return
#######################################################################
# hg code-login (broken?)
@hgcommand
def code_login(ui, repo, **opts):
"""log in to code review server
Logs in to the code review server, saving a cookie in
a file in your home directory.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
MySend(None)
#######################################################################
# hg clpatch / undo / release-apply / download
# All concerned with applying or unapplying patches to the repository.
@hgcommand
def clpatch(ui, repo, clname, **opts):
"""import a patch from the code review server
Imports a patch from the code review server into the local client.
If the local client has already modified any of the files that the
patch modifies, this command will refuse to apply the patch.
Submitting an imported patch will keep the original author's
name as the Author: line but add your own name to a Committer: line.
"""
if not workbranch(repo[None].branch()):
raise hg_util.Abort("cannot run hg clpatch outside default branch")
err = clpatch_or_undo(ui, repo, clname, opts, mode="clpatch")
if err:
raise hg_util.Abort(err)
@hgcommand
def undo(ui, repo, clname, **opts):
"""undo the effect of a CL
Creates a new CL that undoes an earlier CL.
After creating the CL, opens the CL text for editing so that
you can add the reason for the undo to the description.
"""
if not workbranch(repo[None].branch()):
raise hg_util.Abort("cannot run hg undo outside default branch")
err = clpatch_or_undo(ui, repo, clname, opts, mode="undo")
if err:
raise hg_util.Abort(err)
@hgcommand
def release_apply(ui, repo, clname, **opts):
"""apply a CL to the release branch
Creates a new CL copying a previously committed change
from the main branch to the release branch.
The current client must either be clean or already be in
the release branch.
The release branch must be created by starting with a
clean client, disabling the code review plugin, and running:
hg update weekly.YYYY-MM-DD
hg branch release-branch.rNN
hg commit -m 'create release-branch.rNN'
hg push --new-branch
Then re-enable the code review plugin.
People can test the release branch by running
hg update release-branch.rNN
in a clean client. To return to the normal tree,
hg update default
Move changes since the weekly into the release branch
using hg release-apply followed by the usual code review
process and hg submit.
When it comes time to tag the release, record the
final long-form tag of the release-branch.rNN
in the *default* branch's .hgtags file. That is, run
hg update default
and then edit .hgtags as you would for a weekly.
"""
c = repo[None]
if not releaseBranch:
raise hg_util.Abort("no active release branches")
if c.branch() != releaseBranch:
if c.modified() or c.added() or c.removed():
raise hg_util.Abort("uncommitted local changes - cannot switch branches")
err = hg_clean(repo, releaseBranch)
if err:
raise hg_util.Abort(err)
try:
err = clpatch_or_undo(ui, repo, clname, opts, mode="backport")
if err:
raise hg_util.Abort(err)
except Exception, e:
hg_clean(repo, "default")
raise e
def rev2clname(rev):
# Extract CL name from revision description.
# The last line in the description that is a codereview URL is the real one.
# Earlier lines might be part of the user-written description.
all = re.findall('(?m)^https?://codereview.appspot.com/([0-9]+)$', rev.description())
if len(all) > 0:
return all[-1]
return ""
undoHeader = """undo CL %s / %s
<enter reason for undo>
««« original CL description
"""
undoFooter = """
»»»
"""
backportHeader = """[%s] %s
««« CL %s / %s
"""
backportFooter = """
»»»
"""
# Implementation of clpatch/undo.
def clpatch_or_undo(ui, repo, clname, opts, mode):
if codereview_disabled:
return codereview_disabled
if mode == "undo" or mode == "backport":
# Find revision in Mercurial repository.
# Assume CL number is 7+ decimal digits.
# Otherwise is either change log sequence number (fewer decimal digits),
# hexadecimal hash, or tag name.
# Mercurial will fall over long before the change log
# sequence numbers get to be 7 digits long.
if re.match('^[0-9]{7,}$', clname):
found = False
for r in hg_log(ui, repo, keyword="codereview.appspot.com/"+clname, limit=100, template="{node}\n").split():
rev = repo[r]
# Last line with a code review URL is the actual review URL.
# Earlier ones might be part of the CL description.
n = rev2clname(rev)
if n == clname:
found = True
break
if not found:
return "cannot find CL %s in local repository" % clname
else:
rev = repo[clname]
if not rev:
return "unknown revision %s" % clname
clname = rev2clname(rev)
if clname == "":
return "cannot find CL name in revision description"
# Create fresh CL and start with patch that would reverse the change.
vers = hg_node.short(rev.node())
cl = CL("new")
desc = str(rev.description())
if mode == "undo":
cl.desc = (undoHeader % (clname, vers)) + desc + undoFooter
else:
cl.desc = (backportHeader % (releaseBranch, line1(desc), clname, vers)) + desc + undoFooter
v1 = vers
v0 = hg_node.short(rev.parents()[0].node())
if mode == "undo":
arg = v1 + ":" + v0
else:
vers = v0
arg = v0 + ":" + v1
patch = RunShell(["hg", "diff", "--git", "-r", arg])
else: # clpatch
cl, vers, patch, err = DownloadCL(ui, repo, clname)
if err != "":
return err
if patch == emptydiff:
return "codereview issue %s has no diff" % clname
# find current hg version (hg identify)
ctx = repo[None]
parents = ctx.parents()
id = '+'.join([hg_node.short(p.node()) for p in parents])
# if version does not match the patch version,
# try to update the patch line numbers.
if vers != "" and id != vers:
# "vers in repo" gives the wrong answer
# on some versions of Mercurial. Instead, do the actual
# lookup and catch the exception.
try:
repo[vers].description()
except:
return "local repository is out of date; sync to get %s" % (vers)
patch1, err = portPatch(repo, patch, vers, id)
if err != "":
if not opts["ignore_hgapplydiff_failure"]:
return "codereview issue %s is out of date: %s (%s->%s)" % (clname, err, vers, id)
else:
patch = patch1
argv = ["hgapplydiff"]
if opts["no_incoming"] or mode == "backport":
argv += ["--checksync=false"]
try:
cmd = subprocess.Popen(argv, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=sys.platform != "win32")
except:
return "hgapplydiff: " + ExceptionDetail() + "\nInstall hgapplydiff with:\n$ go get golang.org/x/codereview/cmd/hgapplydiff\n"
out, err = cmd.communicate(patch)
if cmd.returncode != 0 and not opts["ignore_hgapplydiff_failure"]:
return "hgapplydiff failed"
cl.local = True
cl.files = out.strip().split()
if not cl.files and not opts["ignore_hgapplydiff_failure"]:
return "codereview issue %s has no changed files" % clname
files = ChangedFiles(ui, repo, [])
extra = Sub(cl.files, files)
if extra:
ui.warn("warning: these files were listed in the patch but not changed:\n\t" + "\n\t".join(extra) + "\n")
cl.Flush(ui, repo)
if mode == "undo":
err = EditCL(ui, repo, cl)
if err != "":
return "CL created, but error editing: " + err
cl.Flush(ui, repo)
else:
ui.write(cl.PendingText() + "\n")
# portPatch rewrites patch from being a patch against
# oldver to being a patch against newver.
def portPatch(repo, patch, oldver, newver):
lines = patch.splitlines(True) # True = keep \n
delta = None
for i in range(len(lines)):
line = lines[i]
if line.startswith('--- a/'):
file = line[6:-1]
delta = fileDeltas(repo, file, oldver, newver)
if not delta or not line.startswith('@@ '):
continue
# @@ -x,y +z,w @@ means the patch chunk replaces
# the original file's line numbers x up to x+y with the
# line numbers z up to z+w in the new file.
# Find the delta from x in the original to the same
# line in the current version and add that delta to both
# x and z.
m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
if not m:
return None, "error parsing patch line numbers"
n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
d, err = lineDelta(delta, n1, len1)
if err != "":
return "", err
n1 += d
n2 += d
lines[i] = "@@ -%d,%d +%d,%d @@\n" % (n1, len1, n2, len2)
newpatch = ''.join(lines)
return newpatch, ""
# fileDelta returns the line number deltas for the given file's
# changes from oldver to newver.
# The deltas are a list of (n, len, newdelta) triples that say
# lines [n, n+len) were modified, and after that range the
# line numbers are +newdelta from what they were before.
def fileDeltas(repo, file, oldver, newver):
cmd = ["hg", "diff", "--git", "-r", oldver + ":" + newver, "path:" + file]
data = RunShell(cmd, silent_ok=True)
deltas = []
for line in data.splitlines():
m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
if not m:
continue
n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
deltas.append((n1, len1, n2+len2-(n1+len1)))
return deltas
# lineDelta finds the appropriate line number delta to apply to the lines [n, n+len).
# It returns an error if those lines were rewritten by the patch.
def lineDelta(deltas, n, len):
d = 0
for (old, oldlen, newdelta) in deltas:
if old >= n+len:
break
if old+len > n:
return 0, "patch and recent changes conflict"
d = newdelta
return d, ""
@hgcommand
def download(ui, repo, clname, **opts):
"""download a change from the code review server
Download prints a description of the given change list
followed by its diff, downloaded from the code review server.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
cl, vers, patch, err = DownloadCL(ui, repo, clname)
if err != "":
return err
ui.write(cl.EditorText() + "\n")
ui.write(patch + "\n")
return
#######################################################################
# hg file
@hgcommand
def file(ui, repo, clname, pat, *pats, **opts):
"""assign files to or remove files from a change list
Assign files to or (with -d) remove files from a change list.
The -d option only removes files from the change list.
It does not edit them or remove them from the repository.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
pats = tuple([pat] + list(pats))
if not GoodCLName(clname):
return "invalid CL name " + clname
dirty = {}
cl, err = LoadCL(ui, repo, clname, web=False)
if err != '':
return err
if not cl.local:
return "cannot change non-local CL " + clname
files = ChangedFiles(ui, repo, pats)
if opts["delete"]:
oldfiles = Intersect(files, cl.files)
if oldfiles:
if not ui.quiet:
ui.status("# Removing files from CL. To undo:\n")
ui.status("# cd %s\n" % (repo.root))
for f in oldfiles:
ui.status("# hg file %s %s\n" % (cl.name, f))
cl.files = Sub(cl.files, oldfiles)
cl.Flush(ui, repo)
else:
ui.status("no such files in CL")
return
if not files:
return "no such modified files"
files = Sub(files, cl.files)
taken = Taken(ui, repo)
warned = False
for f in files:
if f in taken:
if not warned and not ui.quiet:
ui.status("# Taking files from other CLs. To undo:\n")
ui.status("# cd %s\n" % (repo.root))
warned = True
ocl = taken[f]
if not ui.quiet:
ui.status("# hg file %s %s\n" % (ocl.name, f))
if ocl not in dirty:
ocl.files = Sub(ocl.files, files)
dirty[ocl] = True
cl.files = Add(cl.files, files)
dirty[cl] = True
for d, _ in dirty.items():
d.Flush(ui, repo)
return
#######################################################################
# hg gofmt
@hgcommand
def gofmt(ui, repo, *pats, **opts):
"""apply gofmt to modified files
Applies gofmt to the modified files in the repository that match
the given patterns.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
files = ChangedExistingFiles(ui, repo, pats, opts)
files = gofmt_required(files)
if not files:
ui.status("no modified go files\n")
return
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
try:
cmd = ["gofmt", "-l"]
if not opts["list"]:
cmd += ["-w"]
if subprocess.call(cmd + files) != 0:
raise hg_util.Abort("gofmt did not exit cleanly")
except hg_error.Abort, e:
raise
except:
raise hg_util.Abort("gofmt: " + ExceptionDetail())
return
def gofmt_required(files):
return [f for f in files if (not f.startswith('test/') or f.startswith('test/bench/')) and f.endswith('.go')]
#######################################################################
# hg mail
@hgcommand
def mail(ui, repo, *pats, **opts):
"""mail a change for review
Uploads a patch to the code review server and then sends mail
to the reviewer and CC list asking for a review.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
cl, err = CommandLineCL(ui, repo, pats, opts, op="mail", defaultcc=defaultcc)
if err != "":
raise hg_util.Abort(err)
cl.Upload(ui, repo, gofmt_just_warn=True)
if not cl.reviewer:
# If no reviewer is listed, assign the review to defaultcc.
# This makes sure that it appears in the
# codereview.appspot.com/user/defaultcc
# page, so that it doesn't get dropped on the floor.
if not defaultcc or cl.private:
raise hg_util.Abort("no reviewers listed in CL")
cl.cc = Sub(cl.cc, defaultcc)
cl.reviewer = defaultcc
cl.Flush(ui, repo)
if cl.files == []:
raise hg_util.Abort("no changed files, not sending mail")
cl.Mail(ui, repo)
#######################################################################
# hg p / hg pq / hg ps / hg pending
@hgcommand
def ps(ui, repo, *pats, **opts):
"""alias for hg p --short
"""
opts['short'] = True
return pending(ui, repo, *pats, **opts)
@hgcommand
def pq(ui, repo, *pats, **opts):
"""alias for hg p --quick
"""
opts['quick'] = True
return pending(ui, repo, *pats, **opts)
@hgcommand
def pending(ui, repo, *pats, **opts):
"""show pending changes
Lists pending changes followed by a list of unassigned but modified files.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
quick = opts.get('quick', False)
short = opts.get('short', False)
m = LoadAllCL(ui, repo, web=not quick and not short)
names = m.keys()
names.sort()
for name in names:
cl = m[name]
if short:
ui.write(name + "\t" + line1(cl.desc) + "\n")
else:
ui.write(cl.PendingText(quick=quick) + "\n")
if short:
return 0
files = DefaultFiles(ui, repo, [])
if len(files) > 0:
s = "Changed files not in any CL:\n"
for f in files:
s += "\t" + f + "\n"
ui.write(s)
#######################################################################
# hg submit
def need_sync():
raise hg_util.Abort("local repository out of date; must sync before submit")
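# branch_prefix returns a "[branch] " prefix for commit messages
# when the working directory is on a non-default work branch.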
def branch_prefix(ui, repo):
prefix = ""
branch = repo[None].branch()
if workbranch(branch) and branch != "default":
prefix = "[" + branch + "] "
return prefix
@hgcommand
def submit(ui, repo, *pats, **opts):
"""submit change to remote repository
Submits change to remote repository.
Bails out if the local repository is not in sync with the remote one.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
# We already called this on startup but sometimes Mercurial forgets.
set_mercurial_encoding_to_utf8()
if not opts["no_incoming"] and hg_incoming(ui, repo):
need_sync()
cl, err = CommandLineCL(ui, repo, pats, opts, op="submit", defaultcc=defaultcc)
if err != "":
raise hg_util.Abort(err)
user = None
if cl.copied_from:
user = cl.copied_from
userline = CheckContributor(ui, repo, user)
typecheck(userline, str)
about = ""
if not cl.lgtm and not opts.get('tbr') and needLGTM(cl):
raise hg_util.Abort("this CL has not been LGTM'ed")
if cl.lgtm:
about += "LGTM=" + JoinComma([CutDomain(who) for (who, line, approval) in cl.lgtm if approval]) + "\n"
reviewer = cl.reviewer
if opts.get('tbr'):
tbr = SplitCommaSpace(opts.get('tbr'))
for name in tbr:
if name.startswith('golang-'):
raise hg_util.Abort("--tbr requires a person, not a mailing list")
cl.reviewer = Add(cl.reviewer, tbr)
about += "TBR=" + JoinComma([CutDomain(s) for s in tbr]) + "\n"
if reviewer:
about += "R=" + JoinComma([CutDomain(s) for s in reviewer]) + "\n"
if cl.cc:
about += "CC=" + JoinComma([CutDomain(s) for s in cl.cc]) + "\n"
if not cl.reviewer and needLGTM(cl):
raise hg_util.Abort("no reviewers listed in CL")
if not cl.local:
raise hg_util.Abort("cannot submit non-local CL")
# upload, to sync current patch and also get change number if CL is new.
if not cl.copied_from:
cl.Upload(ui, repo, gofmt_just_warn=True)
# check gofmt for real; allowed upload to warn in order to save CL.
cl.Flush(ui, repo)
CheckFormat(ui, repo, cl.files)
about += "%s%s\n" % (server_url_base, cl.name)
if cl.copied_from:
about += "\nCommitter: " + CheckContributor(ui, repo, None) + "\n"
typecheck(about, str)
if not cl.mailed and not cl.copied_from: # in case this is TBR
cl.Mail(ui, repo)
# submit changes locally
message = branch_prefix(ui, repo) + cl.desc.rstrip() + "\n\n" + about
typecheck(message, str)
set_status("pushing " + cl.name + " to remote server")
if hg_outgoing(ui, repo):
raise hg_util.Abort("local repository corrupt or out-of-phase with remote: found outgoing changes")
old_heads = len(hg_heads(ui, repo).split())
# Normally we commit listing the specific files in the CL.
# If there are no changed files other than those in the CL, however,
# let hg build the list, because then committing a merge works.
# (You cannot name files for a merge commit, even if you name
# all the files that would be committed by not naming any.)
files = ['path:'+f for f in cl.files]
if ChangedFiles(ui, repo, []) == cl.files:
files = []
global commit_okay
commit_okay = True
ret = hg_commit(ui, repo, *files, message=message, user=userline)
commit_okay = False
if ret:
raise hg_util.Abort("nothing changed")
node = repo["-1"].node()
# push to remote; if it fails for any reason, roll back
try:
new_heads = len(hg_heads(ui, repo).split())
if old_heads != new_heads and not (old_heads == 0 and new_heads == 1):
# Created new head, so we weren't up to date.
need_sync()
# Push changes to remote. If it works, we're committed. If not, roll back.
try:
if hg_push(ui, repo):
raise hg_util.Abort("push error")
except hg_error.Abort, e:
if e.message.find("push creates new heads") >= 0:
# Remote repository had changes we missed.
need_sync()
raise
except urllib2.HTTPError, e:
print >>sys.stderr, "pushing to remote server failed; do you have commit permissions?"
raise
except:
real_rollback()
raise
# We're committed. Upload final patch, close review, add commit message.
changeURL = hg_node.short(node)
url = ui.expandpath("default")
m = re.match("(^https?://([^@/]+@)?([^.]+)\.googlecode\.com/hg/?)" + "|" +
"(^https?://([^@/]+@)?code\.google\.com/p/([^/.]+)(\.[^./]+)?/?)", url)
if m:
if m.group(1): # prj.googlecode.com/hg/ case
changeURL = "https://code.google.com/p/%s/source/detail?r=%s" % (m.group(3), changeURL)
elif m.group(4) and m.group(7): # code.google.com/p/prj.subrepo/ case
changeURL = "https://code.google.com/p/%s/source/detail?r=%s&repo=%s" % (m.group(6), changeURL, m.group(7)[1:])
elif m.group(4): # code.google.com/p/prj/ case
changeURL = "https://code.google.com/p/%s/source/detail?r=%s" % (m.group(6), changeURL)
else:
print >>sys.stderr, "URL: ", url
else:
print >>sys.stderr, "URL: ", url
pmsg = "*** Submitted as " + changeURL + " ***\n\n" + message
# When posting, move reviewers to CC line,
# so that the issue stops showing up in their "My Issues" page.
PostMessage(ui, cl.name, pmsg, reviewers="", cc=JoinComma(cl.reviewer+cl.cc))
if not cl.copied_from:
EditDesc(cl.name, closed=True, private=cl.private)
cl.Delete(ui, repo)
c = repo[None]
if c.branch() == releaseBranch and not c.modified() and not c.added() and not c.removed():
ui.write("switching from %s to default branch.\n" % releaseBranch)
err = hg_clean(repo, "default")
if err:
return err
return 0
def needLGTM(cl):
rev = cl.reviewer
isGobot = 'gobot' in rev or '<EMAIL>' in rev or '<EMAIL>' in rev
# A+C CLs generated by addca do not need LGTM
if cl.desc.startswith('A+C:') and 'Generated by a+c.' in cl.desc and isGobot:
return False
# CLs modifying only go1.x.txt do not need LGTM
if len(cl.files) == 1 and cl.files[0].startswith('doc/go1.') and cl.files[0].endswith('.txt'):
return False
# Other CLs need LGTM
return True
#######################################################################
# hg sync
@hgcommand
def sync(ui, repo, **opts):
"""synchronize with remote repository
Incorporates recent changes from the remote repository
into the local repository.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
if not opts["local"]:
# If there are incoming CLs, pull -u will do the update.
# If there are no incoming CLs, do hg update to make sure
# that an update always happens regardless. This is less
# surprising than update depending on incoming CLs.
# It is important not to do both hg pull -u and hg update
# in the same command, because the hg update will end
# up marking resolve conflicts from the hg pull -u as resolved,
# causing files with <<< >>> markers to not show up in
# hg resolve -l. Yay Mercurial.
if hg_incoming(ui, repo):
err = hg_pull(ui, repo, update=True)
else:
err = hg_update(ui, repo)
if err:
return err
sync_changes(ui, repo)
def sync_changes(ui, repo):
# Look through recent change log descriptions to find
# potential references to http://.*/our-CL-number.
# Double-check them by looking at the Rietveld log.
for rev in hg_log(ui, repo, limit=100, template="{node}\n").split():
desc = repo[rev].description().strip()
for clname in re.findall('(?m)^https?://(?:[^\n]+)/([0-9]+)$', desc):
if IsLocalCL(ui, repo, clname) and IsRietveldSubmitted(ui, clname, repo[rev].hex()):
ui.warn("CL %s submitted as %s; closing\n" % (clname, repo[rev]))
cl, err = LoadCL(ui, repo, clname, web=False)
if err != "":
ui.warn("loading CL %s: %s\n" % (clname, err))
continue
if not cl.copied_from:
EditDesc(cl.name, closed=True, private=cl.private)
cl.Delete(ui, repo)
# Remove files that are not modified from the CLs in which they appear.
all = LoadAllCL(ui, repo, web=False)
changed = ChangedFiles(ui, repo, [])
for cl in all.values():
extra = Sub(cl.files, changed)
if extra:
ui.warn("Removing unmodified files from CL %s:\n" % (cl.name,))
for f in extra:
ui.warn("\t%s\n" % (f,))
cl.files = Sub(cl.files, extra)
cl.Flush(ui, repo)
if not cl.files:
if not cl.copied_from:
ui.warn("CL %s has no files; delete (abandon) with hg change -d %s\n" % (cl.name, cl.name))
else:
ui.warn("CL %s has no files; delete locally with hg change -D %s\n" % (cl.name, cl.name))
return 0
#######################################################################
# hg upload
@hgcommand
def upload(ui, repo, name, **opts):
"""upload diffs to the code review server
Uploads the current modifications for a given change to the server.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
repo.ui.quiet = True
cl, err = LoadCL(ui, repo, name, web=True)
if err != "":
raise hg_util.Abort(err)
if not cl.local:
raise hg_util.Abort("cannot upload non-local change")
cl.Upload(ui, repo)
print "%s%s\n" % (server_url_base, cl.name)
return 0
#######################################################################
# Table of commands, supplied to Mercurial for installation.
review_opts = [
('r', 'reviewer', '', 'add reviewer'),
('', 'cc', '', 'add cc'),
('', 'tbr', '', 'add future reviewer'),
('m', 'message', '', 'change description (for new change)'),
]
cmdtable = {
# The ^ means to show this command in the help text that
# is printed when running hg with no arguments.
"^change": (
change,
[
('d', 'delete', None, 'delete existing change list'),
('D', 'deletelocal', None, 'delete locally, but do not change CL on server'),
('i', 'stdin', None, 'read change list from standard input'),
('o', 'stdout', None, 'print change list to standard output'),
('p', 'pending', None, 'print pending summary to standard output'),
],
"[-d | -D] [-i] [-o] change# or FILE ..."
),
"^clpatch": (
clpatch,
[
('', 'ignore_hgapplydiff_failure', None, 'create CL metadata even if hgapplydiff fails'),
('', 'no_incoming', None, 'disable check for incoming changes'),
],
"change#"
),
# Would prefer to call this codereview-login, but then
# hg help codereview prints the help for this command
# instead of the help for the extension.
"code-login": (
code_login,
[],
"",
),
"^download": (
download,
[],
"change#"
),
"^file": (
file,
[
('d', 'delete', None, 'delete files from change list (but not repository)'),
],
"[-d] change# FILE ..."
),
"^gofmt": (
gofmt,
[
('l', 'list', None, 'list files that would change, but do not edit them'),
],
"FILE ..."
),
"^pending|p": (
pending,
[
('s', 'short', False, 'show short result form'),
('', 'quick', False, 'do not consult codereview server'),
],
"[FILE ...]"
),
"^ps": (
ps,
[],
"[FILE ...]"
),
"^pq": (
pq,
[],
"[FILE ...]"
),
"^mail": (
mail,
review_opts + [
] + hg_commands.walkopts,
"[-r reviewer] [--cc cc] [change# | file ...]"
),
"^release-apply": (
release_apply,
[
('', 'ignore_hgapplydiff_failure', None, 'create CL metadata even if hgapplydiff fails'),
('', 'no_incoming', None, 'disable check for incoming changes'),
],
"change#"
),
# TODO: release-start, release-tag, weekly-tag
"^submit": (
submit,
review_opts + [
('', 'no_incoming', None, 'disable initial incoming check (for testing)'),
] + hg_commands.walkopts + hg_commands.commitopts + hg_commands.commitopts2,
"[-r reviewer] [--cc cc] [change# | file ...]"
),
"^sync": (
sync,
[
('', 'local', None, 'do not pull changes from remote repository')
],
"[--local]",
),
"^undo": (
undo,
[
('', 'ignore_hgapplydiff_failure', None, 'create CL metadata even if hgapplydiff fails'),
('', 'no_incoming', None, 'disable check for incoming changes'),
],
"change#"
),
"^upload": (
upload,
[],
"change#"
),
}
#######################################################################
# Mercurial extension initialization
def norollback(*pats, **opts):
"""(disabled when using this extension)"""
raise hg_util.Abort("codereview extension enabled; use undo instead of rollback")
codereview_init = False
def uisetup(ui):
global testing
testing = ui.config("codereview", "testing")
# Disable the Mercurial commands that might change the repository.
# Only commands in this extension are supposed to do that.
ui.setconfig("hooks", "pre-commit.codereview", precommithook) # runs before 'hg commit'
ui.setconfig("hooks", "precommit.codereview", precommithook) # catches all cases
def reposetup(ui, repo):
global codereview_disabled
global defaultcc
# reposetup gets called both for the local repository
# and also for any repository we are pulling or pushing to.
# Only initialize the first time.
global codereview_init
if codereview_init:
return
codereview_init = True
start_status_thread()
# Read repository-specific options from lib/codereview/codereview.cfg or codereview.cfg.
root = ''
try:
root = repo.root
except:
# Yes, repo might not have root; see issue 959.
codereview_disabled = 'codereview disabled: repository has no root'
return
repo_config_path = ''
p1 = root + '/lib/codereview/codereview.cfg'
p2 = root + '/codereview.cfg'
if os.access(p1, os.F_OK):
repo_config_path = p1
else:
repo_config_path = p2
try:
f = open(repo_config_path)
for line in f:
if line.startswith('defaultcc:'):
defaultcc = SplitCommaSpace(line[len('defaultcc:'):])
if line.startswith('contributors:'):
global contributorsURL
contributorsURL = line[len('contributors:'):].strip()
except:
codereview_disabled = 'codereview disabled: cannot open ' + repo_config_path
return
remote = ui.config("paths", "default", "")
if remote.find("://") < 0 and not testing:
raise hg_util.Abort("codereview: default path '%s' is not a URL" % (remote,))
InstallMatch(ui, repo)
RietveldSetup(ui, repo)
# Rollback removes an existing commit. Don't do that either.
global real_rollback
real_rollback = repo.rollback
repo.rollback = norollback
#######################################################################
# Wrappers around upload.py for interacting with Rietveld
from HTMLParser import HTMLParser
# HTML form parser
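# It records the name/value pairs of <input> elements and the text of
# <textarea> elements, so that Rietveld's edit and publish forms can be
# refilled and resubmitted.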
class FormParser(HTMLParser):
def __init__(self):
self.map = {}
self.curtag = None
self.curdata = None
HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag == "input":
key = None
value = ''
for a in attrs:
if a[0] == 'name':
key = a[1]
if a[0] == 'value':
value = a[1]
if key is not None:
self.map[key] = value
if tag == "textarea":
key = None
for a in attrs:
if a[0] == 'name':
key = a[1]
if key is not None:
self.curtag = key
self.curdata = ''
def handle_endtag(self, tag):
if tag == "textarea" and self.curtag is not None:
self.map[self.curtag] = self.curdata
self.curtag = None
self.curdata = None
def handle_charref(self, name):
self.handle_data(unichr(int(name)))
def handle_entityref(self, name):
import htmlentitydefs
if name in htmlentitydefs.entitydefs:
self.handle_data(htmlentitydefs.entitydefs[name])
else:
self.handle_data("&" + name + ";")
def handle_data(self, data):
if self.curdata is not None:
self.curdata += data
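# JSONGet fetches path from the Rietveld server and returns the decoded
# JSON reply (after fix_json cleanup), or None, with a warning, if the
# request or the decode fails.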
def JSONGet(ui, path):
try:
data = MySend(path, force_auth=False)
typecheck(data, str)
d = fix_json(json.loads(data))
except:
ui.warn("JSONGet %s: %s\n" % (path, ExceptionDetail()))
return None
return d
# Clean up json parser output to match our expectations:
# * all strings are UTF-8-encoded str, not unicode.
# * missing fields are missing, not None,
# so that d.get("foo", defaultvalue) works.
def fix_json(x):
if type(x) in [str, int, float, bool, type(None)]:
pass
elif type(x) is unicode:
x = x.encode("utf-8")
elif type(x) is list:
for i in range(len(x)):
x[i] = fix_json(x[i])
elif type(x) is dict:
todel = []
for k in x:
if x[k] is None:
todel.append(k)
else:
x[k] = fix_json(x[k])
for k in todel:
del x[k]
else:
raise hg_util.Abort("unknown type " + str(type(x)) + " in fix_json")
if type(x) is str:
x = x.replace('\r\n', '\n')
return x
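# IsRietveldSubmitted reports whether the Rietveld issue clname carries a
# "*** Submitted as ... ***" message whose revision (at least 8 hex digits)
# is a prefix of hex.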
def IsRietveldSubmitted(ui, clname, hex):
dict = JSONGet(ui, "/api/" + clname + "?messages=true")
if dict is None:
return False
for msg in dict.get("messages", []):
text = msg.get("text", "")
regex = '\*\*\* Submitted as [^*]*?r=([0-9a-f]+)[^ ]* \*\*\*'
if testing:
regex = '\*\*\* Submitted as ([0-9a-f]+) \*\*\*'
m = re.match(regex, text)
if m is not None and len(m.group(1)) >= 8 and hex.startswith(m.group(1)):
return True
return False
def IsRietveldMailed(cl):
for msg in cl.dict.get("messages", []):
if msg.get("text", "").find("I'd like you to review this change") >= 0:
return True
return False
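# DownloadCL loads CL clname and its most recent patch set from Rietveld,
# returning (cl, vers, patch, err); vers is the revision the patch was
# diffed against, when the patch set message records one.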
def DownloadCL(ui, repo, clname):
set_status("downloading CL " + clname)
cl, err = LoadCL(ui, repo, clname, web=True)
if err != "":
return None, None, None, "error loading CL %s: %s" % (clname, err)
# Find most recent diff
diffs = cl.dict.get("patchsets", [])
if not diffs:
return None, None, None, "CL has no patch sets"
patchid = diffs[-1]
patchset = JSONGet(ui, "/api/" + clname + "/" + str(patchid))
if patchset is None:
return None, None, None, "error loading CL patchset %s/%d" % (clname, patchid)
if patchset.get("patchset", 0) != patchid:
return None, None, None, "malformed patchset information"
vers = ""
msg = patchset.get("message", "").split()
if len(msg) >= 3 and msg[0] == "diff" and msg[1] == "-r":
vers = msg[2]
diff = "/download/issue" + clname + "_" + str(patchid) + ".diff"
diffdata = MySend(diff, force_auth=False)
# Print warning if email is not in CONTRIBUTORS file.
email = cl.dict.get("owner_email", "")
if not email:
return None, None, None, "cannot find owner for %s" % (clname)
him = FindContributor(ui, repo, email)
me = FindContributor(ui, repo, None)
if him == me:
cl.mailed = IsRietveldMailed(cl)
else:
cl.copied_from = email
return cl, vers, diffdata, ""
def MySend(request_path, payload=None,
content_type="application/octet-stream",
timeout=None, force_auth=True,
**kwargs):
"""Run MySend1 maybe twice, because Rietveld is unreliable."""
try:
return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
except Exception, e:
if type(e) != urllib2.HTTPError or e.code != 500: # only retry on HTTP 500 error
raise
print >>sys.stderr, "Loading "+request_path+": "+ExceptionDetail()+"; trying again in 2 seconds."
time.sleep(2)
return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
# Like upload.py Send but only authenticates when the
# redirect is to www.google.com/accounts. This keeps
# unnecessary redirects from happening during testing.
def MySend1(request_path, payload=None,
content_type="application/octet-stream",
timeout=None, force_auth=True,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
global rpc
if rpc == None:
rpc = GetRpcServer(upload_options)
self = rpc
if not self.authenticated and force_auth:
self._Authenticate()
if request_path is None:
return
if timeout is None:
timeout = 30 # seconds
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "https://%s%s" % (self.host, request_path)
if testing:
url = url.replace("https://", "http://")
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
# Translate \r\n into \n, because Rietveld doesn't.
response = response.replace('\r\n', '\n')
# who knows what urllib will give us
if type(response) == unicode:
response = response.encode("utf-8")
typecheck(response, str)
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401:
self._Authenticate()
elif e.code == 302:
loc = e.info()["location"]
				if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:
return ''
self._Authenticate()
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
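# GetForm fetches the HTML form at url and returns its input/textarea
# fields as a dict of utf-8 encoded strings.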
def GetForm(url):
f = FormParser()
f.feed(ustr(MySend(url))) # f.feed wants unicode
f.close()
# convert back to utf-8 to restore sanity
m = {}
for k,v in f.map.items():
m[k.encode("utf-8")] = v.replace("\r\n", "\n").encode("utf-8")
return m
def EditDesc(issue, subject=None, desc=None, reviewers=None, cc=None, closed=False, private=False):
set_status("uploading change to description")
form_fields = GetForm("/" + issue + "/edit")
if subject is not None:
form_fields['subject'] = subject
if desc is not None:
form_fields['description'] = desc
if reviewers is not None:
form_fields['reviewers'] = reviewers
if cc is not None:
form_fields['cc'] = cc
if closed:
form_fields['closed'] = "checked"
if private:
form_fields['private'] = "checked"
ctype, body = EncodeMultipartFormData(form_fields.items(), [])
response = MySend("/" + issue + "/edit", body, content_type=ctype)
if response != "":
print >>sys.stderr, "Error editing description:\n" + "Sent form: \n", form_fields, "\n", response
sys.exit(2)
def PostMessage(ui, issue, message, reviewers=None, cc=None, send_mail=True, subject=None):
set_status("uploading message")
form_fields = GetForm("/" + issue + "/publish")
if reviewers is not None:
form_fields['reviewers'] = reviewers
if cc is not None:
form_fields['cc'] = cc
if send_mail:
form_fields['send_mail'] = "checked"
else:
del form_fields['send_mail']
if subject is not None:
form_fields['subject'] = subject
form_fields['message'] = message
form_fields['message_only'] = '1' # Don't include draft comments
if reviewers is not None or cc is not None:
form_fields['message_only'] = '' # Must set '' in order to override cc/reviewer
	ctype = "application/x-www-form-urlencoded"
body = urllib.urlencode(form_fields)
response = MySend("/" + issue + "/publish", body, content_type=ctype)
if response != "":
print response
sys.exit(2)
class opt(object):
pass
def RietveldSetup(ui, repo):
global force_google_account
global rpc
global server
global server_url_base
global upload_options
global verbosity
if not ui.verbose:
verbosity = 0
# Config options.
x = ui.config("codereview", "server")
if x is not None:
server = x
# TODO(rsc): Take from ui.username?
email = None
x = ui.config("codereview", "email")
if x is not None:
email = x
server_url_base = "https://" + server + "/"
if testing:
server_url_base = server_url_base.replace("https://", "http://")
force_google_account = ui.configbool("codereview", "force_google_account", False)
upload_options = opt()
upload_options.email = email
upload_options.host = None
upload_options.verbose = 0
upload_options.description = None
upload_options.description_file = None
upload_options.reviewers = None
upload_options.cc = None
upload_options.message = None
upload_options.issue = None
upload_options.download_base = False
upload_options.send_mail = False
upload_options.vcs = None
upload_options.server = server
upload_options.save_cookies = True
if testing:
upload_options.save_cookies = False
upload_options.email = "<EMAIL>"
rpc = None
global releaseBranch
tags = repo.branchmap().keys()
if 'release-branch.go10' in tags:
# NOTE(rsc): This tags.sort is going to get the wrong
# answer when comparing release-branch.go9 with
# release-branch.go10. It will be a while before we care.
raise hg_util.Abort('tags.sort needs to be fixed for release-branch.go10')
tags.sort()
for t in tags:
if t.startswith('release-branch.go'):
releaseBranch = t
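# workbranch reports whether the named branch is treated as a
# development (work) branch.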
def workbranch(name):
return name == "default" or name.startswith('dev.') or name == 'release-branch.go1.4'
#######################################################################
# http://codereview.appspot.com/static/upload.py, heavily edited.
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = [
'application/javascript',
'application/x-javascript',
'application/x-freemind'
]
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
# .reason is now a read-only property based on .msg
# this means we ignore 'msg', but that seems to work fine.
self.msg = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={}, save_cookies=False):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
"""
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The <PASSWORD>
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com") and not force_google_account:
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=") for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
reqUrl = "https://%s/_ah/login?%s" % (self.host, urllib.urlencode(args))
if testing:
reqUrl = reqUrl.replace("https://", "http://")
req = self._CreateRequest(reqUrl)
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.msg == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.msg == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.msg == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.msg == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.msg == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.msg == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.msg == "ServiceDisabled":
print >>sys.stderr, "The user's access to the service has been disabled."
break
if e.msg == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "https://%s%s" % (self.host, request_path)
if testing:
url = url.replace("https://", "http://")
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
self._Authenticate()
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies_" + server)
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" % self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
			# Don't save cookies across runs of upload.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
def GetRpcServer(options):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
def GetUserCredentials():
"""Prompts the user for a username and password."""
# Disable status prints so they don't obscure the password prompt.
global global_status
st = global_status
global_status = None
email = options.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % options.server)
password = getpass.getpass("Password for %s: " % email)
# Put status back.
global_status = st
return (email, password)
# If this is the dev_appserver, use fake authentication.
host = (options.host or options.server).lower()
if host == "localhost" or host.startswith("localhost:"):
email = options.email
if email is None:
email = "<EMAIL>"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
options.server,
lambda: (email, "password"),
host_override=options.host,
extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email},
save_cookies=options.save_cookies)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
return rpc_server_class(options.server, GetUserCredentials,
host_override=options.host, save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
typecheck(key, str)
typecheck(value, str)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for (key, filename, value) in files:
typecheck(key, str)
typecheck(filename, str)
typecheck(value, str)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True, env=os.environ):
"""Executes a command and returns the output from stdout and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (output, return code)
"""
logging.info("Running %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines, env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False, env=os.environ):
data, retcode = RunShellWithReturnCode(command, print_output, universal_newlines, env)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
			prompt = "Are you sure you want to continue? (y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = to_slash(filename.strip())
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
set_status("uploading " + filename)
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [
("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields, [("data", filename, content)])
response_body = rpc_server.Send(url, body, content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
# Don't want to spawn too many threads, nor do we want to
# hit Rietveld too hard, or it will start serving 500 errors.
# When 8 works, it's no better than 4, and sometimes 8 is
# too many for Rietveld to handle.
MAX_PARALLEL_UPLOADS = 4
sema = threading.BoundedSemaphore(MAX_PARALLEL_UPLOADS)
upload_threads = []
finished_upload_threads = []
class UploadFileThread(threading.Thread):
def __init__(self, args):
threading.Thread.__init__(self)
self.args = args
def run(self):
UploadFile(*self.args)
finished_upload_threads.append(self)
sema.release()
def StartUploadFile(*args):
sema.acquire()
while len(finished_upload_threads) > 0:
t = finished_upload_threads.pop()
upload_threads.remove(t)
t.join()
t = UploadFileThread(args)
upload_threads.append(t)
t.start()
def WaitForUploads():
for t in upload_threads:
t.join()
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
StartUploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
StartUploadFile(filename, file_id, new_content, is_binary, status, False)
WaitForUploads()
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinary(self, filename):
"""Returns true if the guessed mimetyped isnt't in text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
class FakeMercurialUI(object):
def __init__(self):
self.quiet = True
self.output = ''
self.debugflag = False
def write(self, *args, **opts):
self.output += ' '.join(args)
def copy(self):
return self
def status(self, *args, **opts):
pass
def formatter(self, topic, opts):
from mercurial.formatter import plainformatter
return plainformatter(self, topic, opts)
def readconfig(self, *args, **opts):
pass
def expandpath(self, *args, **opts):
return global_ui.expandpath(*args, **opts)
def configitems(self, *args, **opts):
return global_ui.configitems(*args, **opts)
def config(self, *args, **opts):
return global_ui.config(*args, **opts)
use_hg_shell = False # set to True to shell out to hg always; slower
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, ui, repo):
super(MercurialVCS, self).__init__(options)
self.ui = ui
self.repo = repo
self.status = None
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo.root)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
mqparent, err = RunShellWithReturnCode(['hg', 'log', '--rev', 'qparent', '--template={node}'])
if not err and mqparent != "":
self.base_rev = mqparent
else:
out = RunShell(["hg", "parents", "-q", "--template={node} {branch}"], silent_ok=True).strip()
if not out:
# No revisions; use 0 to mean a repository with nothing.
out = "0:0 default"
# Find parent along current branch.
branch = repo[None].branch()
base = ""
for line in out.splitlines():
fields = line.strip().split(' ')
if fields[1] == branch:
base = fields[0]
break
if base == "":
# Use the first parent
base = out.strip().split(' ')[0]
self.base_rev = base
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
assert filename.startswith(self.subdir), (filename, self.subdir)
return filename[len(self.subdir):].lstrip(r"\/")
def GenerateDiff(self, extra_args):
# If no file specified, restrict to the current subdir
extra_args = extra_args or ["."]
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
				# Modify the line to make it look like it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def get_hg_status(self, rev, path):
# We'd like to use 'hg status -C path', but that is buggy
# (see http://mercurial.selenic.com/bts/issue3023).
# Instead, run 'hg status -C' without a path
# and skim the output for the path we want.
if self.status is None:
if use_hg_shell:
out = RunShell(["hg", "status", "-C", "--rev", rev])
else:
fui = FakeMercurialUI()
ret = hg_commands.status(fui, self.repo, *[], **{'rev': [rev], 'copies': True})
if ret:
raise hg_util.Abort(ret)
out = fui.output
self.status = out.splitlines()
for i in range(len(self.status)):
# line is
# A path
# M path
# etc
line = to_slash(self.status[i])
if line[2:] == path:
if i+1 < len(self.status) and self.status[i+1][:2] == ' ':
return self.status[i:i+2]
return self.status[i:i+1]
raise hg_util.Abort("no status for " + path)
def GetBaseFile(self, filename):
set_status("inspecting " + filename)
# "hg status" and "hg cat" both take a path relative to the current subdir
# rather than to the repo root, but "hg diff" has given us the full path
# to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
out = self.get_hg_status(self.base_rev, relpath)
status, what = out[0].split(' ', 1)
if len(out) > 1 and status == "A" and what == relpath:
oldrelpath = out[1].strip()
status = "M"
if ":" in self.base_rev:
base_rev = self.base_rev.split(":", 1)[0]
else:
base_rev = self.base_rev
if status != "A":
if use_hg_shell:
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath], silent_ok=True)
else:
base_content = str(self.repo[base_rev][oldrelpath].data())
is_binary = "\0" in base_content # Mercurial's heuristic
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or "\0" in new_content
if is_binary and base_content and use_hg_shell:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary or not self.IsImage(relpath):
new_content = None
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same
# otherwise the file shows up twice.
temp_filename = to_slash(temp_filename.strip())
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
set_status("uploading patch for " + patch[0])
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
|
moto/organizations/urls.py | gtourkas/moto | 5,460 | 11196575 | from .responses import OrganizationsResponse
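# Register the AWS Organizations endpoint host pattern and route all
# requests to the OrganizationsResponse dispatcher.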
url_bases = [r"https?://organizations\.(.+)\.amazonaws\.com"]
url_paths = {"{0}/$": OrganizationsResponse.dispatch}
|
unit6/spiders/p5_downloader_middleware_handson/p5_downloader_middleware_handson/middlewares.py | nulearn3296/scrapy-training | 182 | 11196580 | <gh_stars>100-1000
from scrapy import signals
from scrapy.http import HtmlResponse
from scrapy.exceptions import NotConfigured
from selenium import webdriver
class SeleniumDownloaderMiddleware(object):
def __init__(self):
self.driver = webdriver.PhantomJS()
@classmethod
def from_crawler(cls, crawler):
m = cls()
if not crawler.settings.getbool('SELENIUM_ENABLED'):
raise NotConfigured()
crawler.signals.connect(m.spider_closed, signal=signals.spider_closed)
return m
def process_request(self, request, spider):
if request.meta.get('nojs'):
# disable js rendering on a per-request basis
return
self.driver.get(request.url)
content = self.driver.page_source
return HtmlResponse(request.url, body=content, encoding='utf-8')
def spider_closed(self, spider):
self.driver.close()
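# Example wiring (a sketch, not part of the original file). The middleware only
# activates when SELENIUM_ENABLED is true and it is registered in the project's
# settings.py; the dotted path and priority below are illustrative.
#
# SELENIUM_ENABLED = True
# DOWNLOADER_MIDDLEWARES = {
#     'p5_downloader_middleware_handson.middlewares.SeleniumDownloaderMiddleware': 543,
# }
#
# A spider can skip JS rendering for a single request via
# Request(url, meta={'nojs': True}), which this middleware checks in
# process_request().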
|
fawkes/utils.py | rohankumardubey/fawkes | 4,667 | 11196583 | <gh_stars>1000+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-05-17
# @Author : <NAME> (<EMAIL>)
# @Link : https://www.shawnshan.com/
import errno
import glob
import gzip
import hashlib
import json
import os
import pickle
import random
import shutil
import sys
import tarfile
import zipfile
import PIL
import pkg_resources
import six
from keras.utils import Progbar
from six.moves.urllib.error import HTTPError, URLError
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
import keras
sys.stderr = stderr
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from fawkes.align_face import align
from six.moves.urllib.request import urlopen
if sys.version_info[0] == 2:
def urlretrieve(url, filename, reporthook=None, data=None):
def chunk_read(response, chunk_size=8192, reporthook=None):
content_type = response.info().get('Content-Length')
total_size = -1
if content_type is not None:
total_size = int(content_type.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
response = urlopen(url, data)
with open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from six.moves.urllib.request import urlretrieve
def clip_img(X, preprocessing='raw'):
X = reverse_preprocess(X, preprocessing)
X = np.clip(X, 0.0, 255.0)
X = preprocess(X, preprocessing)
return X
IMG_SIZE = 112
PREPROCESS = 'raw'
def load_image(path):
try:
img = Image.open(path)
except PIL.UnidentifiedImageError:
return None
except IsADirectoryError:
return None
try:
info = img._getexif()
except OSError:
return None
if info is not None:
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
exif = dict(img._getexif().items())
if orientation in exif.keys():
if exif[orientation] == 3:
img = img.rotate(180, expand=True)
elif exif[orientation] == 6:
img = img.rotate(270, expand=True)
elif exif[orientation] == 8:
img = img.rotate(90, expand=True)
else:
pass
img = img.convert('RGB')
image_array = image.img_to_array(img)
return image_array
def filter_image_paths(image_paths):
print("Identify {} files in the directory".format(len(image_paths)))
new_image_paths = []
new_images = []
for p in image_paths:
img = load_image(p)
if img is None:
print("{} is not an image file, skipped".format(p.split("/")[-1]))
continue
new_image_paths.append(p)
new_images.append(img)
print("Identify {} images in the directory".format(len(new_image_paths)))
return new_image_paths, new_images
class Faces(object):
def __init__(self, image_paths, loaded_images, aligner, verbose=1, eval_local=False, preprocessing=True,
no_align=False):
self.image_paths = image_paths
self.verbose = verbose
self.no_align = no_align
self.aligner = aligner
self.margin = 30
self.org_faces = []
self.cropped_faces = []
self.cropped_faces_shape = []
self.cropped_index = []
self.start_end_ls = []
self.callback_idx = []
self.images_without_face = []
for i in range(0, len(loaded_images)):
cur_img = loaded_images[i]
p = image_paths[i]
self.org_faces.append(cur_img)
if not no_align:
align_img = align(cur_img, self.aligner)
if align_img is None:
print("Find 0 face(s) in {}".format(p.split("/")[-1]))
self.images_without_face.append(i)
continue
cur_faces = align_img[0]
else:
cur_faces = [cur_img]
cur_faces = [face for face in cur_faces if face.shape[0] != 0 and face.shape[1] != 0]
cur_shapes = [f.shape[:-1] for f in cur_faces]
cur_faces_square = []
if verbose and not no_align:
print("Find {} face(s) in {}".format(len(cur_faces), p.split("/")[-1]))
if eval_local:
cur_faces = cur_faces[:1]
for img in cur_faces:
if eval_local:
base = resize(img, (IMG_SIZE, IMG_SIZE))
else:
long_size = max([img.shape[1], img.shape[0]]) + self.margin
base = np.ones((long_size, long_size, 3)) * np.mean(img, axis=(0, 1))
start1, end1 = get_ends(long_size, img.shape[0])
start2, end2 = get_ends(long_size, img.shape[1])
base[start1:end1, start2:end2, :] = img
cur_start_end = (start1, end1, start2, end2)
self.start_end_ls.append(cur_start_end)
cur_faces_square.append(base)
cur_faces_square = [resize(f, (IMG_SIZE, IMG_SIZE)) for f in cur_faces_square]
self.cropped_faces.extend(cur_faces_square)
if not self.no_align:
cur_index = align_img[1]
self.cropped_faces_shape.extend(cur_shapes)
self.cropped_index.extend(cur_index[:len(cur_faces_square)])
self.callback_idx.extend([i] * len(cur_faces_square))
if len(self.cropped_faces) == 0:
return
self.cropped_faces = np.array(self.cropped_faces)
if preprocessing:
self.cropped_faces = preprocess(self.cropped_faces, PREPROCESS)
self.cloaked_cropped_faces = None
self.cloaked_faces = np.copy(self.org_faces)
def get_faces(self):
return self.cropped_faces
def merge_faces(self, protected_images, original_images):
if self.no_align:
return np.clip(protected_images, 0.0, 255.0), self.images_without_face
self.cloaked_faces = np.copy(self.org_faces)
for i in range(len(self.cropped_faces)):
cur_protected = protected_images[i]
cur_original = original_images[i]
org_shape = self.cropped_faces_shape[i]
old_square_shape = max([org_shape[0], org_shape[1]]) + self.margin
cur_protected = resize(cur_protected, (old_square_shape, old_square_shape))
cur_original = resize(cur_original, (old_square_shape, old_square_shape))
start1, end1, start2, end2 = self.start_end_ls[i]
reshape_cloak = cur_protected - cur_original
reshape_cloak = reshape_cloak[start1:end1, start2:end2, :]
callback_id = self.callback_idx[i]
bb = self.cropped_index[i]
self.cloaked_faces[callback_id][bb[0]:bb[2], bb[1]:bb[3], :] += reshape_cloak
for i in range(0, len(self.cloaked_faces)):
self.cloaked_faces[i] = np.clip(self.cloaked_faces[i], 0.0, 255.0)
return self.cloaked_faces, self.images_without_face
def get_ends(longsize, window):
start = (longsize - window) // 2
end = start + window
return start, end
def dump_dictionary_as_json(dict, outfile):
j = json.dumps(dict)
with open(outfile, "wb") as f:
f.write(j.encode())
def load_victim_model(number_classes, teacher_model=None, end2end=False):
for l in teacher_model.layers:
l.trainable = end2end
x = teacher_model.layers[-1].output
x = Dense(number_classes)(x)
x = Activation('softmax', name="act")(x)
model = Model(teacher_model.input, x)
opt = keras.optimizers.Adadelta()
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
def resize(img, sz):
assert np.min(img) >= 0 and np.max(img) <= 255.0
from keras.preprocessing import image
im_data = image.array_to_img(img).resize((sz[1], sz[0]))
im_data = image.img_to_array(im_data)
return im_data
def init_gpu(gpu):
''' code to initialize gpu in tf2'''
if isinstance(gpu, list):
gpu_num = ','.join([str(i) for i in gpu])
else:
gpu_num = str(gpu)
if "CUDA_VISIBLE_DEVICES" in os.environ:
print('GPU already initiated')
return
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_num
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
except RuntimeError as e:
print(e)
def fix_gpu_memory(mem_fraction=1):
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf_config = None
if tf.test.is_gpu_available():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_fraction)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
tf_config.gpu_options.allow_growth = True
tf_config.log_device_placement = False
init_op = tf.global_variables_initializer()
sess = tf.Session(config=tf_config)
sess.run(init_op)
K.set_session(sess)
return sess
def preprocess(X, method):
assert method in {'raw', 'imagenet', 'inception', 'mnist'}
if method == 'raw':
pass
elif method == 'imagenet':
X = imagenet_preprocessing(X)
else:
raise Exception('unknown method %s' % method)
return X
def reverse_preprocess(X, method):
assert method in {'raw', 'imagenet', 'inception', 'mnist'}
if method == 'raw':
pass
elif method == 'imagenet':
X = imagenet_reverse_preprocessing(X)
else:
raise Exception('unknown method %s' % method)
return X
def imagenet_preprocessing(x, data_format=None):
if data_format is None:
data_format = K.image_data_format()
assert data_format in ('channels_last', 'channels_first')
x = np.array(x)
if data_format == 'channels_first':
# 'RGB'->'BGR'
if x.ndim == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
mean = [103.939, 116.779, 123.68]
std = None
# Zero-center by mean pixel
if data_format == 'channels_first':
if x.ndim == 3:
x[0, :, :] -= mean[0]
x[1, :, :] -= mean[1]
x[2, :, :] -= mean[2]
if std is not None:
x[0, :, :] /= std[0]
x[1, :, :] /= std[1]
x[2, :, :] /= std[2]
else:
x[:, 0, :, :] -= mean[0]
x[:, 1, :, :] -= mean[1]
x[:, 2, :, :] -= mean[2]
if std is not None:
x[:, 0, :, :] /= std[0]
x[:, 1, :, :] /= std[1]
x[:, 2, :, :] /= std[2]
else:
x[..., 0] -= mean[0]
x[..., 1] -= mean[1]
x[..., 2] -= mean[2]
if std is not None:
x[..., 0] /= std[0]
x[..., 1] /= std[1]
x[..., 2] /= std[2]
return x
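# Worked example (added note; channels_last, no std): an RGB pixel
# [r, g, b] = [255.0, 128.0, 64.0] is first flipped to BGR [64.0, 128.0, 255.0]
# and then mean-centered to [64.0 - 103.939, 128.0 - 116.779, 255.0 - 123.68]
# = [-39.939, 11.221, 131.32], which is what imagenet_preprocessing returns.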
def imagenet_reverse_preprocessing(x, data_format=None):
import keras.backend as K
x = np.array(x)
if data_format is None:
data_format = K.image_data_format()
assert data_format in ('channels_last', 'channels_first')
if data_format == 'channels_first':
if x.ndim == 3:
# Zero-center by mean pixel
x[0, :, :] += 103.939
x[1, :, :] += 116.779
x[2, :, :] += 123.68
# 'BGR'->'RGB'
x = x[::-1, :, :]
else:
x[:, 0, :, :] += 103.939
x[:, 1, :, :] += 116.779
x[:, 2, :, :] += 123.68
x = x[:, ::-1, :, :]
else:
# Zero-center by mean pixel
x[..., 0] += 103.939
x[..., 1] += 116.779
x[..., 2] += 123.68
# 'BGR'->'RGB'
x = x[..., ::-1]
return x
def reverse_process_cloaked(x, preprocess='imagenet'):
# x = clip_img(x, preprocess)
return reverse_preprocess(x, preprocess)
def build_bottleneck_model(model, cut_off):
bottleneck_model = Model(model.input, model.get_layer(cut_off).output)
bottleneck_model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return bottleneck_model
def load_extractor(name):
hash_map = {"extractor_2": "ce703d481db2b83513bbdafa27434703",
"extractor_0": "94854151fd9077997d69ceda107f9c6b"}
assert name in ["extractor_2", 'extractor_0']
model_file = pkg_resources.resource_filename("fawkes", "model/{}.h5".format(name))
cur_hash = hash_map[name]
model_dir = pkg_resources.resource_filename("fawkes", "model/")
os.makedirs(model_dir, exist_ok=True)
get_file("{}.h5".format(name), "http://mirror.cs.uchicago.edu/fawkes/files/{}.h5".format(name),
cache_dir=model_dir, cache_subdir='', md5_hash=cur_hash)
model = keras.models.load_model(model_file)
model = Extractor(model)
return model
class Extractor(object):
def __init__(self, model):
self.model = model
def predict(self, imgs):
imgs = imgs / 255.0
embeds = l2_norm(self.model(imgs))
return embeds
def __call__(self, x):
return self.predict(x)
def get_dataset_path(dataset):
model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
if not os.path.exists(os.path.join(model_dir, "config.json")):
raise Exception("Please config the datasets before running protection code. See more in README and config.py.")
config = json.load(open(os.path.join(model_dir, "config.json"), 'r'))
if dataset not in config:
raise Exception(
"Dataset {} does not exist, please download to data/ and add the path to this function... Abort".format(
dataset))
return config[dataset]['train_dir'], config[dataset]['test_dir'], config[dataset]['num_classes'], config[dataset][
'num_images']
def dump_image(x, filename, format="png", scale=False):
img = image.array_to_img(x, scale=scale)
img.save(filename, format)
return
def load_embeddings(feature_extractors_names):
model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
for extractor_name in feature_extractors_names:
fp = gzip.open(os.path.join(model_dir, "{}_emb.p.gz".format(extractor_name)), 'rb')
path2emb = pickle.load(fp)
fp.close()
return path2emb
def extractor_ls_predict(feature_extractors_ls, X):
feature_ls = []
for extractor in feature_extractors_ls:
cur_features = extractor.predict(X)
feature_ls.append(cur_features)
concated_feature_ls = np.concatenate(feature_ls, axis=1)
return concated_feature_ls
def pairwise_l2_distance(A, B):
BT = B.transpose()
vecProd = np.dot(A, BT)
SqA = A ** 2
sumSqA = np.matrix(np.sum(SqA, axis=1))
sumSqAEx = np.tile(sumSqA.transpose(), (1, vecProd.shape[1]))
SqB = B ** 2
sumSqB = np.sum(SqB, axis=1)
sumSqBEx = np.tile(sumSqB, (vecProd.shape[0], 1))
SqED = sumSqBEx + sumSqAEx - 2 * vecProd
SqED[SqED < 0] = 0.0
ED = np.sqrt(SqED)
return ED
def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, metric='l2'):
model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
original_feature_x = extractor_ls_predict(feature_extractors_ls, imgs)
path2emb = load_embeddings(feature_extractors_names)
items = list([(k, v) for k, v in path2emb.items()])
paths = [p[0] for p in items]
embs = [p[1] for p in items]
embs = np.array(embs)
pair_dist = pairwise_l2_distance(original_feature_x, embs)
pair_dist = np.array(pair_dist)
max_sum = np.min(pair_dist, axis=0)
max_id_ls = np.argsort(max_sum)[::-1]
max_id = random.choice(max_id_ls[:20])
target_data_id = paths[int(max_id)]
print("target ID: {}".format(target_data_id))
image_dir = os.path.join(model_dir, "target_data/{}".format(target_data_id))
os.makedirs(os.path.join(model_dir, "target_data"), exist_ok=True)
os.makedirs(image_dir, exist_ok=True)
for i in range(10):
if os.path.exists(os.path.join(model_dir, "target_data/{}/{}.jpg".format(target_data_id, i))):
continue
try:
get_file("{}.jpg".format(i),
"http://mirror.cs.uchicago.edu/fawkes/files/target_data/{}/{}.jpg".format(target_data_id, i),
cache_dir=model_dir, cache_subdir='target_data/{}/'.format(target_data_id))
except Exception:
pass
image_paths = glob.glob(image_dir + "/*.jpg")
target_images = [image.img_to_array(image.load_img(cur_path)) for cur_path in
image_paths]
target_images = np.array([resize(x, (IMG_SIZE, IMG_SIZE)) for x in target_images])
target_images = preprocess(target_images, PREPROCESS)
target_images = list(target_images)
while len(target_images) < len(imgs):
target_images += target_images
target_images = random.sample(target_images, len(imgs))
return np.array(target_images)
def l2_norm(x, axis=1):
"""l2 norm"""
norm = tf.norm(x, axis=axis, keepdims=True)
output = x / norm
return output
""" TensorFlow implementation get_file
https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/keras/utils/data_utils.py#L168-L297
"""
def get_file(fname,
origin,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir='datasets',
hash_algorithm='auto',
extract=False,
archive_format='auto',
cache_dir=None):
if cache_dir is None:
cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, cache_subdir)
_makedirs_exist_ok(datadir)
# fname = path_to_string(fname)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the ' + hash_algorithm +
' file hash does not match the original value of ' + file_hash +
', so we will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
def _extract_archive(file_path, path='.', archive_format='auto'):
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
def _makedirs_exist_ok(datadir):
if six.PY2:
# Python 2 doesn't have the exist_ok arg, so we try-except here.
try:
os.makedirs(datadir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
os.makedirs(datadir, exist_ok=True) # pylint: disable=unexpected-keyword-arg
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
Arguments:
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
Whether the file is valid
"""
if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64):
hasher = 'sha256'
else:
hasher = 'md5'
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
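# Small illustration (added; not from the original source): check a downloaded
# model file against a known md5 digest. The local path is a placeholder; the
# digest is the extractor_2 value from load_extractor() above.
#
# ok = validate_file('/tmp/extractor_2.h5',
#                    'ce703d481db2b83513bbdafa27434703', algorithm='md5')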
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
_hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Arguments:
fpath: path to the file being validated
algorithm: hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`.
The default is `'sha256'`; `'auto'` also selects sha256 here.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
if algorithm in ('sha256', 'auto'): # 'auto' falls back to sha256; the original checked len(hash), i.e. the builtin, which raises TypeError
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
|
src/tests/helpers/test_i18n.py | fabm3n/pretix | 1,248 | 11196586 | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from django.utils.translation import get_language
from pretix.base.i18n import get_language_without_region, language
from pretix.helpers.i18n import get_javascript_format, get_moment_locale
def test_js_formats():
with language('de'):
assert get_javascript_format('DATE_INPUT_FORMATS') == 'DD.MM.YYYY'
with language('en'):
assert get_javascript_format('DATE_INPUT_FORMATS') == 'YYYY-MM-DD'
with language('en-US'):
assert get_javascript_format('DATE_INPUT_FORMATS') == 'MM/DD/YYYY'
def test_get_locale():
assert get_moment_locale('af') == 'af'
assert get_moment_locale('de_Informal') == 'de'
assert get_moment_locale('de-US') == 'de'
assert get_moment_locale('en-US') == 'en'
assert get_moment_locale('en-CA') == 'en-ca'
def test_set_region():
with language('de'):
assert get_language() == 'de'
assert get_language_without_region() == 'de'
with language('de', 'US'):
assert get_language() == 'de-us'
assert get_language_without_region() == 'de'
with language('de', 'DE'):
assert get_language() == 'de-de'
assert get_language_without_region() == 'de'
with language('de-informal', 'DE'):
assert get_language() == 'de-informal'
assert get_language_without_region() == 'de-informal'
with language('pt', 'PT'):
assert get_language() == 'pt-pt'
assert get_language_without_region() == 'pt-pt'
with language('pt-pt', 'BR'):
assert get_language() == 'pt-pt'
assert get_language_without_region() == 'pt-pt'
|
tests/integration/by_text_test.py | pupsikpic/selene | 572 | 11196608 | # MIT License
#
# Copyright (c) 2015-2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from selene import by
from tests.integration.helpers.givenpage import GivenPage
def test_nested_elements_search(session_browser):
page = GivenPage(session_browser.driver)
page.opened_with_body(
'''
<div id="container">
<div>
<div>
<label>First</label>
</div>
<div>
<a href="#first">go to Heading 1</a>
</div>
</div>
<div>
<div>
<label>Second</label>
<div>
<a href="#second">go to Heading 2</a>
<a href="#third">go to Heading 3</a>
</div>
</div>
<div>
</div>
</div>
</div>
<h1 id="first">Heading 1</h2>
<h2 id="second">Heading 2</h2>
<h2 id="third">Heading 3</h2>
'''
)
session_browser.element('#container').element(by.text('Second')).element(
'./following-sibling::*'
).element(by.partial_text('Heading 3')).click()
assert "third" in session_browser.driver.current_url
|
modules/dbnd/src/dbnd/_core/tracking/python_tracking.py | busunkim96/dbnd | 224 | 11196610 | import logging
import sys
from types import FunctionType, ModuleType
from dbnd import task
from dbnd._core.tracking.no_tracking import should_not_track
logger = logging.getLogger(__name__)
def _is_function(obj):
return isinstance(obj, FunctionType)
def _is_task(obj):
"""
checks if obj is decorated func (dbnd generated object)
"""
return hasattr(obj, "__is_dbnd_task__")
def _track_function(function):
if not _is_function(function) or should_not_track(function) or _is_task(function):
return
decorated_function = task(function)
# We modify all modules since each module has its own pointers to local and imported functions.
# If a module has already imported the function we need to change the pointer in that module.
for module in sys.modules.copy().values():
if not _is_module(module):
continue
for k, v in module.__dict__.items():
if v is function:
module.__dict__[k] = decorated_function
def track_functions(*args):
""" Track functions by decorating them with @task """
for arg in args:
try:
_track_function(arg)
except Exception:
logger.exception("Failed to track %s" % arg)
def _is_module(obj):
return isinstance(obj, ModuleType)
def track_module_functions(module):
"""
Track functions inside module by decorating them with @task.
Only functions implemented in module will be tracked, imported functions won't be tracked.
"""
try:
if not _is_module(module):
return
module_objects = module.__dict__.values()
module_functions = [i for i in module_objects if _is_module_function(i, module)]
track_functions(*module_functions)
except Exception:
logger.exception("Failed to track %s" % module)
def track_modules(*args):
"""
Track functions inside the given modules by decorating them with @task.
Only functions implemented in each module are tracked; imported functions are not.
"""
for arg in args:
try:
track_module_functions(arg)
except Exception:
logger.exception("Failed to track %s" % arg)
def _is_module_function(function, module):
try:
if not _is_function(function):
return False
if not hasattr(function, "__globals__"):
return False
return function.__globals__ is module.__dict__
except Exception:
logger.exception("Failed to track %s" % function)
return False
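# Minimal usage sketch (added; not part of the original module). `my_pipeline`
# is a hypothetical project module containing plain functions; the calls below
# wrap them with @task so their executions are tracked by dbnd.
#
# import my_pipeline
# from dbnd._core.tracking.python_tracking import track_modules, track_functions
#
# track_modules(my_pipeline)                 # decorate every function defined in my_pipeline
# track_functions(my_pipeline.prepare_data)  # or track selected functions only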
|
lib/__init__.py | hadisfr/ExportHTML | 118 | 11196616 | """ExportHtml Lib."""
|
qiskit/chemistry/core/chemistry_operator.py | stefan-woerner/aqua | 504 | 11196622 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
This module contains the definition of a base class for a chemistry operator.
Such an operator takes a QMolecule and produces an input for
a quantum algorithm
"""
from abc import ABC, abstractmethod
import warnings
import logging
from typing import Dict, Union, List, Tuple, Optional, cast
import numpy as np
from qiskit.aqua.algorithms import MinimumEigensolverResult, EigensolverResult, AlgorithmResult
from qiskit.chemistry import QMolecule
logger = logging.getLogger(__name__)
# A dipole moment, when present as X, Y and Z components will normally have float values for all
# the components. However when using Z2Symmetries, if the dipole component operator does not
# commute with the symmetry then no evaluation is done and None will be used as the 'value'
# indicating no measurement of the observable took place
DipoleTuple = Tuple[Optional[float], Optional[float], Optional[float]]
class ChemistryOperator(ABC):
"""
Base class for ChemistryOperator.
"""
INFO_NUM_PARTICLES = 'num_particles'
INFO_NUM_ORBITALS = 'num_orbitals'
INFO_TWO_QUBIT_REDUCTION = 'two_qubit_reduction'
INFO_Z2SYMMETRIES = 'z2symmetries'
@abstractmethod
def __init__(self):
warnings.warn('The ChemistryOperator is deprecated as of Qiskit Aqua 0.8.0 and will be '
'removed no earlier than 3 months after the release date. Instead, the '
'FermionicTransformation can be used to transform QMolecules and construct '
'ground state result objects.', DeprecationWarning, stacklevel=2)
self._molecule_info = {}
@abstractmethod
def run(self, qmolecule):
"""
Convert the qmolecule, according to the ChemistryOperator, into an Operator
that can be given to a QuantumAlgorithm
Args:
qmolecule (QMolecule): from a chemistry driver
Returns:
Tuple: (qubit_op, aux_ops)
"""
raise NotImplementedError
def process_algorithm_result(
self, algo_result: Union[dict,
MinimumEigensolverResult,
EigensolverResult]) -> Union[Tuple[List[str], dict],
'MolecularGroundStateResult',
'MolecularExcitedStatesResult']:
"""
Takes the algorithm result and processes it as required, e.g. by
combination of any parts that were classically computed, for the
final result.
Args:
algo_result: Result from algorithm
Returns:
Final chemistry result computed from the algorithm result
"""
if isinstance(algo_result, MinimumEigensolverResult):
return self._process_algorithm_result(algo_result)
elif isinstance(algo_result, EigensolverResult):
return self._process_algorithm_result(algo_result)
else:
lines, result = self._process_algorithm_result(algo_result)
result['algorithm_retvals'] = algo_result
return lines, result
@abstractmethod
def _process_algorithm_result(self, algo_result):
raise NotImplementedError
@property
def molecule_info(self):
""" returns molecule info """
return self._molecule_info
def _add_molecule_info(self, key, value):
self._molecule_info[key] = value
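# Illustrative flow (added comment; the driver, molecule string and concrete
# subclass below are examples, not part of this file). A concrete
# ChemistryOperator such as qiskit.chemistry.core.Hamiltonian consumes a
# QMolecule produced by a driver and returns the qubit operator plus any
# auxiliary operators:
#
# from qiskit.chemistry.drivers import PySCFDriver
# from qiskit.chemistry.core import Hamiltonian
# qmolecule = PySCFDriver(atom='H .0 .0 .0; H .0 .0 0.735').run()
# qubit_op, aux_ops = Hamiltonian().run(qmolecule)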
class MolecularChemistryResult(AlgorithmResult):
"""
Molecular chemistry Result
Energies are in Hartree and dipole moments in A.U unless otherwise stated.
"""
def __init__(self, a_dict: Optional[Dict] = None) -> None:
super().__init__(a_dict)
warnings.warn('The qiskit.chemistry.chemistry_operator.MolecularChemistryResult object is '
'deprecated as of 0.8.0 and will be removed no sooner than 3 months after the'
' release. You should use qiskit.chemistry.algorithms.ground_state_solvers.'
'FermionicGroundStateResult instead.', DeprecationWarning, stacklevel=2)
@property
def algorithm_result(self) -> AlgorithmResult:
""" Returns raw algorithm result """
return self.get('algorithm_result')
@algorithm_result.setter
def algorithm_result(self, value: AlgorithmResult) -> None:
""" Sets raw algorithm result """
self.data['algorithm_result'] = value
@property
def hartree_fock_energy(self) -> float:
""" Returns Hartree-Fock energy """
return self.get('hartree_fock_energy')
@hartree_fock_energy.setter
def hartree_fock_energy(self, value: float) -> None:
""" Sets Hartree-Fock energy """
self.data['hartree_fock_energy'] = value
@property
def nuclear_repulsion_energy(self) -> Optional[float]:
""" Returns nuclear repulsion energy when available from driver """
return self.get('nuclear_repulsion_energy')
@nuclear_repulsion_energy.setter
def nuclear_repulsion_energy(self, value: float) -> None:
""" Sets nuclear repulsion energy """
self.data['nuclear_repulsion_energy'] = value
@property
def nuclear_dipole_moment(self) -> Optional[DipoleTuple]:
""" Returns nuclear dipole moment X,Y,Z components in A.U when available from driver """
return self.get('nuclear_dipole_moment')
@nuclear_dipole_moment.setter
def nuclear_dipole_moment(self, value: DipoleTuple) -> None:
""" Sets nuclear dipole moment in A.U """
self.data['nuclear_dipole_moment'] = value
class MolecularGroundStateResult(MolecularChemistryResult):
"""
Molecular Ground State Energy Result.
Energies are in Hartree and dipole moments in A.U unless otherwise stated.
"""
def __init__(self, a_dict: Optional[Dict] = None) -> None:
super().__init__(a_dict)
warnings.warn('The qiskit.chemistry.chemistry_operator.MolecularGroundStateResult object '
'is deprecated as of 0.8.0 and will be removed no sooner than 3 months after '
'the release. You should use qiskit.chemistry.algorithms.'
'ground_state_solvers.FermionicGroundStateResult instead.',
DeprecationWarning, stacklevel=2)
@property
def energy(self) -> Optional[float]:
""" Returns ground state energy if nuclear_repulsion_energy is available from driver """
nre = self.nuclear_repulsion_energy
return self.electronic_energy + nre if nre is not None else None
@property
def electronic_energy(self) -> float:
""" Returns electronic part of ground state energy """
return (self.computed_electronic_energy
+ self.ph_extracted_energy
+ self.frozen_extracted_energy)
@property
def computed_electronic_energy(self) -> float:
""" Returns computed electronic part of ground state energy """
return self.get('computed_electronic_energy')
@computed_electronic_energy.setter
def computed_electronic_energy(self, value: float) -> None:
""" Sets computed electronic part of ground state energy """
self.data['computed_electronic_energy'] = value
@property
def ph_extracted_energy(self) -> float:
""" Returns particle hole extracted part of ground state energy """
return self.get('ph_extracted_energy')
@ph_extracted_energy.setter
def ph_extracted_energy(self, value: float) -> None:
""" Sets particle hole extracted part of ground state energy """
self.data['ph_extracted_energy'] = value
@property
def frozen_extracted_energy(self) -> float:
""" Returns frozen extracted part of ground state energy """
return self.get('frozen_extracted_energy')
@frozen_extracted_energy.setter
def frozen_extracted_energy(self, value: float) -> None:
""" Sets frozen extracted part of ground state energy """
self.data['frozen_extracted_energy'] = value
# Dipole moment results. Note dipole moments are tuples of X, Y and Z components. Chemistry
# drivers either support dipole integrals or not. Note that when using Z2 symmetries, a dipole
# component operator that does not commute with the symmetry is not evaluated and its value is None.
def has_dipole(self) -> bool:
""" Returns whether dipole moment is present in result or not """
return self.nuclear_dipole_moment is not None and self.electronic_dipole_moment is not None
@property
def reverse_dipole_sign(self) -> bool:
""" Returns if electronic dipole moment sign should be reversed when adding to nuclear """
return self.get('reverse_dipole_sign')
@reverse_dipole_sign.setter
def reverse_dipole_sign(self, value: bool) -> None:
""" Sets if electronic dipole moment sign should be reversed when adding to nuclear """
self.data['reverse_dipole_sign'] = value
@property
def total_dipole_moment(self) -> Optional[float]:
""" Returns total dipole of moment """
if self.dipole_moment is None:
return None # No dipole at all
if np.any(np.equal(list(self.dipole_moment), None)):
return None # One or more components in the dipole is None
return np.sqrt(np.sum(np.power(list(self.dipole_moment), 2)))
@property
def total_dipole_moment_in_debye(self) -> Optional[float]:
""" Returns total dipole of moment in Debye """
tdm = self.total_dipole_moment
return tdm / QMolecule.DEBYE if tdm is not None else None
@property
def dipole_moment(self) -> Optional[DipoleTuple]:
""" Returns dipole moment """
edm = self.electronic_dipole_moment
if self.reverse_dipole_sign:
edm = cast(DipoleTuple, tuple(-1 * x if x is not None else None for x in edm))
return _dipole_tuple_add(edm, self.nuclear_dipole_moment)
@property
def dipole_moment_in_debye(self) -> Optional[DipoleTuple]:
""" Returns dipole moment in Debye """
dipm = self.dipole_moment
if dipm is None:
return None
dipmd0 = dipm[0]/QMolecule.DEBYE if dipm[0] is not None else None
dipmd1 = dipm[1]/QMolecule.DEBYE if dipm[1] is not None else None
dipmd2 = dipm[2]/QMolecule.DEBYE if dipm[2] is not None else None
return dipmd0, dipmd1, dipmd2
@property
def electronic_dipole_moment(self) -> Optional[DipoleTuple]:
""" Returns electronic dipole moment """
return _dipole_tuple_add(self.computed_dipole_moment,
_dipole_tuple_add(self.ph_extracted_dipole_moment,
self.frozen_extracted_dipole_moment))
@property
def computed_dipole_moment(self) -> Optional[DipoleTuple]:
""" Returns computed electronic part of dipole moment """
return self.get('computed_dipole_moment')
@computed_dipole_moment.setter
def computed_dipole_moment(self, value: DipoleTuple) -> None:
""" Sets computed electronic part of dipole moment """
self.data['computed_dipole_moment'] = value
@property
def ph_extracted_dipole_moment(self) -> Optional[DipoleTuple]:
""" Returns particle hole extracted part of dipole moment """
return self.get('ph_extracted_dipole_moment')
@ph_extracted_dipole_moment.setter
def ph_extracted_dipole_moment(self, value: DipoleTuple) -> None:
""" Sets particle hole extracted part of dipole moment """
self.data['ph_extracted_dipole_moment'] = value
@property
def frozen_extracted_dipole_moment(self) -> Optional[DipoleTuple]:
""" Returns frozen extracted part of dipole moment """
return self.get('frozen_extracted_dipole_moment')
@frozen_extracted_dipole_moment.setter
def frozen_extracted_dipole_moment(self, value: DipoleTuple) -> None:
""" Sets frozen extracted part of dipole moment """
self.data['frozen_extracted_dipole_moment'] = value
# Other measured operators. If these are not evaluated then None will be returned
# instead of any measured value.
def has_observables(self):
""" Returns whether result has aux op observables such as spin, num particles """
return self.total_angular_momentum is not None \
or self.num_particles is not None \
or self.magnetization is not None
@property
def total_angular_momentum(self) -> Optional[float]:
""" Returns total angular momentum (S^2) """
return self.get('total_angular_momentum')
@total_angular_momentum.setter
def total_angular_momentum(self, value: float) -> None:
""" Sets total angular momentum """
self.data['total_angular_momentum'] = value
@property
def spin(self) -> Optional[float]:
""" Returns computed spin """
if self.total_angular_momentum is None:
return None
return (-1.0 + np.sqrt(1 + 4 * self.total_angular_momentum)) / 2
@property
def num_particles(self) -> Optional[float]:
""" Returns measured number of particles """
return self.get('num_particles')
@num_particles.setter
def num_particles(self, value: float) -> None:
""" Sets measured number of particles """
self.data['num_particles'] = value
@property
def magnetization(self) -> Optional[float]:
""" Returns measured magnetization """
return self.get('magnetization')
@magnetization.setter
def magnetization(self, value: float) -> None:
""" Sets measured magnetization """
self.data['magnetization'] = value
def __str__(self) -> str:
""" Printable formatted result """
return '\n'.join(self.formatted)
@property
def formatted(self) -> List[str]:
""" Formatted result as a list of strings """
lines = []
lines.append('=== GROUND STATE ENERGY ===')
lines.append(' ')
lines.append('* Electronic ground state energy (Hartree): {}'.
format(round(self.electronic_energy, 12)))
lines.append(' - computed part: {}'.
format(round(self.computed_electronic_energy, 12)))
lines.append(' - frozen energy part: {}'.
format(round(self.frozen_extracted_energy, 12)))
lines.append(' - particle hole part: {}'
.format(round(self.ph_extracted_energy, 12)))
if self.nuclear_repulsion_energy is not None:
lines.append('~ Nuclear repulsion energy (Hartree): {}'.
format(round(self.nuclear_repulsion_energy, 12)))
lines.append('> Total ground state energy (Hartree): {}'.
format(round(self.energy, 12)))
if self.has_observables():
line = ' Measured::'
if self.num_particles is not None:
line += ' # Particles: {:.3f}'.format(self.num_particles)
if self.spin is not None:
line += ' S: {:.3f}'.format(self.spin)
if self.total_angular_momentum is not None:
line += ' S^2: {:.3f}'.format(self.total_angular_momentum)
if self.magnetization is not None:
line += ' M: {:.5f}'.format(self.magnetization)
lines.append(line)
if self.has_dipole():
lines.append(' ')
lines.append('=== DIPOLE MOMENT ===')
lines.append(' ')
lines.append('* Electronic dipole moment (a.u.): {}'
.format(_dipole_to_string(self.electronic_dipole_moment)))
lines.append(' - computed part: {}'
.format(_dipole_to_string(self.computed_dipole_moment)))
lines.append(' - frozen energy part: {}'
.format(_dipole_to_string(self.frozen_extracted_dipole_moment)))
lines.append(' - particle hole part: {}'
.format(_dipole_to_string(self.ph_extracted_dipole_moment)))
if self.nuclear_dipole_moment is not None:
lines.append('~ Nuclear dipole moment (a.u.): {}'
.format(_dipole_to_string(self.nuclear_dipole_moment)))
lines.append('> Dipole moment (a.u.): {} Total: {}'
.format(_dipole_to_string(self.dipole_moment),
_float_to_string(self.total_dipole_moment)))
lines.append(' (debye): {} Total: {}'
.format(_dipole_to_string(self.dipole_moment_in_debye),
_float_to_string(self.total_dipole_moment_in_debye)))
return lines
class MolecularExcitedStatesResult(MolecularChemistryResult):
"""
Molecular Excited States Result
Energies are in Hartree and dipole moments in A.U unless otherwise stated.
"""
# TODO This needs completing once EigenSolver interface/result is final
@property
def energies(self) -> Tuple:
""" Returns ground state energy """
return self.get('energies')
@energies.setter
def energies(self, value: Tuple) -> None:
""" Sets ground state energy """
self.data['energies'] = value
def _dipole_tuple_add(x: Optional[DipoleTuple],
y: Optional[DipoleTuple]) -> Optional[DipoleTuple]:
""" Utility to add two dipole tuples element-wise for dipole additions """
if x is None or y is None:
return None
return _element_add(x[0], y[0]), _element_add(x[1], y[1]), _element_add(x[2], y[2])
def _element_add(x: Optional[float], y: Optional[float]):
""" Add dipole elements where a value may be None then None is returned """
return x + y if x is not None and y is not None else None
def _dipole_to_string(dipole: DipoleTuple):
dips = [round(x, 8) if x is not None else x for x in dipole]
value = '['
for i, _ in enumerate(dips):
value += _float_to_string(dips[i]) if dips[i] is not None else 'None'
value += ' ' if i < len(dips)-1 else ']'
return value
def _float_to_string(value: Optional[float], precision: int = 8) -> str:
if value is None:
return 'None'
else:
return '0.0' if value == 0 else ('{:.' + str(precision) + 'f}').format(value).rstrip('0')
|
test/test_date.py | timgates42/uliweb | 202 | 11196667 | from uliweb.utils import date
from datetime import datetime
def test():
"""
>>> date.get_timezones().keys()
['GMT -12', 'GMT -11', 'GMT -10', 'GMT -9', 'GMT -8', 'GMT -7', 'GMT -6', 'GMT -5', 'GMT -4', 'GMT -3', 'GMT -2', 'GMT -1', 'GMT +1', 'GMT +2', 'GMT +3', 'GMT +4', 'GMT +5', 'GMT +6', 'GMT +7', 'GMT +8', 'GMT +9', 'GMT +10', 'GMT +11', 'GMT +12', 'UTC']
>>> date.timezone('GMT +8') # doctest:+ELLIPSIS
<tzinfo GMT +8>
>>> GMT8 = date.timezone('GMT +8')
>>> d = datetime(2011, 9, 13, 20, 14, 15, tzinfo=GMT8)
>>> date.to_timezone(d, date.UTC).isoformat()
'2011-09-13T12:14:15+00:00'
>>> date.to_datetime('2011-9-13 20:14:15', tzinfo=date.UTC)
datetime.datetime(2011, 9, 13, 20, 14, 15, tzinfo=<tzinfo UTC>)
>>> d = date.to_datetime('2011-9-13 20:14:15', tzinfo=GMT8)
>>> d
datetime.datetime(2011, 9, 13, 20, 14, 15, tzinfo=<tzinfo GMT +8>)
>>> c = datetime(2011, 9, 13, 20, 14, 15)
>>> date.to_datetime(c, tzinfo=GMT8)
datetime.datetime(2011, 9, 13, 20, 14, 15, tzinfo=<tzinfo GMT +8>)
>>> date.to_datetime(d, tzinfo=date.UTC)
datetime.datetime(2011, 9, 13, 12, 14, 15, tzinfo=<tzinfo UTC>)
>>> date.set_timezone(date.UTC)
>>> date.to_datetime(d)
datetime.datetime(2011, 9, 13, 12, 14, 15, tzinfo=<tzinfo UTC>)
>>> date.to_date('2011-9-13 20:14:15')
datetime.date(2011, 9, 13)
>>> date.to_datetime('2011-9-13 20:14:15')
datetime.datetime(2011, 9, 13, 20, 14, 15, tzinfo=<tzinfo UTC>)
>>> date.to_date('2011-9-13 20:14:15', tzinfo=date.UTC)
datetime.date(2011, 9, 13)
>>> date.to_time('2011-9-13 20:14:15')
datetime.time(20, 14, 15, tzinfo=<tzinfo UTC>)
>>> date.to_time('2011-9-13 20:14:15', tzinfo=date.UTC)
datetime.time(20, 14, 15, tzinfo=<tzinfo UTC>)
>>> date.to_string(date.to_date('2011-9-13 20:14:15'))
'2011-09-13'
>>> date.to_string(date.to_datetime('2011-9-13 20:14:15'))
'2011-09-13 20:14:15 UTC'
>>> date.to_string(date.to_time('2011-9-13 20:14:15'))
'20:14:15'
>>> date.to_timezone(None)
>>> date.to_datetime(None)
>>> date.to_date(None)
>>> date.to_time(None)
>>> date.set_local_timezone('GMT +8')
>>> date.to_local(d)
datetime.datetime(2011, 9, 13, 20, 14, 15, tzinfo=<tzinfo GMT +8>)
>>> date.fix_gmt_timezone('GMT8')
'GMT +8'
>>> date.fix_gmt_timezone('GMT-8')
'GMT -8'
>>> date.fix_gmt_timezone('GMT+8')
'GMT +8'
>>> date.fix_gmt_timezone('gmt -8')
'GMT -8'
>>> date.fix_gmt_timezone('gmt -0')
'UTC'
>>> date.fix_gmt_timezone('asia/shanghai')
'asia/shanghai'
>>> date.timezone('gmt8')
<tzinfo GMT +8>
"""
def test_microsecond():
"""
>>> date.to_datetime('2012-08-01 16:41:12.5200')
datetime.datetime(2012, 8, 1, 16, 41, 12, 520000, tzinfo=<tzinfo UTC>)
>>> a = datetime(2012,8,1,16,41,12,5200)
>>> print a
2012-08-01 16:41:12.005200
>>> b = date.to_datetime(a)
>>> b
datetime.datetime(2012, 8, 1, 16, 41, 12, 5200, tzinfo=<tzinfo UTC>)
>>> date.to_string(b, microsecond=True)
'2012-08-01 16:41:12.005200 UTC'
>>> date.to_string(b, timezone=False)
'2012-08-01 16:41:12'
""" |
Clients/ParaView/Testing/Python/TestPythonMPI.py | xj361685640/ParaView | 815 | 11196677 | #!/usr/bin/env python
# Global python import
import exceptions, logging, random, sys, threading, time, os
# Update python path to have ParaView libs
build_path='/Volumes/SebKitSSD/Kitware/code/ParaView/build-ninja'
sys.path.append('%s/lib'%build_path)
sys.path.append('%s/lib/site-packages'%build_path)
# iPython import
#from IPython.display import HTML
#from IPython.parallel import Client
import paraview
from paraview.web import ipython as pv_ipython
from vtk import *
iPythonClient = None
paraviewHelper = pv_ipython.ParaViewIPython()
webArguments = pv_ipython.WebArguments('/.../path-to-web-directory')
def _start_paraview():
paraviewHelper.Initialize()
paraviewHelper.SetWebProtocol(IPythonProtocol, webArguments)
return paraviewHelper.Start()
def _stop_paraview():
paraviewHelper.Finalize()
def _pv_activate_dataset():
IPythonProtocol.ActivateDataSet('iPython-demo')
def _push_new_timestep():
# processing code generating new vtkDataSet
# newDataset = ...
IPythonProtocol.RegisterDataSet('iPython-demo', newDataset)
def StartParaView(height=600, path='/apps/WebVisualizer/'):
global iPythonClient, paraviewHelper
if not iPythonClient:
iPythonClient = Client()
urls = iPythonClient[:].apply_sync(lambda:_start_paraview())
url = ""
for i in urls:
if len(i) > 0:
url = i
return HTML("<iframe src='%s/%s' width='100%%' height='%i'></iframe>"%(url, path, height))
def StopParaView():
global iPythonClient, paraviewHelper
iPythonClient[:].apply_sync(lambda:_stop_paraview())
def ActivateDataSet():
iPythonClient[:].apply_sync(lambda:_pv_activate_dataset())
def ComputeNextTimeStep(ds):
iPythonClient[:].apply_sync(lambda:_push_new_timestep())
print ("Start waiting")
time.sleep(10)
print ("Done")
|
src/networkx/classes/graphviews.py | MarletteFunding/aws-kube-codesuite | 184 | 11196695 | <gh_stars>100-1000
# Copyright (C) 2004-2017 by
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
#
# Author: <NAME> (<EMAIL>),
# <NAME> (<EMAIL>),
# <NAME>(<EMAIL>)
"""View of Graphs as SubGraph, Reverse, Directed, Undirected.
In some algorithms it is convenient to temporarily morph
a graph to exclude some nodes or edges. It is usually better
to do that via a view than to remove and then re-add them.
In other algorithms it is convenient to temporarily morph
a graph to reverse directed edges, or treat a directed graph
as undirected, etc. This module provides those graph views.
The resulting views are essentially read-only graphs that
report data from the original graph object. We provide three
attributes related to the underlying graph object.
G._graph : the parent graph used for looking up graph data.
G.root_graph : the root graph of the potential chain of views.
For example, if you have a subgraph of a reversed view of
an edge_subgraph of a graph, this points to original graph.
G.fresh_copy() : a method to return a null copy of the graph
represented by the view. This is useful if you want to
create a graph with the same data structure (directed/multi)
as the current view. This is similar to G.root_graph.__class__()
but reflects the fact that (Un)DirectedView could make the
type of data structure different from the root_graph.
Note: Since graphviews look like graphs, one can end up with
view-of-view-of-view chains. Be careful with chains because
they become very slow with about 15 nested views.
For the common simple case of node induced subgraphs created
from the graph class, we short-cut the chain by returning a
subgraph of the original graph directly rather than a subgraph
of a subgraph. We are careful not to disrupt any edge filter in
the middle subgraph. In general, determining how to short-cut
the chain is tricky and much harder with restricted_views than
with induced subgraphs.
Often it is easiest to use `.copy()` to avoid chains.
"""
from collections import Mapping
from networkx.classes import Graph, DiGraph, MultiGraph, MultiDiGraph
from networkx.classes.coreviews import ReadOnlyGraph, \
AtlasView, AdjacencyView, MultiAdjacencyView, \
FilterAtlas, FilterAdjacency, FilterMultiAdjacency, \
UnionAdjacency, UnionMultiAdjacency
from networkx.classes.filters import no_filter, show_nodes, show_edges
from networkx.exception import NetworkXError, NetworkXNotImplemented
from networkx.utils import not_implemented_for
__all__ = ['SubGraph', 'SubDiGraph', 'SubMultiGraph', 'SubMultiDiGraph',
'ReverseView', 'MultiReverseView',
'DiGraphView', 'MultiDiGraphView',
'GraphView', 'MultiGraphView',
]
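# A small usage sketch (added; not from the original module), assuming a plain
# networkx graph G. The node/edge filters come from networkx.classes.filters,
# imported above; the views report from G without copying it.
#
# import networkx as nx
# G = nx.path_graph(5)
# sub = SubGraph(G, filter_node=show_nodes([1, 2, 3]))
# list(sub.edges())   # -> [(1, 2), (2, 3)]
# rev = ReverseView(nx.DiGraph([(0, 1), (1, 2)]))
# list(rev.edges())   # -> [(1, 0), (2, 1)]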
class SubGraph(ReadOnlyGraph, Graph):
def __init__(self, graph, filter_node=no_filter, filter_edge=no_filter):
self._graph = graph
self.root_graph = graph.root_graph
self._NODE_OK = filter_node
self._EDGE_OK = filter_edge
# Set graph interface
self.graph = graph.graph
self._node = FilterAtlas(graph._node, filter_node)
self._adj = FilterAdjacency(graph._adj, filter_node, filter_edge)
class SubDiGraph(ReadOnlyGraph, DiGraph):
def __init__(self, graph, filter_node=no_filter, filter_edge=no_filter):
self._graph = graph
self.root_graph = graph
while hasattr(self.root_graph, '_graph'):
self.root_graph = self.root_graph._graph
self._NODE_OK = filter_node
self._EDGE_OK = filter_edge
# Set graph interface
self.graph = graph.graph
self._node = FilterAtlas(graph._node, filter_node)
self._adj = FilterAdjacency(graph._adj, filter_node, filter_edge)
self._pred = FilterAdjacency(graph._pred, filter_node,
lambda u, v: filter_edge(v, u))
self._succ = self._adj
class SubMultiGraph(ReadOnlyGraph, MultiGraph):
def __init__(self, graph, filter_node=no_filter, filter_edge=no_filter):
self._graph = graph
self.root_graph = graph
while hasattr(self.root_graph, '_graph'):
self.root_graph = self.root_graph._graph
self._NODE_OK = filter_node
self._EDGE_OK = filter_edge
# Set graph interface
self.graph = graph.graph
self._node = FilterAtlas(graph._node, filter_node)
self._adj = FilterMultiAdjacency(graph._adj, filter_node, filter_edge)
class SubMultiDiGraph(ReadOnlyGraph, MultiDiGraph):
def __init__(self, graph, filter_node=no_filter, filter_edge=no_filter):
self._graph = graph
self.root_graph = graph
while hasattr(self.root_graph, '_graph'):
self.root_graph = self.root_graph._graph
self._NODE_OK = filter_node
self._EDGE_OK = filter_edge
# Set graph interface
self.graph = graph.graph
self._node = FilterAtlas(graph._node, filter_node)
FMA = FilterMultiAdjacency
self._adj = FMA(graph._adj, filter_node, filter_edge)
self._pred = FMA(graph._pred, filter_node,
lambda u, v, k: filter_edge(v, u, k))
self._succ = self._adj
class ReverseView(ReadOnlyGraph, DiGraph):
def __init__(self, graph):
if not graph.is_directed():
msg = "not implemented for undirected type"
raise NetworkXNotImplemented(msg)
self._graph = graph
self.root_graph = graph
while hasattr(self.root_graph, '_graph'):
self.root_graph = self.root_graph._graph
# Set graph interface
self.graph = graph.graph
self._node = graph._node
self._adj = graph._pred
self._pred = graph._succ
self._succ = self._adj
class MultiReverseView(ReadOnlyGraph, MultiDiGraph):
def __init__(self, graph):
if not graph.is_directed():
msg = "not implemented for undirected type"
raise NetworkXNotImplemented(msg)
self._graph = graph
self.root_graph = graph
while hasattr(self.root_graph, '_graph'):
self.root_graph = self.root_graph._graph
# Set graph interface
self.graph = graph.graph
self._node = graph._node
self._adj = graph._pred
self._pred = graph._succ
self._succ = self._adj
class DiGraphView(ReadOnlyGraph, DiGraph):
def __init__(self, graph):
if graph.is_multigraph():
msg = 'Wrong View class. Use MultiDiGraphView.'
raise NetworkXError(msg)
self._graph = graph
self.root_graph = graph
while hasattr(self.root_graph, '_graph'):
self.root_graph = self.root_graph._graph
self.graph = graph.graph
self._node = graph._node
if graph.is_directed():
self._pred = graph._pred
self._succ = graph._succ
else:
self._pred = graph._adj
self._succ = graph._adj
self._adj = self._succ
class MultiDiGraphView(ReadOnlyGraph, MultiDiGraph):
def __init__(self, graph):
if not graph.is_multigraph():
msg = 'Wrong View class. Use DiGraphView.'
raise NetworkXError(msg)
self._graph = graph
self.root_graph = graph
while hasattr(self.root_graph, '_graph'):
self.root_graph = self.root_graph._graph
self.graph = graph.graph
self._node = graph._node
if graph.is_directed():
self._pred = graph._pred
self._succ = graph._succ
else:
self._pred = graph._adj
self._succ = graph._adj
self._adj = self._succ
class GraphView(ReadOnlyGraph, Graph):
UnionAdj = UnionAdjacency
def __init__(self, graph):
if graph.is_multigraph():
msg = 'Wrong View class. Use MultiGraphView.'
raise NetworkXError(msg)
self._graph = graph
self.root_graph = graph
while hasattr(self.root_graph, '_graph'):
self.root_graph = self.root_graph._graph
self.graph = graph.graph
self._node = graph._node
if graph.is_directed():
self._adj = self.UnionAdj(graph._succ, graph._pred)
else:
self._adj = graph._adj
class MultiGraphView(ReadOnlyGraph, MultiGraph):
UnionAdj = UnionMultiAdjacency
def __init__(self, graph):
if not graph.is_multigraph():
msg = 'Wrong View class. Use GraphView.'
raise NetworkXError(msg)
self._graph = graph
self.root_graph = graph
while hasattr(self.root_graph, '_graph'):
self.root_graph = self.root_graph._graph
self.graph = graph.graph
self._node = graph._node
if graph.is_directed():
self._adj = self.UnionAdj(graph._succ, graph._pred)
else:
self._adj = graph._adj
|
tests/sites_framework/models.py | JBKahn/django | 5,079 | 11196709 | from django.contrib.sites.managers import CurrentSiteManager
from django.contrib.sites.models import Site
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class AbstractArticle(models.Model):
title = models.CharField(max_length=50)
objects = models.Manager()
on_site = CurrentSiteManager()
class Meta:
abstract = True
def __str__(self):
return self.title
class SyndicatedArticle(AbstractArticle):
sites = models.ManyToManyField(Site)
class ExclusiveArticle(AbstractArticle):
site = models.ForeignKey(Site, models.CASCADE)
class CustomArticle(AbstractArticle):
places_this_article_should_appear = models.ForeignKey(Site, models.CASCADE)
objects = models.Manager()
on_site = CurrentSiteManager("places_this_article_should_appear")
|
tests/test_vector/test_polygon.py | rbavery/solaris | 367 | 11196716 | <reponame>rbavery/solaris
import os
import pandas as pd
from affine import Affine
from shapely.geometry import Polygon
from shapely.wkt import loads, dumps
import geopandas as gpd
import rasterio
from solaris.data import data_dir
from solaris.vector.polygon import convert_poly_coords, \
affine_transform_gdf, georegister_px_df, geojson_to_px_gdf, \
gdf_to_yolo
square = Polygon([(10, 20), (10, 10), (20, 10), (20, 20)])
forward_result = loads("POLYGON ((733606 3725129, 733606 3725134, 733611 3725134, 733611 3725129, 733606 3725129))")
reverse_result = loads("POLYGON ((-1467182 7450238, -1467182 7450258, -1467162 7450258, -1467162 7450238, -1467182 7450238))")
# note that the xform below is the same as in cw_geodata/data/sample_geotiff.tif
aff = Affine(0.5, 0.0, 733601.0, 0.0, -0.5, 3725139.0)
affine_list = [0.5, 0.0, 733601.0, 0.0, -0.5, 3725139.0]
long_affine_list = [0.5, 0.0, 733601.0, 0.0, -0.5, 3725139.0,
0.0, 0.0, 1.0]
gdal_affine_list = [733601.0, 0.5, 0.0, 3725139.0, 0.0, -0.5]
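# Worked example of the forward transform defined by `aff` above: pixel vertex (10, 20)
# maps to (0.5*10 + 733601.0, -0.5*20 + 3725139.0) = (733606, 3725129), the first
# vertex of `forward_result`.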
class TestConvertPolyCoords(object):
"""Test the convert_poly_coords functionality."""
def test_square_pass_affine(self):
"""Test both forward and inverse transforms when passed affine obj."""
xform_result = convert_poly_coords(square, affine_obj=aff)
assert xform_result == forward_result
rev_xform_result = convert_poly_coords(square,
affine_obj=aff,
inverse=True)
assert rev_xform_result == reverse_result
def test_square_pass_raster(self):
"""Test forward affine transform when passed a raster reference."""
raster_src = os.path.join(data_dir, 'sample_geotiff.tif')
xform_result = convert_poly_coords(square, raster_src=raster_src)
assert xform_result == forward_result
def test_square_pass_list(self):
"""Test forward and reverse affine transform when passed a list."""
fwd_xform_result = convert_poly_coords(square,
affine_obj=affine_list)
assert fwd_xform_result == forward_result
rev_xform_result = convert_poly_coords(square,
affine_obj=affine_list,
inverse=True)
assert rev_xform_result == reverse_result
def test_square_pass_gdal_list(self):
"""Test forward affine transform when passed a list in gdal order."""
fwd_xform_result = convert_poly_coords(square,
affine_obj=gdal_affine_list
)
assert fwd_xform_result == forward_result
def test_square_pass_long_list(self):
"""Test forward affine transform when passed a full 9-element xform."""
fwd_xform_result = convert_poly_coords(
square, affine_obj=long_affine_list
)
assert fwd_xform_result == forward_result
class TestAffineTransformGDF(object):
"""Test the affine_transform_gdf functionality."""
def test_transform_csv(self):
truth_gdf = pd.read_csv(os.path.join(data_dir, 'aff_gdf_result.csv'))
input_df = os.path.join(data_dir, 'sample.csv')
output_gdf = affine_transform_gdf(input_df, aff,
geom_col="PolygonWKT_Pix",
precision=0)
output_gdf['geometry'] = output_gdf['geometry'].apply(dumps, trim=True)
assert output_gdf.equals(truth_gdf)
class TestGeoregisterPxDF(object):
"""Test the georegister_px_df functionality."""
def test_transform_using_raster(self):
input_df = os.path.join(data_dir, 'sample.csv')
input_im = os.path.join(data_dir, 'sample_geotiff.tif')
output_gdf = georegister_px_df(input_df, im_path=input_im,
geom_col='PolygonWKT_Pix', precision=0)
truth_df = pd.read_csv(os.path.join(data_dir, 'aff_gdf_result.csv'))
truth_df['geometry'] = truth_df['geometry'].apply(loads)
truth_gdf = gpd.GeoDataFrame(
truth_df,
crs=rasterio.open(os.path.join(data_dir, 'sample_geotiff.tif')).crs
)
assert truth_gdf.equals(output_gdf)
def test_transform_using_aff_crs(self):
input_df = os.path.join(data_dir, 'sample.csv')
crs = rasterio.open(os.path.join(data_dir, 'sample_geotiff.tif')).crs
output_gdf = georegister_px_df(input_df, affine_obj=aff, crs=crs,
geom_col='PolygonWKT_Pix', precision=0)
truth_df = pd.read_csv(os.path.join(data_dir, 'aff_gdf_result.csv'))
truth_df['geometry'] = truth_df['geometry'].apply(loads)
truth_gdf = gpd.GeoDataFrame(
truth_df,
crs=rasterio.open(os.path.join(data_dir, 'sample_geotiff.tif')).crs
)
assert truth_gdf.equals(output_gdf)
class TestGeojsonToPxGDF(object):
"""Tests for geojson_to_px_gdf."""
def test_transform_to_px_coords(self):
output_gdf = geojson_to_px_gdf(
os.path.join(data_dir, 'geotiff_labels.geojson'),
os.path.join(data_dir, 'sample_geotiff.tif'),
precision=0
)
truth_gdf = gpd.read_file(os.path.join(data_dir,
'gj_to_px_result.geojson'))
truth_subset = truth_gdf[['geometry']]
output_subset = output_gdf[['geometry']].reset_index(drop=True)
assert truth_subset.equals(output_subset)
class TestGDFToYOLO(object):
"""Test the gdf_to_yolo function."""
def test_gdf_to_yolo(self):
gdf = gpd.read_file(os.path.join(data_dir, 'geotiff_labels.geojson'))
image = os.path.join(data_dir, 'sample_geotiff.tif')
output_gdf = gdf_to_yolo(gdf, image, data_dir, column='origlen')
truth_gdf = pd.read_csv(os.path.join(data_dir, 'yolo_gdf_result.csv'))
truth_gdf = truth_gdf.sort_values(by='area').reset_index(drop=True)
output_gdf = output_gdf.sort_values(by='area').reset_index(drop=True)
truth_gdf = truth_gdf['w'].round(4)
output_gdf = output_gdf['w'].round(4)
assert truth_gdf.equals(output_gdf)
|
tests/test_credentials.py | al3pht/cloud-custodian | 2,415 | 11196767 | <filename>tests/test_credentials.py
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import os
from botocore.exceptions import ClientError
import placebo
from c7n import credentials
from c7n.credentials import SessionFactory, assumed_session, get_sts_client
from c7n.version import version
from c7n.utils import local_session
from .common import BaseTest
class Credential(BaseTest):
def test_session_factory(self):
factory = SessionFactory("us-east-1")
session = factory()
self.assertTrue(
session._session.user_agent().startswith("CloudCustodian/%s" % version)
)
def test_regional_sts(self):
factory = self.replay_flight_data('test_credential_sts_regional')
self.patch(credentials, 'USE_STS_REGIONAL', True)
client = get_sts_client(factory(), region='us-east-2')
# unfortunately we have to poke at boto3 client internals to verify
self.assertEqual(client._client_config.region_name, 'us-east-2')
self.assertEqual(client._endpoint.host,
'https://sts.us-east-2.amazonaws.com')
self.assertEqual(
client.get_caller_identity()['Arn'],
'arn:aws:iam::644160558196:user/kapil')
def test_assumed_session(self):
factory = self.replay_flight_data("test_credential_sts")
session = assumed_session(
role_arn='arn:aws:iam::644160558196:role/CustodianGuardDuty',
session_name="custodian-dev",
session=factory(),
)
# attach the placebo flight recorder to the new session.
pill = placebo.attach(
session, os.path.join(self.placebo_dir, 'test_credential_sts'))
if self.recording:
pill.record()
else:
pill.playback()
self.addCleanup(pill.stop)
try:
identity = session.client("sts").get_caller_identity()
except ClientError as e:
self.assertEqual(e.response["Error"]["Code"], "ValidationError")
self.assertEqual(
identity['Arn'],
'arn:aws:sts::644160558196:assumed-role/CustodianGuardDuty/custodian-dev')
def test_policy_name_user_agent(self):
session = SessionFactory("us-east-1")
session.policy_name = "test-policy-name-ua"
client = session().client('s3')
self.assertTrue(
client._client_config.user_agent.startswith(
"CloudCustodian(test-policy-name-ua)/%s" % version
)
)
def test_local_session_agent_update(self):
factory = SessionFactory('us-east-1')
factory.policy_name = "check-ebs"
client = local_session(factory).client('ec2')
self.assertTrue(
'check-ebs' in client._client_config.user_agent)
factory.policy_name = "check-ec2"
factory.update(local_session(factory))
client = local_session(factory).client('ec2')
self.assertTrue(
'check-ec2' in client._client_config.user_agent)
|
douban_movie/douban_movie/middlewares.py | FrozenmChen/yyy | 551 | 11196768 | <reponame>FrozenmChen/yyy
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
from selenium import webdriver
from scrapy.http import HtmlResponse
from lxml import etree
import time
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from random import choice
# Candidate User-Agent strings; one is chosen at random for the PhantomJS session.
ua_list = [
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/48.0.2564.82 Chrome/48.0.2564.82 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36",
"Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36"
]
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.resourceTimeout"] = 15
dcap["phantomjs.page.settings.loadImages"] = False
dcap["phantomjs.page.settings.userAgent"] = choice(ua_list)
#driver = webdriver.PhantomJS(executable_path='/home/icgoo/pywork/spider/phantomjs',desired_capabilities=dcap)
#driver = webdriver.PhantomJS(executable_path=u'/home/fank/pywork/spider/phantomjs',desired_capabilities=dcap)
#driver = webdriver.Firefox()
#driver = webdriver.Chrome()
class SeleniumMiddleware(object):
    # Renders JavaScript-heavy pages with PhantomJS before handing them to the spider.
    def process_request(self, request, spider):
        if spider.name == 'douban-people':  # only this spider needs browser rendering
            try:
                driver = webdriver.PhantomJS(executable_path='./phantomjs-2.1.1-linux-x86_64/bin/phantomjs')  # headless PhantomJS instance
                driver.get(request.url)  # load the page
                driver.implicitly_wait(3)  # implicit wait of up to 3s for elements
                time.sleep(5)  # give the page 5s to finish rendering
                page = driver.page_source
                driver.close()
                #print(page)
                return HtmlResponse(request.url,
                    body = page,
                    encoding = 'utf-8',
                    request = request,)
            except Exception:
                print("get douban-people data failed")
class DoubanMovieSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
    def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
    def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
    def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
    def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
packages/pyright-internal/src/tests/samples/match9.py | Jasha10/pyright | 3,934 | 11196784 | <reponame>Jasha10/pyright<gh_stars>1000+
# This sample tests class-based pattern matching when the class is
# marked final and can be discriminated based on the argument patterns.
from typing import final
class A:
title: str
class B:
name: str
class C:
name: str
def func1(r: A | B | C):
match r:
case object(title=_):
reveal_type(r, expected_text='A | B | C')
case object(name=_):
reveal_type(r, expected_text='A | B | C')
case _:
reveal_type(r, expected_text='A | B | C')
@final
class AFinal:
title: str
@final
class BFinal:
name: str
@final
class CFinal:
name: str
@final
class DFinal:
nothing: str
def func2(r: AFinal | BFinal | CFinal | DFinal):
match r:
case object(title=_):
reveal_type(r, expected_text='AFinal')
case object(name=_):
reveal_type(r, expected_text='BFinal | CFinal')
case _:
reveal_type(r, expected_text='DFinal')
|
docx/oxml/coreprops.py | revvsales/python-docx-1 | 3,031 | 11196786 | <filename>docx/oxml/coreprops.py
# encoding: utf-8
"""Custom element classes for core properties-related XML elements"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import re
from datetime import datetime, timedelta
from docx.compat import is_string
from docx.oxml import parse_xml
from docx.oxml.ns import nsdecls, qn
from docx.oxml.xmlchemy import BaseOxmlElement, ZeroOrOne
class CT_CoreProperties(BaseOxmlElement):
"""
``<cp:coreProperties>`` element, the root element of the Core Properties
part stored as ``/docProps/core.xml``. Implements many of the Dublin Core
document metadata elements. String elements resolve to an empty string
('') if the element is not present in the XML. String elements are
limited in length to 255 unicode characters.
"""
category = ZeroOrOne('cp:category', successors=())
contentStatus = ZeroOrOne('cp:contentStatus', successors=())
created = ZeroOrOne('dcterms:created', successors=())
creator = ZeroOrOne('dc:creator', successors=())
description = ZeroOrOne('dc:description', successors=())
identifier = ZeroOrOne('dc:identifier', successors=())
keywords = ZeroOrOne('cp:keywords', successors=())
language = ZeroOrOne('dc:language', successors=())
lastModifiedBy = ZeroOrOne('cp:lastModifiedBy', successors=())
lastPrinted = ZeroOrOne('cp:lastPrinted', successors=())
modified = ZeroOrOne('dcterms:modified', successors=())
revision = ZeroOrOne('cp:revision', successors=())
subject = ZeroOrOne('dc:subject', successors=())
title = ZeroOrOne('dc:title', successors=())
version = ZeroOrOne('cp:version', successors=())
_coreProperties_tmpl = (
'<cp:coreProperties %s/>\n' % nsdecls('cp', 'dc', 'dcterms')
)
@classmethod
def new(cls):
"""
Return a new ``<cp:coreProperties>`` element
"""
xml = cls._coreProperties_tmpl
coreProperties = parse_xml(xml)
return coreProperties
@property
def author_text(self):
"""
The text in the `dc:creator` child element.
"""
return self._text_of_element('creator')
@author_text.setter
def author_text(self, value):
self._set_element_text('creator', value)
@property
def category_text(self):
return self._text_of_element('category')
@category_text.setter
def category_text(self, value):
self._set_element_text('category', value)
@property
def comments_text(self):
return self._text_of_element('description')
@comments_text.setter
def comments_text(self, value):
self._set_element_text('description', value)
@property
def contentStatus_text(self):
return self._text_of_element('contentStatus')
@contentStatus_text.setter
def contentStatus_text(self, value):
self._set_element_text('contentStatus', value)
@property
def created_datetime(self):
return self._datetime_of_element('created')
@created_datetime.setter
def created_datetime(self, value):
self._set_element_datetime('created', value)
@property
def identifier_text(self):
return self._text_of_element('identifier')
@identifier_text.setter
def identifier_text(self, value):
self._set_element_text('identifier', value)
@property
def keywords_text(self):
return self._text_of_element('keywords')
@keywords_text.setter
def keywords_text(self, value):
self._set_element_text('keywords', value)
@property
def language_text(self):
return self._text_of_element('language')
@language_text.setter
def language_text(self, value):
self._set_element_text('language', value)
@property
def lastModifiedBy_text(self):
return self._text_of_element('lastModifiedBy')
@lastModifiedBy_text.setter
def lastModifiedBy_text(self, value):
self._set_element_text('lastModifiedBy', value)
@property
def lastPrinted_datetime(self):
return self._datetime_of_element('lastPrinted')
@lastPrinted_datetime.setter
def lastPrinted_datetime(self, value):
self._set_element_datetime('lastPrinted', value)
@property
def modified_datetime(self):
return self._datetime_of_element('modified')
@modified_datetime.setter
def modified_datetime(self, value):
self._set_element_datetime('modified', value)
@property
def revision_number(self):
"""
Integer value of revision property.
"""
revision = self.revision
if revision is None:
return 0
revision_str = revision.text
try:
revision = int(revision_str)
except ValueError:
# non-integer revision strings also resolve to 0
revision = 0
# as do negative integers
if revision < 0:
revision = 0
return revision
@revision_number.setter
def revision_number(self, value):
"""
Set revision property to string value of integer *value*.
"""
if not isinstance(value, int) or value < 1:
tmpl = "revision property requires positive int, got '%s'"
raise ValueError(tmpl % value)
revision = self.get_or_add_revision()
revision.text = str(value)
@property
def subject_text(self):
return self._text_of_element('subject')
@subject_text.setter
def subject_text(self, value):
self._set_element_text('subject', value)
@property
def title_text(self):
return self._text_of_element('title')
@title_text.setter
def title_text(self, value):
self._set_element_text('title', value)
@property
def version_text(self):
return self._text_of_element('version')
@version_text.setter
def version_text(self, value):
self._set_element_text('version', value)
def _datetime_of_element(self, property_name):
element = getattr(self, property_name)
if element is None:
return None
datetime_str = element.text
try:
return self._parse_W3CDTF_to_datetime(datetime_str)
except ValueError:
# invalid datetime strings are ignored
return None
def _get_or_add(self, prop_name):
"""
Return element returned by 'get_or_add_' method for *prop_name*.
"""
get_or_add_method_name = 'get_or_add_%s' % prop_name
get_or_add_method = getattr(self, get_or_add_method_name)
element = get_or_add_method()
return element
@classmethod
def _offset_dt(cls, dt, offset_str):
"""
Return a |datetime| instance that is offset from datetime *dt* by
the timezone offset specified in *offset_str*, a string like
``'-07:00'``.
"""
match = cls._offset_pattern.match(offset_str)
if match is None:
raise ValueError(
"'%s' is not a valid offset string" % offset_str
)
sign, hours_str, minutes_str = match.groups()
sign_factor = -1 if sign == '+' else 1
hours = int(hours_str) * sign_factor
minutes = int(minutes_str) * sign_factor
td = timedelta(hours=hours, minutes=minutes)
return dt + td
_offset_pattern = re.compile(r'([+-])(\d\d):(\d\d)')
@classmethod
def _parse_W3CDTF_to_datetime(cls, w3cdtf_str):
# valid W3CDTF date cases:
# yyyy e.g. '2003'
# yyyy-mm e.g. '2003-12'
# yyyy-mm-dd e.g. '2003-12-31'
# UTC timezone e.g. '2003-12-31T10:14:55Z'
# numeric timezone e.g. '2003-12-31T10:14:55-08:00'
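        # Worked example: '2003-12-31T10:14:55-08:00' splits into a parseable part
        # '2003-12-31T10:14:55' plus offset '-08:00'; _offset_dt then shifts the parsed
        # value by +8 hours to datetime(2003, 12, 31, 18, 14, 55), the UTC instant.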
templates = (
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%d',
'%Y-%m',
'%Y',
)
# strptime isn't smart enough to parse literal timezone offsets like
# '-07:30', so we have to do it ourselves
parseable_part = w3cdtf_str[:19]
offset_str = w3cdtf_str[19:]
dt = None
for tmpl in templates:
try:
dt = datetime.strptime(parseable_part, tmpl)
except ValueError:
continue
if dt is None:
tmpl = "could not parse W3CDTF datetime string '%s'"
raise ValueError(tmpl % w3cdtf_str)
if len(offset_str) == 6:
return cls._offset_dt(dt, offset_str)
return dt
def _set_element_datetime(self, prop_name, value):
"""
Set date/time value of child element having *prop_name* to *value*.
"""
if not isinstance(value, datetime):
tmpl = (
"property requires <type 'datetime.datetime'> object, got %s"
)
raise ValueError(tmpl % type(value))
element = self._get_or_add(prop_name)
dt_str = value.strftime('%Y-%m-%dT%H:%M:%SZ')
element.text = dt_str
if prop_name in ('created', 'modified'):
# These two require an explicit 'xsi:type="dcterms:W3CDTF"'
# attribute. The first and last line are a hack required to add
# the xsi namespace to the root element rather than each child
# element in which it is referenced
self.set(qn('xsi:foo'), 'bar')
element.set(qn('xsi:type'), 'dcterms:W3CDTF')
del self.attrib[qn('xsi:foo')]
def _set_element_text(self, prop_name, value):
"""Set string value of *name* property to *value*."""
if not is_string(value):
value = str(value)
if len(value) > 255:
tmpl = (
"exceeded 255 char limit for property, got:\n\n'%s'"
)
raise ValueError(tmpl % value)
element = self._get_or_add(prop_name)
element.text = value
def _text_of_element(self, property_name):
"""
Return the text in the element matching *property_name*, or an empty
string if the element is not present or contains no text.
"""
element = getattr(self, property_name)
if element is None:
return ''
if element.text is None:
return ''
return element.text
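# Illustrative usage sketch (uses only names defined in this module; applications
# normally reach these elements through python-docx's higher-level core properties API):
#
#   props = CT_CoreProperties.new()
#   props.title_text = 'Annual Report'
#   props.created_datetime = datetime(2016, 1, 1)  # datetime imported above
#   assert props.revision_number == 0              # no <cp:revision> element yet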
|
vunit/vhdl/logging/run.py | eataesierp/vunit | 507 | 11196797 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2021, <NAME> <EMAIL>
from os import getenv
import glob
from pathlib import Path
from vunit import VUnit, location_preprocessor
def main():
vhdl_2019 = getenv("VUNIT_VHDL_STANDARD") == "2019"
root = Path(__file__).parent
ui = VUnit.from_argv()
vunit_lib = ui.library("vunit_lib")
files = glob.glob(str(root / "test" / "*.vhd"))
files.remove(str(root / "test" / "tb_location.vhd"))
vunit_lib.add_source_files(files)
preprocessor = location_preprocessor.LocationPreprocessor()
preprocessor.add_subprogram("print_pre_vhdl_2019_style")
preprocessor.remove_subprogram("info")
vunit_lib.add_source_files(root / "test" / "tb_location.vhd", preprocessors=[preprocessor])
if vhdl_2019:
testbenches = vunit_lib.get_source_files("*tb*")
testbenches.set_compile_option("rivierapro.vcom_flags", ["-dbg"])
ui.set_sim_option("rivierapro.vsim_flags", ["-filter RUNTIME_0375"])
vunit_lib.test_bench("tb_location").set_generic("vhdl_2019", vhdl_2019)
ui.main()
if __name__ == "__main__":
main()
|
examples/pipeline_tune.py | rozlana-g/FEDOT | 358 | 11196836 | import numpy as np
from sklearn.metrics import roc_auc_score as roc_auc
from cases.data.data_utils import get_scoring_case_data_paths
from fedot.core.data.data import InputData
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.pipelines.tuning.unified import PipelineTuner
def get_case_train_test_data():
""" Function for getting data for train and validation """
train_file_path, test_file_path = get_scoring_case_data_paths()
train_data = InputData.from_csv(train_file_path)
test_data = InputData.from_csv(test_file_path)
return train_data, test_data
def get_simple_pipeline():
""" Function return simple pipeline with the following structure:
xgboost \
-> logit
knn |
"""
first = PrimaryNode(operation_type='xgboost')
second = PrimaryNode(operation_type='knn')
final = SecondaryNode(operation_type='logit',
nodes_from=[first, second])
pipeline = Pipeline(final)
return pipeline
def pipeline_tuning(pipeline: Pipeline, train_data: InputData,
test_data: InputData, local_iter: int,
tuner_iter_num: int = 30) -> (float, list):
""" Function for tuning pipeline with PipelineTuner
:param pipeline: pipeline to tune
:param train_data: InputData for train
:param test_data: InputData for validation
:param local_iter: amount of tuner launches
:param tuner_iter_num: amount of iterations, which tuner will perform
:return mean_metric: mean value of ROC AUC metric
:return several_iter_scores_test: list with metrics
"""
several_iter_scores_test = []
for iteration in range(local_iter):
print(f'current local iteration {iteration}')
# Pipeline tuning
pipeline_tuner = PipelineTuner(pipeline=pipeline,
task=train_data.task,
iterations=tuner_iter_num)
tuned_pipeline = pipeline_tuner.tune_pipeline(input_data=train_data,
loss_function=roc_auc)
# After tuning prediction
tuned_pipeline.fit(train_data)
after_tuning_predicted = tuned_pipeline.predict(test_data)
# Metrics
aft_tun_roc_auc = roc_auc(y_true=test_data.target,
y_score=after_tuning_predicted.predict)
several_iter_scores_test.append(aft_tun_roc_auc)
mean_metric = float(np.mean(several_iter_scores_test))
return mean_metric, several_iter_scores_test
if __name__ == '__main__':
train_data, test_data = get_case_train_test_data()
# Pipeline composition
pipeline = get_simple_pipeline()
# Before tuning prediction
pipeline.fit(train_data, use_fitted=False)
before_tuning_predicted = pipeline.predict(test_data)
bfr_tun_roc_auc = roc_auc(y_true=test_data.target,
y_score=before_tuning_predicted.predict)
local_iter = 5
# Pipeline tuning
after_tune_roc_auc, several_iter_scores_test = pipeline_tuning(pipeline=pipeline,
train_data=train_data,
test_data=test_data,
local_iter=local_iter)
print(f'Several test scores {several_iter_scores_test}')
print(f'Mean test score over {local_iter} iterations: {after_tune_roc_auc}')
print(round(bfr_tun_roc_auc, 3))
print(round(after_tune_roc_auc, 3))
|
kashgari/callbacks/__init__.py | SharpKoi/Kashgari | 2,422 | 11196841 | <reponame>SharpKoi/Kashgari
# encoding: utf-8
# author: BrikerMan
# contact: <EMAIL>
# blog: https://eliyar.biz
# file: __init__.py
# time: 8:08 PM
from kashgari.callbacks.eval_callBack import EvalCallBack
if __name__ == "__main__":
pass
|
examples/sort_by_distance.py | codingApprentice/daftlistings | 126 | 11196872 | <filename>examples/sort_by_distance.py
# Search properties according to criteria then sort by nearness to Dublin Castle
from daftlistings import Daft, SearchType
daft = Daft()
daft.set_location("Dublin City")
daft.set_search_type(SearchType.RESIDENTIAL_RENT)
daft.set_min_price(1000)
daft.set_max_price(1500)
listings = daft.search(max_pages=1)
dublin_castle_coords = [53.3429, -6.2674]
listings.sort(key=lambda x: x.distance_to(dublin_castle_coords))
for listing in listings:
print(f'{listing.title}')
print(f'{listing.daft_link}')
print(f'{listing.price}')
print(f'{listing.distance_to(dublin_castle_coords):.3}km')
print('')
|
static/scripts/setLogLevel.py | imShakil/community-edition-setup | 178 | 11196887 | #!/usr/bin/python
import getopt, sys, os, string, json, base64, os.path
from ldif import LDIFParser
from getpass import getpass
ldapsearch_cmd = "/opt/opendj/bin/ldapsearch"
ldapmodify_cmd = "/opt/opendj/bin/ldapmodify"
fn_search = "config-search.ldif"
fn_mod = "config-mod.ldif"
network_args = "-h localhost -p 1636 -Z -X"
bind_args = '-D "cn=directory manager" -j %s'
scope = "-s base"
levels = ["TRACE", "DEBUG", "WARN", "INFO", "ERROR", "FATAL", "OFF"]
systems = {'oxauth': ('oxAuthConfDynamic', 'loggingLevel'),
'oxtrust': ('oxTrustConfApplication', 'loggingLevel'),
'fido2': ('gluuConfDynamic', 'loggingLevel'),
'oxpassport': ('gluuPassportConfiguration', ''),
'casa': ('oxConfApplication', 'log_level')
}
def usage():
print("""
REQUIRED
-l --loglevel= : TRACE, DEBUG, WARN, INFO, ERROR, FATAL, OFF
-s --system= : oxauth, oxtrust, fido2, oxpassport, and casa
OPTIONAL
-h --help
-F --force: Don't prompt to proceed
The program looks for the ldap directory manager password in /root/.pw. If it's
not there, it will prompt you for the password, write it to .pw and then delete
it after the program finishes.
""")
def writeFile(s, fn):
f = open(fn, 'w')
f.write(s)
f.close()
def main():
system = None
loglevel = None
forceToProceed = False
fn_pw = '/root/.pw'
del_pw = False
try:
        opts, args = getopt.getopt(sys.argv[1:], "hFl:s:", ["help", "force", "loglevel=", "system="])
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(2)
for o, a in opts:
if o in ("-l", "--loglevel"):
loglevel = a.upper()
if loglevel not in levels:
print("\nloglevel %s not recognized" % loglevel)
usage()
sys.exit(3)
elif o in ("-F", "--force"):
forceToProceed = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-s", "--system"):
system = a.lower()
if system not in systems.keys():
print("\nsystem %s not recognized" % system)
usage()
sys.exit(4)
else:
assert False, "unhandled option"
if (system == None):
print("\nMissing -s -- you must specify the system")
usage()
sys.exit(5)
if (loglevel == None):
print("\nMissing -l -- you must specify the loglevel")
usage()
sys.exit(6)
if not os.path.isfile(fn_pw):
del_pw = True
fn_pw = ".pw"
pw = getpass("Enter 'cn=directory manager' password: ")
writeFile(pw, fn_pw)
configDN = "ou=%s,ou=configuration,o=gluu" % system
base = "-b %s" % configDN
cmd = [ldapsearch_cmd,
network_args,
bind_args % fn_pw,
base,
scope,
"objectclass=*",
"> %s" % fn_search]
os.system(" ".join(cmd))
parser = LDIFParser(open(fn_search, 'rb'))
confLDAPAttr = systems[system][0]
confValue = None
for dn, entry in parser.parse():
confValue = json.loads(entry[confLDAPAttr][0])
if system != "oxpassport":
logKey = systems[system][1]
confValue[logKey] = loglevel
else:
confValue["conf"]["logging"]["level"] = loglevel.lower()
confValue = json.dumps(confValue)
message_bytes = confValue.encode("ascii")
confValue= base64.b64encode(message_bytes)
ldifMod = """dn: %s
changetype: modify
replace: %s
%s:: %s
""" % (configDN, confLDAPAttr, confLDAPAttr, repr(confValue)[2:-1])
writeFile(ldifMod, fn_mod)
proceed = 'y'
if not forceToProceed:
proceed = input("Proceed with update? [N|y] ")
    if proceed.strip().lower().startswith("y"):
cmd = [ldapmodify_cmd,
network_args,
bind_args % fn_pw,
"-f %s" % fn_mod]
os.system(" ".join(cmd))
os.remove(fn_mod)
os.remove(fn_search)
if del_pw:
os.remove(fn_pw)
if __name__ == "__main__":
main()
|
thirdparty/pymdownx/keys.py | goodboyl/- | 182 | 11196893 | <reponame>goodboyl/-
"""
Keys.
pymdownx.keys
Markdown extension for keystroke (user keyboard input) formatting.
It wraps the syntax `++key+key+key++` (for individual keystrokes with modifiers)
or `++"string"++` (for continuous keyboard input) into HTML `<kbd>` elements.
If a key is found in the extension's database, its `<kbd>` element gets a matching class.
Common synonyms are included, e.g. `++pg-up++` will match as `++page-up++`.
## Config
If `strict` is `True`, the entire series of keystrokes is wrapped into an outer`<kbd>` element, and then,
each keystroke is wrapped into a separate inner `<kbd>` element, which matches the HTML5 spec.
If `strict` is `False`, an outer `<span>` is used, which matches the practice on Github or StackOverflow.
The resulting `<kbd>` elements are separated by `separator` (`+` by default, can be `''` or something else).
If `camel_case` is `True`, `++PageUp++` will match the same as `++page-up++`.
The database can be extended or modified with the `key_map` dict.
## Examples
### Input
~~~
Press ++Shift+Alt+PgUp++, type in ++"Hello"++ and press ++Enter++.
~~~
### Config 1
~~~.yaml
pymdownx.keys:
camel_case: true
strict: false
separator: '+'
~~~
### Output 1
~~~.html
<p>Press <span class="keys"><kbd class="key-shift">Shift</kbd><span>+</span><kbd
class="key-alt">Alt</kbd><span>+</span><kbd class="key-page-up">Page Up</kbd></span>, type in <span
class="keys"><kbd>Hello</kbd></span> and press <span class="keys"><kbd class="key-enter">Enter</kbd></span>.</p>
~~~
### Config 2
~~~.yaml
pymdownx.keys:
camel_case: true
strict: true
separator: ''
~~~
### Output 2
~~~.html
<p>Press <kbd class="keys"><kbd class="key-shift">Shift</kbd><kbd class="key-alt">Alt</kbd><kbd
class="key-page-up">Page Up</kbd></kbd>, type in <kbd class="keys"><kbd>Hello</kbd></kbd> and press <kbd
class="keys"><kbd class="key-enter">Enter</kbd></kbd>.</p>
~~~
Idea by <NAME> and coded by <NAME>.
Copyright (c) 2017 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
from markdown import Extension
from markdown.inlinepatterns import Pattern
from markdown import util as md_util
from . import util
from . import keymap_db as keymap
import re
RE_KBD = r'''(?x)
(?:
# Escape
(?<!\\)(?P<escapes>(?:\\{2})+)(?=\+)|
# Key
(?<!\\)\+{2}
(
(?:(?:[\w\-]+|"(?:\\.|[^"])+"|\'(?:\\.|[^\'])+\')\+)*?
(?:[\w\-]+|"(?:\\.|[^"])+"|\'(?:\\.|[^\'])+\')
)
\+{2}
)
'''
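# For example, the pattern above matches '++ctrl+alt+del++' (capturing the
# 'ctrl+alt+del' run) and quoted input such as '++"Hello"++'; the 'escapes' branch
# captures runs of '\\' before a '+' so handleMatch can re-emit them as escaped
# backslashes instead of starting a keystroke.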
ESCAPE_RE = re.compile(r'''(?<!\\)(?:\\\\)*\\(.)''')
UNESCAPED_PLUS = re.compile(r'''(?<!\\)(?:\\\\)*(\+)''')
ESCAPED_BSLASH = '%s%s%s' % (md_util.STX, ord('\\'), md_util.ETX)
DOUBLE_BSLASH = '\\\\'
class KeysPattern(Pattern):
"""Return kbd tag."""
def __init__(self, pattern, config, md):
"""Initialize."""
self.ksep = config['separator']
self.markdown = md
self.strict = config['strict']
self.classes = config['class'].split(' ')
self.html_parser = util.HTMLParser()
self.map = self.merge(keymap.keymap, config['key_map'])
self.aliases = keymap.aliases
self.camel = config['camel_case']
super(KeysPattern, self).__init__(pattern)
def merge(self, x, y):
"""Given two dicts, merge them into a new dict."""
z = x.copy()
z.update(y)
return z
def normalize(self, key):
"""Normalize the value."""
if not self.camel:
return key
norm_key = []
last = ''
for c in key:
if c.isupper():
if not last or last == '-':
norm_key.append(c.lower())
else:
norm_key.extend(['-', c.lower()])
else:
norm_key.append(c)
last = c
return ''.join(norm_key)
def process_key(self, key):
"""Process key."""
if key.startswith(('"', "'")):
value = (None, self.html_parser.unescape(ESCAPE_RE.sub(r'\1', key[1:-1])).strip())
else:
norm_key = self.normalize(key)
canonical_key = self.aliases.get(norm_key, norm_key)
name = self.map.get(canonical_key, None)
value = (canonical_key, name) if name else None
return value
def handleMatch(self, m):
"""Handle kbd pattern matches."""
if m.group(2):
return m.group('escapes').replace(DOUBLE_BSLASH, ESCAPED_BSLASH)
content = [self.process_key(key) for key in UNESCAPED_PLUS.split(m.group(3)) if key != '+']
if None in content:
return
el = md_util.etree.Element(
('kbd' if self.strict else 'span'),
({'class': ' '.join(self.classes)} if self.classes else {})
)
last = None
for item_class, item_name in content:
classes = []
if item_class:
classes.append('key-' + item_class)
if last is not None and self.ksep:
span = md_util.etree.SubElement(el, 'span')
span.text = md_util.AtomicString(self.ksep)
attr = {}
if classes:
attr['class'] = ' '.join(classes)
kbd = md_util.etree.SubElement(el, 'kbd', attr)
kbd.text = md_util.AtomicString(item_name)
last = kbd
return el
class KeysExtension(Extension):
"""Add `keys`` extension to Markdown class."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {
'separator': ['+', "Provide a keyboard separator - Default: \"+\""],
'strict': [False, "Format keys and menus according to HTML5 spec - Default: False"],
'class': ['keys', "Provide class(es) for the kbd elements - Default: \"keys\""],
'camel_case': [False, 'Allow camelCase conversion for key names PgDn -> pg-dn - Default: False'],
'key_map': [{}, 'Additional keys to include or keys to override - Default: {}']
}
super(KeysExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md, md_globals):
"""Add support for keys."""
util.escape_chars(md, ['+'])
md.inlinePatterns.add("keys", KeysPattern(RE_KBD, self.getConfigs(), md), "<escape")
def makeExtension(*args, **kwargs):
"""Return extension."""
return KeysExtension(*args, **kwargs)
|
qutip/tests/test_brmesolve.py | camponogaraviera/qutip | 1,205 | 11196910 | <reponame>camponogaraviera/qutip<filename>qutip/tests/test_brmesolve.py
import numpy as np
import pytest
import qutip
def pauli_spin_operators():
return [qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()]
_simple_qubit_gamma = 0.25
_m_c_op = np.sqrt(_simple_qubit_gamma) * qutip.sigmam()
_z_c_op = np.sqrt(_simple_qubit_gamma) * qutip.sigmaz()
_x_a_op = [qutip.sigmax(), lambda w: _simple_qubit_gamma * (w >= 0)]
@pytest.mark.parametrize("me_c_ops, brme_c_ops, brme_a_ops", [
pytest.param([_m_c_op], [], [_x_a_op], id="me collapse-br coupling"),
pytest.param([_m_c_op], [_m_c_op], [], id="me collapse-br collapse"),
pytest.param([_m_c_op, _z_c_op], [_z_c_op], [_x_a_op],
id="me collapse-br collapse-br coupling"),
])
def test_simple_qubit_system(me_c_ops, brme_c_ops, brme_a_ops):
"""
Test that the BR solver handles collapse and coupling operators correctly
relative to the standard ME solver.
"""
delta = 0.0 * 2*np.pi
epsilon = 0.5 * 2*np.pi
e_ops = pauli_spin_operators()
H = delta*0.5*qutip.sigmax() + epsilon*0.5*qutip.sigmaz()
psi0 = (2*qutip.basis(2, 0) + qutip.basis(2, 1)).unit()
times = np.linspace(0, 10, 100)
me = qutip.mesolve(H, psi0, times, c_ops=me_c_ops, e_ops=e_ops).expect
brme = qutip.brmesolve(H, psi0, times,
brme_a_ops, e_ops, brme_c_ops).expect
for me_expectation, brme_expectation in zip(me, brme):
np.testing.assert_allclose(me_expectation, brme_expectation, atol=1e-2)
def _harmonic_oscillator_spectrum_frequency(n_th, w0, kappa):
if n_th == 0:
return lambda w: kappa * (w >= 0)
w_th = w0 / np.log(1 + 1/n_th)
def f(w):
scale = np.exp(w / w_th) if w < 0 else 1
return (n_th + 1) * kappa * scale
return f
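# Worked example: with n_th=0.15 and kappa=0.15 (the values used in
# test_harmonic_oscillator below), the returned spectrum gives
# S(w0) = (n_th + 1) * kappa = 0.1725 and S(-w0) = n_th * kappa = 0.0225,
# i.e. the two are related by the detailed-balance factor exp(-w0 / w_th).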
def _harmonic_oscillator_c_ops(n_th, kappa, dimension):
a = qutip.destroy(dimension)
if n_th == 0:
return [np.sqrt(kappa) * a]
return [np.sqrt(kappa * (n_th+1)) * a,
np.sqrt(kappa * n_th) * a.dag()]
@pytest.mark.parametrize("n_th", [0, 0.15])
def test_harmonic_oscillator(n_th):
N = 10
w0 = 1.0 * 2*np.pi
g = 0.05 * w0
kappa = 0.15
S_w = _harmonic_oscillator_spectrum_frequency(n_th, w0, kappa)
a = qutip.destroy(N)
H = w0*a.dag()*a + g*(a+a.dag())
psi0 = (qutip.basis(N, 4) + qutip.basis(N, 2) + qutip.basis(N, 0)).unit()
psi0 = qutip.ket2dm(psi0)
times = np.linspace(0, 25, 1000)
c_ops = _harmonic_oscillator_c_ops(n_th, kappa, N)
a_ops = [[a + a.dag(), S_w]]
e_ops = [a.dag()*a, a+a.dag()]
me = qutip.mesolve(H, psi0, times, c_ops, e_ops)
brme = qutip.brmesolve(H, psi0, times, a_ops, e_ops)
for me_expectation, brme_expectation in zip(me.expect, brme.expect):
np.testing.assert_allclose(me_expectation, brme_expectation, atol=1e-2)
num = qutip.num(N)
me_num = qutip.expect(num, me.states)
brme_num = qutip.expect(num, brme.states)
np.testing.assert_allclose(me_num, brme_num, atol=1e-2)
def test_jaynes_cummings_zero_temperature():
"""
brmesolve: Jaynes-Cummings model, zero temperature
"""
N = 10
a = qutip.tensor(qutip.destroy(N), qutip.qeye(2))
sp = qutip.tensor(qutip.qeye(N), qutip.sigmap())
psi0 = qutip.ket2dm(qutip.tensor(qutip.basis(N, 1), qutip.basis(2, 0)))
a_ops = [[(a + a.dag()), lambda w: kappa * (w >= 0)]]
e_ops = [a.dag()*a, sp.dag()*sp]
w0 = 1.0 * 2*np.pi
g = 0.05 * 2*np.pi
kappa = 0.05
times = np.linspace(0, 2 * 2*np.pi / g, 1000)
c_ops = [np.sqrt(kappa) * a]
H = w0*a.dag()*a + w0*sp.dag()*sp + g*(a+a.dag())*(sp+sp.dag())
me = qutip.mesolve(H, psi0, times, c_ops, e_ops)
brme = qutip.brmesolve(H, psi0, times, a_ops, e_ops)
for me_expectation, brme_expectation in zip(me.expect, brme.expect):
# Accept 5% error.
np.testing.assert_allclose(me_expectation, brme_expectation, atol=5e-2)
def test_pull_572_error():
"""
brmesolve: Check for #572 bug.
"""
w1, w2, w3 = 1, 2, 3
gamma2, gamma3 = 0.1, 0.1
id2 = qutip.qeye(2)
# Hamiltonian for three uncoupled qubits
H = (w1/2. * qutip.tensor(qutip.sigmaz(), id2, id2)
+ w2/2. * qutip.tensor(id2, qutip.sigmaz(), id2)
+ w3/2. * qutip.tensor(id2, id2, qutip.sigmaz()))
# White noise
def S2(w):
return gamma2
def S3(w):
return gamma3
qubit_2_x = qutip.tensor(id2, qutip.sigmax(), id2)
qubit_3_x = qutip.tensor(id2, id2, qutip.sigmax())
# Bloch-Redfield tensor including dissipation for qubits 2 and 3 only
R, ekets = qutip.bloch_redfield_tensor(H,
[[qubit_2_x, S2], [qubit_3_x, S3]])
# Initial state : first qubit is excited
grnd2 = qutip.sigmam() * qutip.sigmap() # 2x2 ground
exc2 = qutip.sigmap() * qutip.sigmam() # 2x2 excited state
ini = qutip.tensor(exc2, grnd2, grnd2) # Full system
# Projector on the excited state of qubit 1
proj_up1 = qutip.tensor(exc2, id2, id2)
# Solution of the master equation
times = np.linspace(0, 10./gamma3, 1000)
sol = qutip.bloch_redfield_solve(R, ekets, ini, times, [proj_up1])
np.testing.assert_allclose(sol[0], np.ones_like(times))
def test_solver_accepts_list_hamiltonian():
"""
brmesolve: input list of Qobj
"""
delta = 0.0 * 2*np.pi
epsilon = 0.5 * 2*np.pi
gamma = 0.25
c_ops = [np.sqrt(gamma) * qutip.sigmam()]
e_ops = pauli_spin_operators()
H = [delta*0.5*qutip.sigmax(), epsilon*0.5*qutip.sigmaz()]
psi0 = (2*qutip.basis(2, 0) + qutip.basis(2, 1)).unit()
times = np.linspace(0, 10, 100)
me = qutip.mesolve(H, psi0, times, c_ops=c_ops, e_ops=e_ops).expect
brme = qutip.brmesolve(H, psi0, times, [], e_ops, c_ops).expect
for me_expectation, brme_expectation in zip(me, brme):
np.testing.assert_allclose(me_expectation, brme_expectation, atol=1e-8)
|
labs/02_backprop/solutions/worst_predictions.py | soufiomario/labs-Deep-learning | 1,398 | 11196922 | <gh_stars>1000+
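# Solution snippet for the lab notebook; it assumes the notebook context already
# defines np (numpy), EPSILON, model, X_test, y_test, one_hot and plot_prediction.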
test_losses = -np.sum(np.log(EPSILON + model.forward(X_test))
* one_hot(10, y_test), axis=1)
# Sort by ascending loss: best predictions first, worst
# at the end
ranked_by_loss = test_losses.argsort()
# Extract and display the top 5 worst predictions at
# the end:
worst_idx = ranked_by_loss[-5:]
print("test losses:", test_losses[worst_idx])
for idx in worst_idx:
plot_prediction(model, sample_idx=idx) |
bin/ssa-end-to-end-testing/modules/ssa_utils.py | pageinsec/security_content | 348 | 11196936 | import os
import json
import hashlib
#import urllib.request
from requests import get
import re
import logging
from modules.testing_utils import log
SSML_CWD = ".humvee"
HUMVEE_ARTIFACT_SEARCH = "https://repo.splunk.com/artifactory/api/search/artifact?name=humvee&repos=maven-splunk-local"
def get_latest_humvee_object():
#res = json.loads(urllib.request.urlopen(HUMVEE_ARTIFACT_SEARCH).read().decode('utf-8'))
res = get(HUMVEE_ARTIFACT_SEARCH).json()
for r in res['results']:
if re.match(r".*/latest/humvee-.*\.jar$", r['uri']):
#latest_humvee = json.loads(urllib.request.urlopen(r['uri']).read().decode('utf-8'))
latest_humvee = get(r['uri']).json()
return latest_humvee
return ""
def build_humvee(path):
if not os.path.exists(path):
os.mkdir(path)
latest_humvee_object = get_latest_humvee_object()
humvee_path = "%s/humvee.jar" % path
humvee_sha256 = ""
if os.path.exists(humvee_path):
with open(humvee_path, 'rb') as jar_fh:
humvee_sha256 = hashlib.sha256(jar_fh.read()).hexdigest()
log(logging.DEBUG, "Current local checksum of Humvee", detail=humvee_sha256)
if humvee_sha256 != latest_humvee_object['checksums']['sha256']:
log(logging.INFO, "Downloading Latest Humvee")
log(logging.DEBUG, "Humvee details", detail=latest_humvee_object)
#urllib.request.urlretrieve(latest_humvee_object['downloadUri'], humvee_path)
with open(humvee_path, 'wb') as f:
f.write(get(latest_humvee_object['downloadUri']).content)
else:
log(logging.DEBUG, "Already latest checksum %s" % humvee_sha256, detail=latest_humvee_object)
#def convert_to_ssa(detection):
# '''
# curl -H 'Content-type: text/yaml' -H 'Authorization: Bearer TOKEN'
# https://app-admin.playground.scp.splunk.com/secanalytics/ssa-tenant-management/v1alpha1/admin/detection-spl/research2
# --data-binary @detections/endpoint/ssa___first_time_seen_cmd_line.yml
# @param detection:
# @return:
# '''
|
src/samples/wx/hello_wx.py | risa2000/pyopenvr | 204 | 11196938 | #!/bin/env python
# file hello_wx.py
from openvr.gl_renderer import OpenVrGlRenderer
from openvr.color_cube_actor import ColorCubeActor
from openvr.glframework.wx_app import WxApp
"""
Minimal wxPython programming example which colored OpenGL cube scene that can be closed by pressing ESCAPE.
"""
if __name__ == "__main__":
# Show a blue OpenGL window
actor = ColorCubeActor()
renderer0 = OpenVrGlRenderer(actor, (800,600))
with WxApp(renderer0, "wx OpenVR color cube") as app:
app.run_loop()
|
tapas/utils/table_pruning.py | apurvak/tapas | 816 | 11196950 | <filename>tapas/utils/table_pruning.py<gh_stars>100-1000
# coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Defines the methods to use for table pruning."""
from typing import Any, Callable, Dict, List, Optional, Text
import dataclasses
from tapas.models import segmented_tensor
from tapas.models import tapas_classifier_model_utils as utils
from tapas.models.bert import modeling
from tapas.models.bert import table_bert
from tapas.protos import table_pruning_pb2
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
PRUNING_SCOPE = "pruning"
_SEQUENCE_OUTPUT_KEEP_PROB = 0.9
_Loss = table_pruning_pb2.Loss
_TablePruningModel = table_pruning_pb2.TablePruningModel
_Tapas = table_pruning_pb2.TAPAS
_AvgCosSimilarity = table_pruning_pb2.AvgCosSimilarity
_FirstTokens = table_pruning_pb2.FirstTokens
_HardSelection = _Loss.HardSelection
_Unsupervised = _Loss.Unsupervised
_Regularization = _Unsupervised.Regularization
@dataclasses.dataclass(frozen=True)
class Scores:
column_scores: Optional[tf.Tensor] = None
column_probs: Optional[tf.Tensor] = None
column_score_mask: Optional[tf.Tensor] = None
token_scores: Optional[tf.Tensor] = None
reduced_token_scores: Optional[tf.Tensor] = None
gather_op: Optional[Callable[[Dict[Text, tf.Tensor], tf.Tensor],
Dict[Text, tf.Tensor]]] = None
class TablePruningSelector:
"""Base class for table pruning."""
def __init__(self, config, max_num_tokens, max_num_columns):
self._config = config
self._max_num_columns = max_num_columns
self._max_num_tokens = max_num_tokens
# The default value of bert_init_checkpoint is None.
# It is called in tapas_classifier_model.
self.bert_init_checkpoint = None
def select_columns(self, mode, features):
return self._select_columns(mode, features)
def _select_columns(self, mode, features):
raise NotImplementedError()
def compute_loss(self, required_columns, column_scores,
column_score_mask,
token_scores):
del required_columns, column_scores, column_score_mask, token_scores
return None
def apply_hard_selection(self, mode, scores):
del mode
return scores
def compute_scores(self, mode, features):
del mode, features
return Scores()
def get_sequence_output(self):
return None
def get_pooled_output(self):
return None
def should_add_classification_loss(self):
return False
class NoTablePruning(TablePruningSelector):
"""Disables the use of table pruning."""
def __init__(self):
super(NoTablePruning, self).__init__(
config=None, max_num_columns=-1, max_num_tokens=-1)
self._model = None
def _select_columns(self, mode, features):
return None
class ModelPruningSelector(TablePruningSelector):
"""Runs an independent model to select columns."""
def _init_create_model_selector(self, config, max_num_tokens):
return create_loss_selector(config, max_num_tokens)
def __init__(self, config, max_num_columns, max_num_tokens):
super(ModelPruningSelector, self).__init__(
config=config,
max_num_columns=max_num_columns,
max_num_tokens=max_num_tokens)
# create the model selector that precise the type of learning:
# supervised or unsupervised.
self._model = self._init_create_model_selector(config, max_num_tokens)
self._mask_top_k = MaskTopK(max_num_tokens=self._max_num_tokens)
def compute_loss(self, required_columns, column_scores,
column_score_mask,
token_scores):
return self._model.compute_loss(required_columns, column_scores,
column_score_mask, token_scores)
def apply_hard_selection(self, mode, scores):
return self._model.apply_hard_selection(mode, scores)
def _gather_op(
self,
):
"""function used by the bert model to gather half of the seqence."""
def gather_top_k(features, scores):
input_mask = features["input_mask"]
# <int32>[batch_size, max_num_tokens, 1]
indexes = self._get_index_gather(scores=scores, input_mask=input_mask)
input_shape = modeling.get_shape_list(input_mask, expected_rank=2)
seq_len = input_shape[1]
reduced_features = {}
for k, v in features.items():
if v is not None:
v_shape = v.shape.as_list()
if len(v_shape) == 2 and v_shape[1] == seq_len:
# <float32>[batch_size, max_num_tokens]
reduced_features[k] = tf.gather_nd(
params=v, indices=indexes, batch_dims=1)
else:
reduced_features[k] = v
return reduced_features
return gather_top_k
def _get_index_gather(self, scores,
input_mask):
"""Computes the indexes of gather_op given a scores tensor."""
# <float32>[batch_size, seq_len]
scores_mask = self._mask_top_k.apply_hard_selection(
scores=scores) * tf.cast(
input_mask, dtype=tf.float32)
# <int32>[batch_size, max_num_tokens]
sorted_indexes = tf.math.top_k(scores_mask, self._max_num_tokens)[1]
# <int32>[batch_size, max_num_tokens, 1]
return tf.expand_dims(sorted_indexes, -1)
def _gather_scores(self, scores,
input_mask):
"""Gather the smaller tensor of scores."""
# <int32>[batch_size, max_num_tokens, 1]
indexes = self._get_index_gather(scores=scores, input_mask=input_mask)
# <float32>[batch_size, max_num_tokens]
return tf.gather_nd(indices=indexes, params=scores, batch_dims=1)
def compute_scores(self, mode, features):
"""Computes column_scores, column_probs, column_score_mask, token_scores."""
stats = self._computes_column_and_token_scores(mode, features)
hard_selection = self.apply_hard_selection(
mode=mode, scores=stats.token_scores)
input_mask = features["input_mask"]
token_scores = hard_selection * tf.cast(input_mask, dtype=tf.float32)
reduced_token_scores = self._gather_scores(
scores=token_scores, input_mask=input_mask)
return Scores(
column_scores=stats.column_scores,
column_probs=stats.column_probs,
column_score_mask=stats.column_score_mask,
token_scores=token_scores,
reduced_token_scores=reduced_token_scores,
gather_op=self._gather_op())
def _compute_columns_stats(self, column_scores,
column_ids):
"""Computes the scores probability and mask scores."""
# <float32>[batch_size, max_num_columns]
column_probs = tf.sigmoid(column_scores)
# <float32>[batch_size, max_num_columns]
column_score_mask = get_mask_columns_scores(
max_num_columns=self._max_num_columns,
column_ids=column_ids,
scores=column_probs)
column_scores = column_scores * column_score_mask
column_probs = column_probs * column_score_mask
return Scores(
column_scores=column_scores,
column_probs=column_probs,
column_score_mask=column_score_mask)
def _computes_column_and_token_scores(
self, mode, features):
"""Computes column_scores, column_probs, column_score_mask, token_scores."""
column_ids = features["column_ids"]
input_mask = features["input_mask"]
# <float32>[batch_size, max_num_columns]
column_scores = self.select_columns(mode, features)
column_stats = self._compute_columns_stats(column_scores, column_ids)
column_probs = column_stats.column_probs
# <float32>[batch_size, seq_length]
token_scores = get_token_scores_from_column_scores(
column_ids=column_ids,
column_probs=column_probs,
input_mask=input_mask,
max_num_columns=self._max_num_columns)
return Scores(
column_scores=column_stats.column_scores,
column_probs=column_probs,
column_score_mask=column_stats.column_score_mask,
token_scores=token_scores)
def should_add_classification_loss(self):
raise NotImplementedError()
class OnesTablePruning(ModelPruningSelector):
"""Empty class for selecting all columns."""
def _init_create_model_selector(self, config, max_num_tokens):
return MaskTopK(max_num_tokens=max_num_tokens)
def __init__(self, config, max_num_columns,
max_num_tokens):
super(OnesTablePruning, self).__init__(
config=config,
max_num_columns=max_num_columns,
max_num_tokens=max_num_tokens)
def _select_columns(self, mode, features):
del mode
input_shape = modeling.get_shape_list(
features["input_ids"], expected_rank=2)
batch_size = input_shape[0]
return tf.ones([batch_size, self._max_num_columns])
def apply_hard_selection(self, mode, scores):
# Selects only the first tokens.
return self._model.apply_hard_selection(scores)
def compute_loss(self, required_columns, column_scores,
column_score_mask,
token_scores):
return None
def should_add_classification_loss(self):
return False
class TapasPruningSelector(ModelPruningSelector):
"""Runs an independent Tapas model to select columns."""
def __init__(self, config, max_num_columns, max_num_rows,
max_num_tokens):
super(TapasPruningSelector, self).__init__(
config=config,
max_num_columns=max_num_columns,
max_num_tokens=max_num_tokens)
self._max_num_rows = max_num_rows
self._bert_config = modeling.BertConfig.from_json_file(
config.bert_config_file)
self.bert_init_checkpoint = config.bert_init_checkpoint
self._reset_position_index_per_cell = config.reset_position_index_per_cell
def _compute_column_scores_from_token_scores(
self, mode, output_layer,
features):
"""Gets the columns scores by avereging the tokens scores."""
with tf.variable_scope(PRUNING_SCOPE, reuse=tf.AUTO_REUSE):
if mode == tf.estimator.ModeKeys.TRAIN:
output_layer = tf.nn.dropout(
output_layer, keep_prob=_SEQUENCE_OUTPUT_KEEP_PROB)
input_mask = features["input_mask"]
row_ids = features["row_ids"]
column_ids = features["column_ids"]
# Construct indices for the table.
row_index = segmented_tensor.IndexMap(
indices=tf.minimum(row_ids, self._max_num_rows - 1),
num_segments=self._max_num_rows,
batch_dims=1)
col_index = segmented_tensor.IndexMap(
indices=tf.minimum(column_ids, self._max_num_columns),
num_segments=self._max_num_columns + 1,
batch_dims=1)
cell_index = segmented_tensor.ProductIndexMap(row_index, col_index)
# Masks.
# <float32>[batch_size, seq_length]
input_mask_float = tf.cast(input_mask, tf.float32)
# Mask for cells that exist in the table (i.e. that are not padding).
cell_mask, _ = segmented_tensor.reduce_mean(input_mask_float, cell_index)
# Compute logits per column which can be used to select a column.
# <float32>[batch_size, max_num_columns]
column_scores = utils.compute_column_logits(
output_layer=output_layer,
cell_index=cell_index,
cell_mask=cell_mask,
init_cell_selection_weights_to_zero=False,
allow_empty_column_selection=False)[:, 1:]
column_scores = tf.debugging.assert_all_finite(
column_scores, "column_scores contains nan values.")
return column_scores
def _select_columns(self, mode, features):
with tf.variable_scope(PRUNING_SCOPE, reuse=tf.AUTO_REUSE):
model = table_bert.create_model(
features=features,
mode=mode,
bert_config=self._bert_config,
reset_position_index_per_cell=self._reset_position_index_per_cell)
output_layer = model.get_sequence_output()
self._output_layer = model.get_sequence_output()
self._pooled_output = model.get_pooled_output()
column_scores = self._compute_column_scores_from_token_scores(
mode=mode, output_layer=output_layer, features=features)
return column_scores
def _compute_token_scores(self, mode,
features):
"""Computes the token probabilities using the pruning tapas outputlayer."""
with tf.variable_scope(PRUNING_SCOPE, reuse=tf.AUTO_REUSE):
model = table_bert.create_model(
features=features,
mode=mode,
bert_config=self._bert_config,
reset_position_index_per_cell=self._reset_position_index_per_cell)
output_layer = model.get_sequence_output()
self._output_layer = model.get_sequence_output()
self._pooled_output = model.get_pooled_output()
if mode == tf.estimator.ModeKeys.TRAIN:
output_layer = tf.nn.dropout(
output_layer, keep_prob=_SEQUENCE_OUTPUT_KEEP_PROB)
# No temperature is used.
token_logits = utils.compute_token_logits(
output_layer=output_layer,
temperature=1.0,
init_cell_selection_weights_to_zero=False)
token_logits = tf.debugging.assert_all_finite(
token_logits, "token_logits contains nan values.")
proba_tokens = tf.sigmoid(token_logits)
input_mask = features["input_mask"]
column_ids = features["column_ids"]
question_mask_proba_tokens = tf.where(
column_ids <= tf.zeros_like(column_ids), tf.ones_like(proba_tokens),
proba_tokens)
input_mask_proba_tokens = question_mask_proba_tokens * tf.cast(
input_mask, dtype=tf.float32)
return input_mask_proba_tokens
def _computes_column_and_token_scores(
self, mode, features):
if self._config.selection == _Tapas.Selection.COLUMNS:
return super(TapasPruningSelector,
self)._computes_column_and_token_scores(mode, features)
elif self._config.selection == _Tapas.Selection.TOKENS:
# <float32>[batch_size, seq_length]
token_scores = self._compute_token_scores(mode, features)
# <float32>[batch_size, max_num_columns]
column_scores = self._compute_column_scores_from_token_scores(
mode=mode, output_layer=self._output_layer, features=features)
column_ids = features["column_ids"]
columns_stats = self._compute_columns_stats(column_scores, column_ids)
return Scores(
column_scores=columns_stats.column_scores,
column_probs=columns_stats.column_probs,
column_score_mask=columns_stats.column_score_mask,
token_scores=token_scores)
else:
raise NotImplementedError(
f"Tapas.Selection not implemented {self._config.selection}")
def get_sequence_output(self):
return self._output_layer
def get_pooled_output(self):
return self._pooled_output
def should_add_classification_loss(self):
return self._model.should_add_classification_loss()
class AverageCosineSimilaritySelector(ModelPruningSelector):
"""Computes the cosine similarity between the average question and column token."""
def __init__(self, vocab_size, hidden_size,
initializer_range, max_num_columns,
type_vocab_size, disabled_features,
disable_position_embeddings, max_position_embeddings,
config, max_num_tokens):
super(AverageCosineSimilaritySelector, self).__init__(
config=config,
max_num_columns=max_num_columns,
max_num_tokens=max_num_tokens)
self._vocab_size = vocab_size
self._hidden_size = hidden_size
self._initializer_range = initializer_range
self._max_num_columns = max_num_columns
self._type_vocab_size = type_vocab_size
self._use_position_embeddings = not disable_position_embeddings
self._disabled_features = disabled_features
self._max_position_embeddings = max_position_embeddings
self._use_positional_embeddings = config.use_positional_embeddings
def _select_columns(self, mode, features):
input_mask = features["input_mask"]
column_ids = features["column_ids"]
with tf.variable_scope("bert"):
with tf.variable_scope("embeddings", reuse=tf.compat.v1.AUTO_REUSE):
input_embeddings, _ = modeling.embedding_lookup(
input_ids=features["input_ids"],
vocab_size=self._vocab_size,
embedding_size=self._hidden_size,
initializer_range=self._initializer_range,
word_embedding_name="word_embeddings")
if self._use_positional_embeddings:
token_type_ids = []
token_type_features = [
"segment_ids", "column_ids", "row_ids", "prev_label_ids",
"column_ranks", "inv_column_ranks", "numeric_relations"
]
for key in token_type_features:
if self._disabled_features is not None and key in self._disabled_features:
token_type_ids.append(tf.zeros_like(features[key]))
else:
token_type_ids.append(features[key])
input_embeddings = modeling.embedding_postprocessor(
input_tensor=input_embeddings,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=self._type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=self._use_position_embeddings,
position_embedding_name="position_embeddings",
initializer_range=self._initializer_range,
max_position_embeddings=self._max_position_embeddings,
extra_embeddings=None,
dropout_prob=0.0)
    # Maps every padding position (input_mask == 0) to index max_num_columns + 1.
    # Index 0 is for the question and indices 1 to max_num_columns (inclusive)
    # are for the columns.
masked_col_ids = column_ids * input_mask + (1 - input_mask) * (
self._max_num_columns + 1)
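    # For example, with column_ids = [0, 1, 2, 0], input_mask = [1, 1, 1, 0] and
    # max_num_columns = 5, masked_col_ids = [0, 1, 2, 6]: the padding token is
    # pushed into the extra segment max_num_columns + 1 = 6.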
col_index = segmented_tensor.IndexMap(
indices=masked_col_ids,
num_segments=self._max_num_columns + 2,
batch_dims=1)
average_embeddings, _ = segmented_tensor.reduce_mean(
input_embeddings, col_index)
    # Remove the last index, as it contains the average of the non-selected (padding) values.
average_embeddings = average_embeddings[:, :-1]
normalize_average_embeddings = tf.math.l2_normalize(
average_embeddings, axis=2)
questions_embeddings = normalize_average_embeddings[:, :1]
columns_embeddings = normalize_average_embeddings[:, 1:]
multiply = columns_embeddings * questions_embeddings
multiply = tf.where(
tf.is_nan(multiply), tf.zeros_like(multiply), multiply)
column_scores = tf.math.reduce_sum(
multiply, axis=-1, name="column_scores")
return column_scores
def should_add_classification_loss(self):
return False
def create_selector(
table_pruning_config_file,
vocab_size,
hidden_size,
initializer_range,
max_num_columns,
max_num_rows,
type_vocab_size,
disabled_features,
disable_position_embeddings,
max_position_embeddings,
):
"""Activates the scoring model according to table pruning config."""
if not table_pruning_config_file:
return NoTablePruning()
config = table_pruning_pb2.TablePruningModel()
with tf.gfile.Open(table_pruning_config_file) as input_file:
# ParseLines
config = text_format.ParseLines(input_file,
table_pruning_pb2.TablePruningModel())
model = config.WhichOneof("table_pruning_model")
max_num_tokens = config.max_num_tokens
if model == "avg_cos_similarity":
return AverageCosineSimilaritySelector(
vocab_size=vocab_size,
hidden_size=hidden_size,
initializer_range=initializer_range,
max_num_columns=max_num_columns,
type_vocab_size=type_vocab_size,
disabled_features=disabled_features,
disable_position_embeddings=disable_position_embeddings,
max_position_embeddings=max_position_embeddings,
config=config.avg_cos_similarity,
max_num_tokens=max_num_tokens)
elif model == "tapas":
return TapasPruningSelector(
config=config.tapas,
max_num_columns=max_num_columns,
max_num_tokens=max_num_tokens,
max_num_rows=max_num_rows)
elif model == "first_tokens":
return OnesTablePruning(
config=config.first_tokens,
max_num_columns=max_num_columns,
max_num_tokens=max_num_tokens)
else:
raise NotImplementedError(f"TablePruningModel not implemented {model}")
class LossSelector:
"""Base class for model selection supervised and unsupervised."""
def __init__(self, config, max_num_tokens):
self._config = config
self._train_hard_selection = create_hard_selection_selector(
config.train, max_num_tokens=max_num_tokens)
self._eval_hard_selection = create_hard_selection_selector(
config.eval, max_num_tokens=max_num_tokens)
self._should_add_classification_loss = config.add_classification_loss
def compute_loss(self, required_columns, column_scores,
column_score_mask,
token_scores):
raise NotImplementedError()
def apply_hard_selection(self, mode, scores):
if mode == tf.estimator.ModeKeys.TRAIN:
return self._train_hard_selection.apply_hard_selection(scores)
return self._eval_hard_selection.apply_hard_selection(scores)
def should_add_classification_loss(self):
return self._should_add_classification_loss
class Unsupervised(LossSelector):
"""Computes the unsupervised loss acconrding to the config."""
def __init__(self, config, max_num_tokens):
super(Unsupervised, self).__init__(
config=config, max_num_tokens=max_num_tokens)
self._regularization = config.unsupervised.regularization
def compute_loss(self, required_columns, column_scores,
column_score_mask,
token_scores):
if self._regularization == _Regularization.NONE:
return None
elif self._regularization == _Regularization.L1:
# token_scores: <float32>[batch_size, seq_length]
return tf.reduce_mean(token_scores)
elif self._regularization == _Regularization.L2:
# token_scores: <float32>[batch_size, seq_length]
return tf.reduce_mean(token_scores**2)
elif self._regularization == _Regularization.L1_L2:
# token_scores: <float32>[batch_size, seq_length]
batch_l1 = tf.reduce_mean(token_scores, axis=1)
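      # i.e. the batch mean of the squared per-example mean token score
      # (token scores are sigmoid outputs, hence non-negative).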
return tf.reduce_mean(batch_l1 * batch_l1)
else:
raise NotImplementedError(
f"Unsupervised loss is not implemented {self._regularization}")
def create_loss_selector(config, max_num_tokens):
"""Creates the loss selector according to the config."""
config_loss = config.loss
loss = config_loss.WhichOneof("loss")
if loss == "unsupervised":
return Unsupervised(config_loss, max_num_tokens)
else:
raise NotImplementedError(f"LossSelector not implemented {loss}")
class HardSelection:
"""Defines the hard selection strategy used for train or for evaluation."""
def apply_hard_selection(self, scores):
raise NotImplementedError()
class NoHardSelection(HardSelection):
def apply_hard_selection(self, scores):
return scores
class MaskTopK(HardSelection):
"""Selects the topk tokens. Returns the mask when aplying selection."""
def __init__(self, max_num_tokens):
self._max_num_tokens = max_num_tokens
def apply_hard_selection(self, scores):
# <int32>[batch_size, seq_length]
# Using argsort(argsort(.)) gives the rank of the score.
sorted_indexes = tf.argsort(
tf.argsort(scores, direction="DESCENDING", axis=-1, stable=True))
# <float32>[batch_size, seq_length]
mask = tf.cast(
sorted_indexes < tf.ones_like(sorted_indexes) * self._max_num_tokens,
dtype=tf.float32)
return mask
class TopK(MaskTopK):
"""Selects the topk tokens scores.
Returns the scores when aplying selection.
"""
def apply_hard_selection(self, scores):
mask = super(TopK, self).apply_hard_selection(scores)
return mask * scores
def create_hard_selection_selector(config,
max_num_tokens):
if config.selection_fn == _HardSelection.SelectionFn.ALL:
return NoHardSelection()
elif config.selection_fn == _HardSelection.SelectionFn.TOP_K:
return TopK(max_num_tokens=max_num_tokens)
elif config.selection_fn == _HardSelection.SelectionFn.MASK_TOP_K:
return MaskTopK(max_num_tokens=max_num_tokens)
else:
raise NotImplementedError(
f"HardSelection not implemented {config.selection_fn}")
def get_mask_columns_scores(max_num_columns, column_ids,
scores):
"""Extracts the columns mask.
Contains 1 for the scored columns and 0 the other columns.
Args:
max_num_columns: float contains the maximum number of columns.
column_ids: <int32>[batch_size, seq_length] additional to the columns' ids
(1,to max_number_columns), the value 0 refers to question tokens and
padding.
scores: <float32>[batch_size, max_num_columns] contains the column scores.
Returns:
<float32>[batch_size, max_num_columns]: Extracts the columns mask scores.
Using the input mask and the column_ids recovers the columns mask scores.
"""
# <float32>[batch_size, max_num_columns]
ranges = tf.ones_like(scores) * tf.cast(
tf.range(max_num_columns), dtype=tf.float32)
# <float32>[batch_size, max_num_columns]
max_num_columns_ids = tf.expand_dims(
tf.cast(tf.reduce_max(column_ids, axis=1), dtype=tf.float32), -1)
# <bool>[batch_size, max_num_columns]
  # max_num_columns_ids is the highest column id in each example, i.e. the
  # number of columns actually present (ids start at 1; 0 is reserved for
  # question tokens and padding). Selecting ranges < max_num_columns_ids
  # therefore keeps exactly those columns.
condition = tf.math.less(ranges, max_num_columns_ids)
# <float32>[batch_size, max_num_columns]
column_score_mask = tf.cast(condition, dtype=tf.float32)
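  # Example: with column ids 1..3 present and max_num_columns = 5, ranges is
  # [0, 1, 2, 3, 4], max_num_columns_ids is 3 and the mask is [1., 1., 1., 0., 0.].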
return column_score_mask
def get_token_scores_from_column_scores(
column_ids,
column_probs,
input_mask,
max_num_columns,
):
"""Given the columns scores in [0,1] extracts the tokens scores.
It also gives a score of 1.0 for the question's tokens and padding.
Args:
column_ids: <int32>[batch_size, seq_length] additional to the columns' ids
[1, max_num_columns] the value 0 refers to question tokens and padding.
column_probs: <float32>[batch_size, max_column_id]: contains only the
columns' scores: question score or padding not included. The expected
values are in [0,1].
input_mask: <float32>[batch_size, seq_length] used to zero-out the padding.
max_num_columns: the maximum number of columns.
Returns:
<float32>[batch_size, seq_length]: The tokens' scores.
"""
col_index = segmented_tensor.IndexMap(
indices=column_ids, num_segments=max_num_columns + 1, batch_dims=1)
  # <float32>[batch_size, max_num_columns+1]: contains the question at position 0.
  # The scores for the question and padding are 1.
padded_column_scores = tf.pad(
column_probs, paddings=[[0, 0], [1, 0]], constant_values=1.0)
# <float32>[batch_size, seq_length]
return segmented_tensor.gather(
index=col_index, values=padded_column_scores) * tf.cast(
input_mask, dtype=tf.float32)
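# Illustrative example for get_token_scores_from_column_scores: with
# column_probs = [[0.2, 0.8]] (max_num_columns = 2), column_ids = [[0, 1, 1, 2, 2]]
# and input_mask = [[1, 1, 1, 1, 1]], the padded scores are [1.0, 0.2, 0.8] and
# the returned token scores are [[1.0, 0.2, 0.2, 0.8, 0.8]].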
def get_table_pruning_loss(
table_selector,
table_selector_output,
do_model_aggregation,
do_model_classification,
initial_features,
is_training,
config,
classification_fun):
"""Returns the table pruning loss if applicable."""
table_pruning_loss = None
if (table_selector.should_add_classification_loss() and
table_selector.get_sequence_output() is not None):
init_row_ids = initial_features["row_ids"]
init_table_mask = tf.cast(init_row_ids > 0, dtype=tf.int32)
init_aggregation_function_id = (
tf.squeeze(initial_features["aggregation_function_id"], axis=[1])
if do_model_aggregation else None)
init_classification_class_index = (
tf.squeeze(initial_features["classification_class_index"], axis=[1])
if do_model_classification else None)
init_answer, init_numeric_values, init_numeric_values_scale = (
utils.extract_answer_from_features(
features=initial_features,
use_answer_as_supervision=config.use_answer_as_supervision))
with tf.variable_scope(PRUNING_SCOPE, reuse=tf.AUTO_REUSE):
outputs_pruning = classification_fun(
config=config,
output_layer=table_selector.get_sequence_output(),
output_layer_aggregation=table_selector.get_pooled_output(),
label_ids=initial_features["label_ids"],
input_mask=initial_features["input_mask"],
table_mask=init_table_mask,
aggregation_function_id=init_aggregation_function_id,
answer=init_answer,
numeric_values=init_numeric_values,
numeric_values_scale=init_numeric_values_scale,
is_training=is_training,
row_ids=init_row_ids,
column_ids=initial_features["column_ids"],
classification_class_index=init_classification_class_index)
if table_pruning_loss is not None:
table_pruning_loss += outputs_pruning.total_loss
else:
table_pruning_loss = outputs_pruning.total_loss
return table_pruning_loss
|
ufora/distributed/S3/ActualS3Interface.py | ufora/ufora | 571 | 11196970 | <filename>ufora/distributed/S3/ActualS3Interface.py<gh_stars>100-1000
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ufora.distributed.S3.S3Interface as S3Interface
import datetime
import boto
import boto.utils
import logging
import os
import traceback
import StringIO
BUFFER_SIZE_OVERRIDE = 256 * 1024 * 1024
class BotoKeyFileObject(object):
def __init__(self, key):
self.key = key
self.bytesRead = 0
self.totalBytes = key.size
def __enter__(self):
return self
def __exit__(self, *args):
pass
def read(self, bytes):
if bytes <= 0:
return ""
data = self.key.read(bytes)
self.bytesRead += len(data)
while bytes > 0 and data == "" and self.bytesRead < self.totalBytes:
data = self.key.read(bytes)
self.bytesRead += len(data)
return data
def next(self):
if self.bytesRead == self.totalBytes:
raise StopIteration()
return self.read(BUFFER_SIZE_OVERRIDE)
def tell(self):
return self.bytesRead
def seek(self):
assert False, "Not implemented"
def write(self, *args):
assert False, "BotoKeyFileObjects are not writeable"
class ActualS3InterfaceFactory(S3Interface.S3InterfaceFactory):
def __call__(self, awsAccessKey='', awsSecretKey=''):
return ActualS3Interface((awsAccessKey, awsSecretKey))
def withMachine(self, machine):
return self
def parseS3Timestamp(timestamp):
diff_from_epoch = boto.utils.parse_ts(timestamp) - datetime.datetime.utcfromtimestamp(0)
return diff_from_epoch.total_seconds()
class ActualS3Interface(S3Interface.S3Interface):
"""implements a model for Amazon's S3 service using boto"""
def __init__(self, credentials):
self.credentials_ = credentials
def connectS3(self):
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
# override the default super-long timeout in boto.
# boto automatically retries timed out requests so it's best to keep a
# short timeout because S3 can sometimes (about 1 in 10 requests) stall
# for a long time.
boto.config.set('Boto', 'http_socket_timeout', '5')
boto.config.set('Boto', 'metadata_service_num_attempts', '10')
az = os.getenv('AWS_AVAILABILITY_ZONE')
boto_args = {}
if self.credentials_ != ('', ''):
boto_args = {
'aws_access_key_id': self.credentials_[0],
'aws_secret_access_key': self.credentials_[1]
}
if az:
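            # Availability zone names look like 'us-east-1a'; dropping the
            # trailing letter yields the region name ('us-east-1').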
return boto.s3.connect_to_region(az[:-1], **boto_args)
else:
return boto.connect_s3(**boto_args)
def initiateMultipartUpload(self, bucketName, eventualKeyName):
b = self.openOrCreateBucket_(bucketName)
return str(b.initiate_multipart_upload(eventualKeyName).id)
def completeMultipartUpload(self, bucketName, eventualKeyName, uploadId):
"""Complete a multipart upload"""
b = self.openOrCreateBucket_(bucketName)
mp = boto.s3.multipart.MultiPartUpload(b)
mp.key_name = eventualKeyName
mp.id = uploadId
mp.complete_upload()
def setMultipartUploadPart(self, bucketName, eventualKeyName, uploadId, oneBasedPartNumber, value):
"""Perform a portion of a multipart upload"""
stringAsFile = StringIO.StringIO(value)
b = self.openOrCreateBucket_(bucketName)
mp = boto.s3.multipart.MultiPartUpload(b)
mp.key_name = eventualKeyName
mp.id = uploadId
mp.upload_part_from_file(stringAsFile, oneBasedPartNumber)
def close(self):
pass
def listBuckets(self):
"""return a list of bucket names available in s3"""
s3 = self.connectS3()
return [str(bucket.name) for bucket in s3.get_all_buckets()]
def listKeysAndSizes(self, bucketName):
"""return a list of (keyname,keysize) tuples in a bucket"""
return self.listKeysWithPrefix(bucketName, None)
def listKeysWithPrefix(self, bucketName, prefix):
"""return a list of (keyname,keysize) tuples in a bucket"""
options = {} if prefix is None else {"prefix": prefix}
bucket = self.openBucket_(bucketName)
return [(str(key.name), key.size, parseS3Timestamp(key.last_modified))
for key in bucket.get_all_keys(**options)]
def getKeyValue(self, bucketName, keyName):
"""return the value of a key. raises KeyNotFound if it doesn't exist."""
key = self.openKey_(bucketName, keyName)
return key.get_contents_as_string()
def getKeyValueOverRange(self, bucketName, keyName, lowIndex, highIndex):
key = self.openKey_(bucketName, keyName)
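        # HTTP byte ranges are inclusive on both ends, so [lowIndex, highIndex)
        # maps to "bytes=lowIndex-(highIndex - 1)"; e.g. lowIndex=0, highIndex=10
        # requests "bytes=0-9", i.e. 10 bytes.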
return key.get_contents_as_string(
headers={"Range": "bytes=%s-%s" % (lowIndex, highIndex - 1)}
)
def getKeySize(self, bucketName, keyName):
return self.openKey_(bucketName, keyName).size
def openKey_(self, bucketName, keyName, overrides=None):
"""return a boto key object. raises KeyNotFound if it doesn't exist."""
# we don't verify access to the bucket because it's possible that
        # we have access to read the key but not the bucket
bucket = self.openBucket_(bucketName, verifyAccess=False)
key = bucket.get_key(keyName)
if key is None:
raise S3Interface.KeyNotFound(bucketName, keyName)
key.BufferSize = BUFFER_SIZE_OVERRIDE
return key
def tryOpenBucket_(self, bucketName, verifyAccess=True):
s3 = self.connectS3()
try:
bucket = s3.get_bucket(bucketName, validate=verifyAccess)
return bucket
except:
return None
def openBucket_(self, bucketName, verifyAccess=True):
bucket = self.tryOpenBucket_(bucketName, verifyAccess)
if bucket is None:
raise S3Interface.BucketNotFound(bucketName)
return bucket
def keyExists(self, bucketName, keyName):
"""Returns a bool indicating whether the key exists"""
# we don't verify access to the bucket because it's possible that
        # we have access to read the key but not the bucket
bucket = self.tryOpenBucket_(bucketName, verifyAccess=False)
if bucket is None:
return False
key = bucket.get_key(keyName)
return key is not None
def deleteKey(self, bucketName, keyName):
# we don't verify access to the bucket because it's possible that
        # we have access to read the key but not the bucket
bucket = self.tryOpenBucket_(bucketName, verifyAccess=False)
if bucket is None:
return False
key = bucket.get_key(keyName)
if key is None:
raise S3Interface.KeyNotFound(bucketName, keyName)
key.delete()
def bucketExists(self, bucketName):
"""Returns a bool indicating whether the bucket exists"""
bucket = self.tryOpenBucket_(bucketName)
return bucket is not None
def setKeyValue(self, bucketName, keyName, value):
"""sets key 'keyName' in bucket 'bucketName' to value.
creates the key and the bucket if they don't exist.
"""
bucket = self.openOrCreateBucket_(bucketName)
k = self.openOrCreateKey_(bucket, keyName)
k.set_contents_from_string(value)
def setKeyValueFromFile(self, bucketName, keyName, filePath):
bucket = self.openOrCreateBucket_(bucketName)
k = self.openOrCreateKey_(bucket, keyName)
k.set_contents_from_filename(filePath)
def openOrCreateBucket_(self, bucketName):
s3 = self.connectS3()
bucket = None
attempts = 0
while bucket is None:
bucket = s3.get_bucket(bucketName)
if bucket is None:
try:
bucket = s3.create_bucket(bucketName)
except:
attempts += 1
if attempts > 3:
logging.error("Failed to create a bucket. Giving up.:\n%s",
traceback.format_exc())
raise
logging.warn("error creating a bucket - trying again:\n %s",
traceback.format_exc())
return bucket
def openOrCreateKey_(self, bucket, keyName):
k = bucket.get_key(keyName)
if k is None:
k = bucket.new_key(keyName)
if k is None:
logging.warn("couldn't get amazon S3 bucket '%s'", bucket.name)
raise S3Interface.UnableToWriteKey(bucket.name, keyName)
return k
|
var/spack/repos/builtin/packages/xtrans/package.py | LiamBindle/spack | 2,360 | 11196980 | <gh_stars>1000+
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xtrans(AutotoolsPackage, XorgPackage):
"""xtrans is a library of code that is shared among various X packages to
handle network protocol transport in a modular fashion, allowing a
single place to add new transport types. It is used by the X server,
libX11, libICE, the X font server, and related components."""
homepage = "https://cgit.freedesktop.org/xorg/lib/libxtrans"
xorg_mirror_path = "lib/xtrans-1.3.5.tar.gz"
version('1.3.5', sha256='b7a577c1b6c75030145e53b4793db9c88f9359ac49e7d771d4385d21b3e5945d')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
examples/oneshot.py | wcastello/splunk-sdk-python | 495 | 11196983 | #!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A command line utility for executing oneshot Splunk searches."""
from __future__ import absolute_import
from pprint import pprint
import socket
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
from splunklib.client import connect
import splunklib.results as results
try:
import utils
except ImportError:
raise Exception("Add the SDK repository to your PYTHONPATH to run the examples "
"(e.g., export PYTHONPATH=~/splunk-sdk-python.")
def pretty(response):
reader = results.ResultsReader(response)
for result in reader:
if isinstance(result, dict):
pprint(result)
def main():
usage = "usage: oneshot.py <search>"
opts = utils.parse(sys.argv[1:], {}, ".splunkrc", usage=usage)
if len(opts.args) != 1:
utils.error("Search expression required", 2)
search = opts.args[0]
service = connect(**opts.kwargs)
socket.setdefaulttimeout(None)
response = service.jobs.oneshot(search)
pretty(response)
if __name__ == "__main__":
main()
|
sparklingpandas/utils.py | michalmonselise/sparklingpandas | 245 | 11197002 | """
Simple common utils shared between the sparklingpandas modules
"""
import sys
import os
import logging
from glob import glob
def add_pyspark_path():
"""Add PySpark to the library path based on the value of SPARK_HOME. """
try:
spark_home = os.environ['SPARK_HOME']
sys.path.append(os.path.join(spark_home, 'python'))
py4j_src_zip = glob(os.path.join(spark_home, 'python',
'lib', 'py4j-*-src.zip'))
if len(py4j_src_zip) == 0:
raise ValueError('py4j source archive not found in %s'
% os.path.join(spark_home, 'python', 'lib'))
else:
py4j_src_zip = sorted(py4j_src_zip)[::-1]
sys.path.append(py4j_src_zip[0])
except KeyError:
logging.error("""SPARK_HOME was not set. please set it. e.g.
SPARK_HOME='/home/...' ./bin/pyspark [program]""")
exit(-1)
except ValueError as e:
logging.error(str(e))
exit(-1)
|
tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial1_Solution_51988471.py | amita-kapoor/course-content-dl | 473 | 11197024 | <filename>tutorials/W1D3_MultiLayerPerceptrons/solutions/W1D3_Tutorial1_Solution_51988471.py
"""
Nope, since we did not use any layers that have different behaviours during test and train.
But these layers are very common so it's a good practice to always do it!
"""; |
envs/video_wrapper.py | tomasruizt/dads | 141 | 11197039 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import gym
from gym import Wrapper
from gym.wrappers.monitoring import video_recorder
class VideoWrapper(Wrapper):
def __init__(self, env, base_path, base_name=None, new_video_every_reset=False):
super(VideoWrapper, self).__init__(env)
self._base_path = base_path
self._base_name = base_name
self._new_video_every_reset = new_video_every_reset
if self._new_video_every_reset:
self._counter = 0
self._recorder = None
else:
if self._base_name is not None:
self._vid_name = os.path.join(self._base_path, self._base_name)
else:
self._vid_name = self._base_path
self._recorder = video_recorder.VideoRecorder(self.env, path=self._vid_name + '.mp4')
def reset(self):
if self._new_video_every_reset:
if self._recorder is not None:
self._recorder.close()
self._counter += 1
if self._base_name is not None:
self._vid_name = os.path.join(self._base_path, self._base_name + '_' + str(self._counter))
else:
self._vid_name = self._base_path + '_' + str(self._counter)
self._recorder = video_recorder.VideoRecorder(self.env, path=self._vid_name + '.mp4')
return self.env.reset()
def step(self, action):
self._recorder.capture_frame()
return self.env.step(action)
def close(self):
self._recorder.encoder.proc.stdin.flush()
self._recorder.close()
return self.env.close() |
tests/framework/cli/hooks/test_manager.py | daniel-falk/kedro | 2,047 | 11197067 | import pytest
from kedro.framework.cli.hooks.manager import CLIHooksManager
from kedro.framework.cli.hooks.specs import CLICommandSpecs
@pytest.mark.parametrize(
"hook_specs,hook_name,hook_params",
[(CLICommandSpecs, "before_command_run", ("project_metadata", "command_args"))],
)
def test_hook_manager_can_call_hooks_defined_in_specs(
hook_specs, hook_name, hook_params
):
"""Tests to make sure that the hook manager can call all hooks defined by specs."""
cli_hook_manager = CLIHooksManager()
hook = getattr(cli_hook_manager.hook, hook_name)
assert hook.spec.namespace == hook_specs
kwargs = {param: None for param in hook_params}
result = hook(**kwargs)
# since there hasn't been any hook implementation, the result should be empty
# but it shouldn't have raised
assert result == []
|
tests/template_tests/filter_tests/test_addslashes.py | Fak3/django | 5,079 | 11197081 | <gh_stars>1000+
from django.template.defaultfilters import addslashes
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class AddslashesTests(SimpleTestCase):
@setup({'addslashes01': '{% autoescape off %}{{ a|addslashes }} {{ b|addslashes }}{% endautoescape %}'})
def test_addslashes01(self):
output = self.engine.render_to_string('addslashes01', {"a": "<a>'", "b": mark_safe("<a>'")})
self.assertEqual(output, r"<a>\' <a>\'")
@setup({'addslashes02': '{{ a|addslashes }} {{ b|addslashes }}'})
def test_addslashes02(self):
output = self.engine.render_to_string('addslashes02', {"a": "<a>'", "b": mark_safe("<a>'")})
self.assertEqual(output, r"<a>\' <a>\'")
class FunctionTests(SimpleTestCase):
def test_quotes(self):
self.assertEqual(
addslashes('"double quotes" and \'single quotes\''),
'\\"double quotes\\" and \\\'single quotes\\\'',
)
def test_backslashes(self):
self.assertEqual(addslashes(r'\ : backslashes, too'), '\\\\ : backslashes, too')
def test_non_string_input(self):
self.assertEqual(addslashes(123), '123')
|
llvm/bindings/python/llvm/common.py | medismailben/llvm-project | 2,338 | 11197084 | <gh_stars>1000+
#===- common.py - Python LLVM Bindings -----------------------*- python -*--===#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
from ctypes import POINTER
from ctypes import c_void_p
from ctypes import cdll
import ctypes.util
import platform
# LLVM_VERSION: sync with PACKAGE_VERSION in CMakeLists.txt
# but leave out the 'svn' suffix.
LLVM_VERSION = '10.0.0'
__all__ = [
'c_object_p',
'get_library',
]
c_object_p = POINTER(c_void_p)
class LLVMObject(object):
"""Base class for objects that are backed by an LLVM data structure.
This class should never be instantiated outside of this package.
"""
def __init__(self, ptr, ownable=True, disposer=None):
assert isinstance(ptr, c_object_p)
self._ptr = self._as_parameter_ = ptr
self._self_owned = True
self._ownable = ownable
self._disposer = disposer
self._owned_objects = []
def take_ownership(self, obj):
"""Take ownership of another object.
When you take ownership of another object, you are responsible for
destroying that object. In addition, a reference to that object is
placed inside this object so the Python garbage collector will not
collect the object while it is still alive in libLLVM.
This method should likely only be called from within modules inside
this package.
"""
assert isinstance(obj, LLVMObject)
self._owned_objects.append(obj)
obj._self_owned = False
def from_param(self):
"""ctypes function that converts this object to a function parameter."""
return self._as_parameter_
def __del__(self):
if not hasattr(self, '_self_owned') or not hasattr(self, '_disposer'):
return
if self._self_owned and self._disposer:
self._disposer(self)
class CachedProperty(object):
"""Decorator that caches the result of a property lookup.
This is a useful replacement for @property. It is recommended to use this
decorator on properties that invoke C API calls for which the result of the
call will be idempotent.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
try:
self.__doc__ = wrapped.__doc__
except: # pragma: no cover
pass
def __get__(self, instance, instance_type=None):
if instance is None:
return self
value = self.wrapped(instance)
setattr(instance, self.wrapped.__name__, value)
return value
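# Illustrative usage sketch (hypothetical class, not part of this module): a
# property decorated with CachedProperty is computed once and then stored on
# the instance, shadowing the descriptor for later lookups.
#
#   class Module(LLVMObject):
#       @CachedProperty
#       def triple(self):
#           return self._expensive_ffi_lookup()  # hypothetical helper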
def get_library():
"""Obtain a reference to the llvm library."""
# On Linux, ctypes.cdll.LoadLibrary() respects LD_LIBRARY_PATH
# while ctypes.util.find_library() doesn't.
# See http://docs.python.org/2/library/ctypes.html#finding-shared-libraries
#
# To make it possible to run the unit tests without installing the LLVM shared
    # library into a default linker search path, always try ctypes.cdll.LoadLibrary()
# with all possible library names first, then try ctypes.util.find_library().
names = ['LLVM-' + LLVM_VERSION, 'LLVM-' + LLVM_VERSION + 'svn']
t = platform.system()
if t == 'Darwin':
pfx, ext = 'lib', '.dylib'
elif t == 'Windows':
pfx, ext = '', '.dll'
else:
pfx, ext = 'lib', '.so'
for i in names:
try:
lib = cdll.LoadLibrary(pfx + i + ext)
except OSError:
pass
else:
return lib
for i in names:
t = ctypes.util.find_library(i)
if t:
return cdll.LoadLibrary(t)
raise Exception('LLVM shared library not found!')
|
libraries/botframework-connector/botframework/connector/auth/channel_provider.py | Fl4v/botbuilder-python | 388 | 11197101 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from abc import ABC, abstractmethod
class ChannelProvider(ABC):
"""
ChannelProvider interface. This interface allows Bots to provide their own
implementation for the configuration parameters to connect to a Bot.
Framework channel service.
"""
@abstractmethod
async def get_channel_service(self) -> str:
raise NotImplementedError()
@abstractmethod
def is_government(self) -> bool:
raise NotImplementedError()
@abstractmethod
def is_public_azure(self) -> bool:
raise NotImplementedError()
|
traces/belgium/plot_log_bandwidth.py | matvaibhav/pensieve | 462 | 11197117 | import numpy as np
import matplotlib.pyplot as plt
PACKET_SIZE = 1500.0 # bytes
TIME_INTERVAL = 5.0
BITS_IN_BYTE = 8.0
MBITS_IN_BITS = 1000000.0
MILLISECONDS_IN_SECONDS = 1000.0
N = 100
LINK_FILE = './logs/report_bus_0010.log'
time_ms = []
bytes_recv = []
recv_time = []
with open(LINK_FILE, 'rb') as f:
for line in f:
parse = line.split()
time_ms.append(int(parse[1]))
bytes_recv.append(float(parse[4]))
recv_time.append(float(parse[5]))
time_ms = np.array(time_ms)
bytes_recv = np.array(bytes_recv)
recv_time = np.array(recv_time)
throughput_all = bytes_recv / recv_time
time_ms = time_ms - time_ms[0]
time_ms = time_ms / MILLISECONDS_IN_SECONDS
throughput_all = throughput_all * BITS_IN_BYTE / MBITS_IN_BITS * MILLISECONDS_IN_SECONDS
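# Unit conversion above (assuming recv_time is reported in milliseconds):
# bytes/ms * 8 bits/byte / 1e6 bits/Mbit * 1000 ms/s = Mbit/s.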
plt.plot(time_ms, throughput_all)
plt.xlabel('Time (second)')
plt.ylabel('Throughput (Mbit/sec)')
plt.show()
|
facedancer/backends/greatdancer.py | hugmyndakassi/Facedancer | 345 | 11197119 | <reponame>hugmyndakassi/Facedancer<filename>facedancer/backends/greatdancer.py
# GreatDancerApp.py
import sys
import time
import codecs
import logging
import traceback
from ..core import *
from ..USB import *
from ..USBEndpoint import USBEndpoint
# FIXME: abstract this to the logging library
LOGLEVEL_TRACE = 5
class GreatDancerApp(FacedancerApp):
"""
Backend for using GreatFET devices as FaceDancers.
"""
app_name = "GreatDancer"
app_num = 0x00 # This doesn't have any meaning for us.
# Interrupt register (USBSTS) bits masks.
USBSTS_D_UI = (1 << 0)
USBSTS_D_URI = (1 << 6)
USBSTS_D_NAKI = (1 << 16)
# Number of supported USB endpoints.
# TODO: bump this up when we develop support using USB0 (cables flipped)
SUPPORTED_ENDPOINTS = 4
# USB directions
HOST_TO_DEVICE = 0
DEVICE_TO_HOST = 1
# Get status command indexes
GET_USBSTS = 0
GET_ENDPTSETUPSTAT = 1
GET_ENDPTCOMPLETE = 2
GET_ENDPTSTATUS = 3
GET_ENDPTNAK = 4
# Quirk flags
QUIRK_MANUAL_SET_ADDRESS = 0x01
@classmethod
def appropriate_for_environment(cls, backend_name):
"""
Determines if the current environment seems appropriate
for using the GreatDancer backend.
"""
# Check: if we have a backend name other than greatfet,
# the user is trying to use something else. Abort!
if backend_name and backend_name != "greatfet":
return False
# If we're not explicitly trying to use something else,
# see if there's a connected GreatFET.
try:
import greatfet
gf = greatfet.GreatFET()
return gf.supports_api('greatdancer')
except ImportError:
logging.info("Skipping GreatFET-based devices, as the greatfet python module isn't installed.")
return False
except:
return False
def __init__(self, device=None, verbose=0, quirks=None):
"""
Sets up a new GreatFET-backed Facedancer (GreatDancer) application.
device: The GreatFET device that will act as our GreatDancer.
verbose: The verbosity level of the given application.
"""
import greatfet
if device is None:
device = greatfet.GreatFET()
self.device = device
self.device.comms.get_exclusive_access()
FacedancerApp.__init__(self, device, verbose)
self.connected_device = None
# Grab the raw API object from the GreatFET object.
# This has the low-level RPCs used for raw USB control.
self.api = self.device.apis.greatdancer
        # Initialize a dictionary that will track whether each endpoint
        # is currently stalled.
self.endpoint_stalled = {}
for i in range(self.SUPPORTED_ENDPOINTS):
self.endpoint_stalled[i] = False
# Assume a max packet size of 64 until configured otherwise.
self.max_ep0_packet_size = 64
# Start off by assuming we're not waiting for an OUT control transfer's
        # data stage. See _handle_transfer_complete_on_endpoint for details.
self.pending_control_request = None
# Store a reference to the device's active configuration,
# which we'll use to know which endpoints we'll need to check
# for data transfer readiness.
self.configuration = None
#
# Store our list of quirks to handle.
#
if quirks:
self.quirks = quirks
else:
self.quirks = []
def init_commands(self):
"""
API compatibility function; not necessary for GreatDancer.
"""
pass
def get_version(self):
"""
Returns information about the active GreatDancer version.
"""
# TODO: Return the GreatFET software version, or something indicating
# the GreatFET API number?
raise NotImplementedError()
def ack_status_stage(self, direction=HOST_TO_DEVICE, endpoint_number=0, blocking=False):
"""
Handles the status stage of a correctly completed control request,
by priming the appropriate endpoint to handle the status phase.
direction: Determines if we're ACK'ing an IN or OUT vendor request.
(This should match the direction of the DATA stage.)
endpoint_number: The endpoint number on which the control request
occurred.
blocking: True if we should wait for the ACK to be fully issued
before returning.
"""
if direction == self.HOST_TO_DEVICE:
# If this was an OUT request, we'll prime the output buffer to
# respond with the ZLP expected during the status stage.
self.send_on_endpoint(endpoint_number, data=[], blocking=blocking)
else:
# If this was an IN request, we'll need to set up a transfer descriptor
# so the status phase can operate correctly. This effectively reads the
# zero length packet from the STATUS phase.
self.read_from_endpoint(endpoint_number)
def _generate_endpoint_config_arguments(self, config):
"""
Generates the data content for an Endpoint Configuration command that will
set up the GreatDancer's endpoints to match the active configuration.
config: A USBConfiguration object that represents the configuration being
applied to the GreatDancer.
"""
arguments = []
# If our configuration is None, there's nothing to configure; bail out.
if config is None:
return arguments
for interface in config.get_interfaces():
for endpoint in interface.get_endpoints():
logging.info(f"Configuring {endpoint}.")
triple = (endpoint.get_address(), endpoint.max_packet_size, endpoint.transfer_type,)
arguments.append(triple)
return arguments
def connect(self, usb_device, max_ep0_packet_size=64):
"""
Prepares the GreatDancer to connect to the target host and emulate
a given device.
usb_device: The USBDevice object that represents the device to be
emulated.
"""
self.max_ep0_packet_size = max_ep0_packet_size
quirks = 0
# Compute our quirk flags.
if 'manual_set_address' in self.quirks:
logging.info("Handling SET_ADDRESS on the host side!")
quirks |= self.QUIRK_MANUAL_SET_ADDRESS
self.api.connect(self.max_ep0_packet_size, quirks)
self.connected_device = usb_device
logging.info("Connecting to host.")
def disconnect(self):
""" Disconnects the GreatDancer from its target host. """
logging.info("Disconnecting from host.")
self.device.comms.release_exclusive_access()
self.api.disconnect()
def _wait_until_ready_to_send(self, ep_num):
# If we're already ready, we don't need to do anything. Abort.
if self._is_ready_for_priming(ep_num, self.DEVICE_TO_HOST):
return
# Otherwise, wait until we're ready to send...
while not self._is_ready_for_priming(ep_num, self.DEVICE_TO_HOST):
pass
# ... and since we've blocked the app from cleaning up any transfer
# descriptors automatically by spinning in this thread, we'll clean up
# the relevant transfers here.
self._clean_up_transfers_for_endpoint(ep_num, self.DEVICE_TO_HOST)
def send_on_endpoint(self, ep_num, data, blocking=True):
"""
Sends a collection of USB data on a given endpoint.
ep_num: The number of the IN endpoint on which data should be sent.
data: The data to be sent.
blocking: If true, this function will wait for the transfer to complete.
"""
logging.log(LOGLEVEL_TRACE, f"EP{ep_num}/IN: <- {bytes(data)}")
self._wait_until_ready_to_send(ep_num)
self.api.send_on_endpoint(ep_num, bytes(data))
# If we're blocking, wait until the transfer completes.
if blocking:
while not self._transfer_is_complete(ep_num, self.DEVICE_TO_HOST):
pass
self._clean_up_transfers_for_endpoint(ep_num, self.DEVICE_TO_HOST)
def read_from_endpoint(self, ep_num):
"""
Reads a block of data from the given endpoint.
ep_num: The number of the OUT endpoint on which data is to be rx'd.
"""
# Start a nonblocking read from the given endpoint...
self._prime_out_endpoint(ep_num)
# ... and wait for the transfer to complete.
while not self._transfer_is_complete(ep_num, self.HOST_TO_DEVICE):
pass
# Finally, return the result.
return self._finish_primed_read_on_endpoint(ep_num)
@staticmethod
def _endpoint_address(ep_num, direction):
"""
        Returns the endpoint address that corresponds to a given endpoint
        number and direction.
"""
if direction:
return ep_num | 0x80
else:
return ep_num
def stall_endpoint(self, ep_num, direction=0):
"""
Stalls the provided endpoint, as defined in the USB spec.
ep_num: The number of the endpoint to be stalled.
"""
in_vs_out = "IN" if direction else "OUT"
logging.log(LOGLEVEL_TRACE, "Stalling EP{} {}".format(ep_num, in_vs_out))
self.endpoint_stalled[ep_num] = True
self.api.stall_endpoint(self._endpoint_address(ep_num, direction))
def stall_ep0(self):
"""
Convenience function that stalls the control endpoint zero.
"""
self.stall_endpoint(0)
def set_address(self, address, defer=False):
"""
Sets the device address of the GreatDancer. Usually only used during
initial configuration.
address: The address that the GreatDancer should assume.
defer: True iff the set_address request should wait for an active transaction to finish.
"""
self.api.set_address(address, 1 if defer else 0)
@staticmethod
def _decode_usb_register(transfer_result):
"""
Decodes a raw 32-bit register value from a form encoded
for transit as a USB control request.
transfer_result: The value returned by the vendor request.
returns: The raw integer value of the given register.
"""
status_hex = codecs.encode(transfer_result[::-1], 'hex')
return int(status_hex, 16)
def _fetch_irq_status(self):
"""
Fetch the USB controller's pending-IRQ bitmask, which indicates
which interrupts need to be serviced.
returns: A raw integer bitmap.
"""
return self.api.get_status(self.GET_USBSTS)
def _fetch_setup_status(self):
"""
Fetch the USB controller's "pending setup packet" bitmask, which
indicates which endpoints have setup packets to be read.
returns: A raw integer bitmap.
"""
return self.api.get_status(self.GET_ENDPTSETUPSTAT)
def _handle_setup_events(self):
"""
Handles any outstanding setup events on the USB controller.
"""
# Determine if we have setup packets on any of our endpoints.
status = self._fetch_setup_status()
# If we don't, abort.
if not status:
return
# Otherwise, figure out which endpoints have outstanding setup events,
# and handle them.
for i in range(self.SUPPORTED_ENDPOINTS):
if status & (1 << i):
self._handle_setup_event_on_endpoint(i)
def _handle_setup_event_on_endpoint(self, endpoint_number):
"""
Handles a known outstanding setup event on a given endpoint.
endpoint_number: The endpoint number for which a setup event should be serviced.
"""
# HACK: to maintain API compatibility with the existing facedancer API,
# we need to know if a stall happens at any point during our handler.
self.endpoint_stalled[endpoint_number] = False
# Read the data from the SETUP stage...
data = bytearray(self.api.read_setup(endpoint_number))
request = self.connected_device.create_request(data)
# If this is an OUT request, handle the data stage,
# and add it to the request.
is_out = request.get_direction() == self.HOST_TO_DEVICE
has_data = (request.length > 0)
# Special case: if this is an OUT request with a data stage, we won't
# handle the request until the data stage has been completed. Instead,
# we'll stash away the data received in the setup stage, prime the
# endpoint for the data stage, and then wait for the data stage to
# complete, triggering a corresponding code path in
        # _handle_transfer_complete_on_endpoint.
if is_out and has_data:
self._prime_out_endpoint(endpoint_number)
self.pending_control_request = request
return
self.connected_device.handle_request(request)
if not is_out and not self.endpoint_stalled[endpoint_number]:
self.ack_status_stage(direction=self.DEVICE_TO_HOST)
def _fetch_transfer_status(self):
"""
Fetch the USB controller's "completed transfer" bitmask, which
indicates which endpoints have recently completed transactions.
returns: A raw integer bitmap.
"""
return self.api.get_status(self.GET_ENDPTCOMPLETE)
def _transfer_is_complete(self, endpoint_number, direction):
"""
Returns true iff a given endpoint has just completed a transfer.
Can be used to check for completion of a non-blocking transfer.
endpoint_number: The endpoint number to be queried.
direction:
The direction of the transfer. Should be self.HOST_TO_DEVICE or
self.DEVICE_TO_HOST.
"""
status = self._fetch_transfer_status()
# From the LPC43xx manual: out endpoint completions start at bit zero,
# while in endpoint completions start at bit 16.
out_is_ready = (status & (1 << endpoint_number))
in_is_ready = (status & (1 << (endpoint_number + 16)))
if direction == self.HOST_TO_DEVICE:
return out_is_ready
else:
return in_is_ready
def _handle_transfer_events(self):
"""
Handles any outstanding setup events on the USB controller.
"""
# Determine if we have ready packets on any of our endpoints.
status = self._fetch_transfer_status()
# If we don't, abort.
if not status:
return
# Figure out which endpoints have recently completed transfers,
# and clean up any transactions on those endpoints. It's important
# that this be done /before/ the _handle_transfer_complete... section
# below, as those can generate further events which will need the freed
# transfer descriptors.
# [Note that it's safe to clean up the transfer descriptors before reading,
# here-- the GreatFET's USB controller has transparently moved any data
# from OUT transactions into a holding buffer for us. Nice of it!]
for i in range(self.SUPPORTED_ENDPOINTS):
if status & (1 << i):
self._clean_up_transfers_for_endpoint(i, self.HOST_TO_DEVICE)
if status & (1 << (i + 16)):
self._clean_up_transfers_for_endpoint(i, self.DEVICE_TO_HOST)
# Now that we've cleaned up all relevant transfer descriptors, trigger
# any events that should occur due to the completed transaction.
for i in range(self.SUPPORTED_ENDPOINTS):
if status & (1 << i):
self._handle_transfer_complete_on_endpoint(i, self.HOST_TO_DEVICE)
if status & (1 << (i + 16)):
self._handle_transfer_complete_on_endpoint(i, self.DEVICE_TO_HOST)
# Finally, after completing all of the above, we may now have idle
# (unprimed) endpoints. For OUT endpoints, we'll need to re-prime them
# so we're ready for receipt; for IN endpoints, we'll want to give the
# emulated device a chance to provide new data.
self._handle_transfer_readiness()
def _finish_primed_read_on_endpoint(self, endpoint_number):
"""
Completes a non-blocking (primed) read on an OUT endpoint by reading any data
received since the endpoint was primed. See read_from_endpoint for an example
of proper use.
endpoint_number: The endpoint to read from.
"""
return self.api.finish_nonblocking_read(endpoint_number)
def _clean_up_transfers_for_endpoint(self, endpoint_number, direction):
"""
Cleans up any outstanding transfers on the given endpoint. This must be
called for each completed transaction so the relevant transfer descriptors
can be re-used.
There's no harm in calling this if a transaction isn't complete, but it _must_
be called at least once for each completed transaction.
endpoint_number: The endpoint number whose transfer descriptors should be cleaned
up.
direction: The endpoint direction for which TD's should be cleaned.
"""
# Ask the device to clean up any transaction descriptors related to the transfer.
self.api.clean_up_transfer(self._endpoint_address(endpoint_number, direction))
def _is_control_endpoint(self, endpoint_number):
"""
Returns true iff the given endpoint number corresponds to a control endpoint.
"""
# FIXME: Support control endpoints other than EP0.
return endpoint_number == 0
def _handle_transfer_complete_on_endpoint(self, endpoint_number, direction):
"""
Handles a known-completed transfer on a given endpoint.
endpoint_number: The endpoint number for which a setup event should be serviced.
"""
# If a transfer has just completed on an OUT endpoint, we've just received data
# that we need to handle.
if direction == self.HOST_TO_DEVICE:
# Special case: if we've just received data on a control endpoint,
# we're completing a control request.
if self._is_control_endpoint(endpoint_number):
# If we received a setup packet to handle, handle it.
if self.pending_control_request:
# Read the rest of the data from the endpoint, completing
# the control request.
new_data = self._finish_primed_read_on_endpoint(endpoint_number)
# Append our new data to the pending control request.
self.pending_control_request.data.extend(new_data)
all_data_received = len(self.pending_control_request.data) == self.pending_control_request.length
is_short_packet = len(new_data) < self.max_ep0_packet_size
if all_data_received or is_short_packet:
# Handle the completed setup request...
self.connected_device.handle_request(self.pending_control_request)
# And clear our pending setup data.
self.pending_control_request = None
else:
# Otherwise, re-prime the endpoint to grab the next packet.
self._prime_out_endpoint(endpoint_number)
# Typical case: this isn't a control endpoint, so we don't have a
# defined packet format. Read the data and issue the corresponding
# callback.
else:
data = self._finish_primed_read_on_endpoint(endpoint_number)
logging.log(LOGLEVEL_TRACE, f"EP{endpoint_number}/OUT -> {data}")
self.connected_device.handle_data_available(endpoint_number, data)
def _fetch_transfer_readiness(self):
"""
Queries the GreatFET for a bitmap describing the endpoints that are not
currently primed, and thus ready to be primed again.
"""
return self.api.get_status(self.GET_ENDPTSTATUS)
def _fetch_endpoint_nak_status(self):
"""
Queries the GreatFET for a bitmap describing the endpoints that have issued
a NAK since the last time this was checked.
"""
return self.api.get_status(self.GET_ENDPTNAK)
def _prime_out_endpoint(self, endpoint_number):
"""
Primes an out endpoint, allowing it to receive data the next time the host chooses to send it.
endpoint_number: The endpoint that should be primed.
"""
self.api.start_nonblocking_read(endpoint_number)
def _handle_transfer_readiness(self):
"""
Check to see if any non-control IN endpoints are ready to
accept data from our device, and handle if they are.
"""
# If we haven't been configured yet, we can't have any
        # endpoints other than the control endpoint, and we don't need to
        # check any other endpoints for readiness.
if not self.configuration:
return
# Fetch the endpoint status.
status = self._fetch_transfer_readiness()
# Check the status of every endpoint /except/ endpoint zero,
# which is always a control endpoint and set handled by our
# control transfer handler.
for interface in self.configuration.get_interfaces():
for endpoint in interface.get_endpoints():
# Check to see if the endpoint is ready to be primed.
if self._is_ready_for_priming(endpoint.number, endpoint.direction):
# If this is an IN endpoint, we're ready to accept data to be
# presented on the next IN token.
if endpoint.direction == USBEndpoint.direction_in:
self.connected_device.handle_buffer_available(endpoint.number)
# If this is an OUT endpoint, we'll need to prime the endpoint to
# accept new data. This provides a place for data to go once the
# host sends an OUT token.
else:
self._prime_out_endpoint(endpoint.number)
def _is_ready_for_priming(self, ep_num, direction):
"""
Returns true iff the endpoint is ready to be primed.
ep_num: The endpoint number in question.
direction: The endpoint direction in question.
"""
# Fetch the endpoint status.
status = self._fetch_transfer_readiness()
ready_for_in = (not status & (1 << (ep_num + 16)))
ready_for_out = (not status & (1 << (ep_num)))
if direction == self.HOST_TO_DEVICE:
return ready_for_out
else:
return ready_for_in
@classmethod
def _has_issued_nak(cls, ep_nak, ep_num, direction):
"""
Interprets an ENDPTNAK status result to determine
whether a given endpoint has NAK'd.
ep_nak: The status work read from the ENDPTNAK register
ep_num: The endpoint number in question.
direction: The endpoint direction in question.
"""
in_nak = (ep_nak & (1 << (ep_num + 16)))
out_nak = (ep_nak & (1 << (ep_num)))
if direction == cls.HOST_TO_DEVICE:
return out_nak
else:
return in_nak
def _bus_reset(self):
"""
Triggers the GreatDancer to perform its side of a bus reset.
"""
logging.debug("Host issued bus reset.")
if self.connected_device:
self.connected_device.handle_bus_reset()
else:
self.api.bus_reset()
def reset(self):
"""
Triggers the GreatFET to handle its side of a bus reset.
"""
self.api.bus_reset()
def _handle_nak_events(self):
"""
Handles an event in which the GreatDancer has NAK'd an IN token.
"""
# If we haven't been configured yet, we can't have any
# endpoints other than the control endpoint, and we don't need to
# handle any NAKs.
if not self.configuration:
return
# Fetch the endpoint status.
status = self._fetch_endpoint_nak_status()
# Iterate over each usable endpoint.
for interface in self.configuration.get_interfaces():
for endpoint in interface.get_endpoints():
# If the endpoint has NAK'd, issued the relevant callback.
if self._has_issued_nak(status, endpoint.number, endpoint.direction):
self.connected_device.handle_nak(endpoint.number)
def _configure_endpoints(self, configuration):
"""
Configures the GreatDancer's endpoints to match the provided configuration.
        configuration: The USBConfiguration object that describes the endpoints provided.
"""
endpoint_triplets = self._generate_endpoint_config_arguments(configuration)
# If we need to issue a configuration command, issue one.
# (If there are no endpoints other than control, this command will be
# empty, and we can skip this.)
if endpoint_triplets:
self.api.set_up_endpoints(*endpoint_triplets)
def configured(self, configuration):
"""
Callback that's issued when a USBDevice is configured, e.g. by the
SET_CONFIGURATION request. Allows us to apply the new configuration.
configuration: The configuration applied by the SET_CONFIG request.
"""
self._configure_endpoints(configuration)
self.configuration = configuration
# If we've just set up endpoints, check to see if any of them
# need to be primed, or have NAKs waiting.
self._handle_transfer_readiness()
self._handle_nak_events()
def service_irqs(self):
"""
Core routine of the Facedancer execution/event loop. Continuously monitors the
GreatDancer's execution status, and reacts as events occur.
"""
status = self._fetch_irq_status()
# Other bits that may be of interest:
# D_SRI = start of frame received
# D_PCI = port change detect (switched between low, full, high speed state)
# D_SLI = device controller suspend
# D_UEI = USB error; completion of transaction caused error, see usb1_isr in firmware
# D_NAKI = both the tx/rx NAK bit and corresponding endpoint NAK enable are set
if status & self.USBSTS_D_UI:
self._handle_setup_events()
self._handle_transfer_events()
if status & self.USBSTS_D_URI:
self._bus_reset()
if status & self.USBSTS_D_NAKI:
self._handle_nak_events()
|
src/examples/pin/count_inst.py | patacca/Triton | 2,337 | 11197123 | #!/usr/bin/env python2
## -*- coding: utf-8 -*-
from pintool import *
from triton import ARCH
count = 0
def mycb(inst):
global count
count += 1
def fini():
print("Instruction count : ", count)
if __name__ == '__main__':
ctx = getTritonContext()
ctx.enableSymbolicEngine(False)
ctx.enableTaintEngine(False)
startAnalysisFromEntry()
insertCall(mycb, INSERT_POINT.BEFORE)
insertCall(fini, INSERT_POINT.FINI)
runProgram()
|
tests/test_remote_metadata.py | pmav99/OWSLib | 218 | 11197135 | <reponame>pmav99/OWSLib
import pytest
import owslib
from owslib.etree import etree
from owslib.wfs import WebFeatureService
from owslib.wms import WebMapService
from tests.utils import service_ok
WMS_SERVICE_URL = 'https://www.dov.vlaanderen.be/geoserver/gw_meetnetten/' \
'wms?request=GetCapabilities'
WFS_SERVICE_URL = 'https://www.dov.vlaanderen.be/geoserver/gw_meetnetten/' \
'wfs?request=GetCapabilities'
@pytest.fixture
def mp_wfs_100_nometadata(monkeypatch):
"""Monkeypatch the call to the remote GetCapabilities request of WFS
version 1.0.0, not containing MetadataURLs.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def read(*args, **kwargs):
with open('tests/resources/wfs_dov_getcapabilities_100_nometadata.xml',
'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
data = etree.fromstring(data)
return data
monkeypatch.setattr(
owslib.feature.common.WFSCapabilitiesReader, 'read', read)
@pytest.fixture
def mp_wfs_110(monkeypatch):
"""Monkeypatch the call to the remote GetCapabilities request of WFS
version 1.1.0.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def read(*args, **kwargs):
with open('tests/resources/wfs_dov_getcapabilities_110.xml', 'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
data = etree.fromstring(data)
return data
monkeypatch.setattr(
owslib.feature.common.WFSCapabilitiesReader, 'read', read)
@pytest.fixture
def mp_wfs_110_nometadata(monkeypatch):
"""Monkeypatch the call to the remote GetCapabilities request of WFS
version 1.1.0, not containing MetadataURLs.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def read(*args, **kwargs):
with open('tests/resources/wfs_dov_getcapabilities_110_nometadata.xml',
'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
data = etree.fromstring(data)
return data
monkeypatch.setattr(
owslib.feature.common.WFSCapabilitiesReader, 'read', read)
@pytest.fixture
def mp_wfs_200(monkeypatch):
"""Monkeypatch the call to the remote GetCapabilities request of WFS
version 2.0.0.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def read(*args, **kwargs):
with open('tests/resources/wfs_dov_getcapabilities_200.xml',
'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
data = etree.fromstring(data)
return data
monkeypatch.setattr(
owslib.feature.common.WFSCapabilitiesReader, 'read', read)
@pytest.fixture
def mp_wfs_200_nometadata(monkeypatch):
"""Monkeypatch the call to the remote GetCapabilities request of WFS
version 2.0.0, not containing MetadataURLs.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def read(*args, **kwargs):
with open('tests/resources/wfs_dov_getcapabilities_200_nometadata.xml',
'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
data = etree.fromstring(data)
return data
monkeypatch.setattr(
owslib.feature.common.WFSCapabilitiesReader, 'read', read)
@pytest.fixture
def mp_wms_111_nometadata(monkeypatch):
"""Monkeypatch the call to the remote GetCapabilities request of WMS
version 1.1.1, not containing MetadataURLs.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def read(*args, **kwargs):
with open('tests/resources/wms_dov_getcapabilities_111_nometadata.xml',
'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
data = etree.fromstring(data)
return data
monkeypatch.setattr(
owslib.map.common.WMSCapabilitiesReader, 'read', read)
@pytest.fixture
def mp_wms_130(monkeypatch):
"""Monkeypatch the call to the remote GetCapabilities request of WMS
version 1.3.0.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def read(*args, **kwargs):
with open('tests/resources/wms_dov_getcapabilities_130.xml', 'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
data = etree.fromstring(data)
return data
monkeypatch.setattr(
owslib.map.common.WMSCapabilitiesReader, 'read', read)
@pytest.fixture
def mp_wms_130_nometadata(monkeypatch):
"""Monkeypatch the call to the remote GetCapabilities request of WMS
version 1.3.0, not containing MetadataURLs.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def read(*args, **kwargs):
with open('tests/resources/wms_dov_getcapabilities_130_nometadata'
'.xml', 'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
data = etree.fromstring(data)
return data
monkeypatch.setattr(
owslib.map.common.WMSCapabilitiesReader, 'read', read)
@pytest.fixture
def mp_remote_md(monkeypatch):
def openURL(*args, **kwargs):
with open('tests/resources/csw_dov_getrecordbyid.xml', 'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
data = etree.fromstring(data)
return data
monkeypatch.setattr(owslib.util, 'openURL', openURL)
class TestOffline(object):
def test_wfs_100_noremotemd_parse_all(self, mp_wfs_100_nometadata):
"""Test the remote metadata parsing for WFS 1.0.0.
Tests parsing the remote metadata for all layers.
Test whether the method is available and returns no remote metadata
if no MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wfs_100_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='1.0.0',
parse_remote_metadata=True)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wfs_100_noremotemd_parse_single(self, mp_wfs_100_nometadata):
"""Test the remote metadata parsing for WFS 1.0.0.
Tests parsing the remote metadata for a single layer.
Test whether the method is available and returns no remote metadata
if no MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wfs_100_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='1.0.0',
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
layer.parse_remote_metadata()
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wfs_100_noremotemd_parse_none(self, mp_wfs_100_nometadata):
"""Test the remote metadata parsing for WFS 1.0.0.
Tests the case when no remote metadata is parsed.
Test whether no remote metadata is returned.
Parameters
----------
mp_wfs_100_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='1.0.0',
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wfs_110_noremotemd_parse_all(self, mp_wfs_110_nometadata):
"""Test the remote metadata parsing for WFS 1.1.0.
Tests parsing the remote metadata for all layers.
Test whether the method is available and returns no remote metadata
if no MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wfs_110_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='1.1.0',
parse_remote_metadata=True)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wfs_110_noremotemd_parse_single(self, mp_wfs_110_nometadata):
"""Test the remote metadata parsing for WFS 1.1.0.
Tests parsing the remote metadata for a single layer.
Test whether the method is available and returns no remote metadata
if no MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wfs_110_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='1.1.0',
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
layer.parse_remote_metadata()
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wfs_110_noremotemd_parse_none(self, mp_wfs_110_nometadata):
"""Test the remote metadata parsing for WFS 1.1.0.
Tests the case when no remote metadata is parsed.
Test whether no remote metadata is returned.
Parameters
----------
mp_wfs_110_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='1.1.0',
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wfs_110_remotemd_parse_all(self, mp_wfs_110, mp_remote_md):
"""Test the remote metadata parsing for WFS 1.1.0.
Tests parsing the remote metadata for all layers.
Test whether the method is available and returns remote metadata
if MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wfs_110 : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
mp_remote_md : pytest.fixture
Monkeypatch the call to the remote metadata.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='1.1.0',
parse_remote_metadata=True)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 1
for m in mdrecords:
assert type(m) is owslib.iso.MD_Metadata
def test_wfs_110_remotemd_parse_single(self, mp_wfs_110, mp_remote_md):
"""Test the remote metadata parsing for WFS 1.1.0.
Tests parsing the remote metadata for a single layer.
Test whether the method is available and returns remote metadata
if MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wfs_110 : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
mp_remote_md : pytest.fixture
Monkeypatch the call to the remote metadata.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='1.1.0',
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
layer.parse_remote_metadata()
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 1
for m in mdrecords:
assert type(m) is owslib.iso.MD_Metadata
def test_wfs_110_remotemd_parse_none(self, mp_wfs_110):
"""Test the remote metadata parsing for WFS 1.1.0.
Tests the case when no remote metadata is parsed.
Test whether no remote metadata is returned.
Parameters
----------
mp_wfs_110 : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='1.1.0',
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wfs_200_noremotemd_parse_all(self, mp_wfs_200_nometadata):
"""Test the remote metadata parsing for WFS 2.0.0.
Tests parsing the remote metadata for all layers.
Test whether the method is available and returns no remote metadata
if no MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wfs_200_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='2.0.0',
parse_remote_metadata=True)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wfs_200_noremotemd_parse_single(self, mp_wfs_200_nometadata):
"""Test the remote metadata parsing for WFS 2.0.0.
Tests parsing the remote metadata for a single layer.
Test whether the method is available and returns no remote metadata
if no MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wfs_200_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='2.0.0',
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
layer.parse_remote_metadata()
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wfs_200_noremotemd_parse_none(self, mp_wfs_200_nometadata):
"""Test the remote metadata parsing for WFS 2.0.0.
Tests the case when no remote metadata is parsed.
Test whether no remote metadata is returned.
Parameters
----------
mp_wfs_200_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='2.0.0',
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wfs_200_remotemd_parse_all(self, mp_wfs_200, mp_remote_md):
"""Test the remote metadata parsing for WFS 2.0.0.
Tests parsing the remote metadata for all layers.
Test whether the method is available and returns remote metadata
if MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wfs_200 : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
mp_remote_md : pytest.fixture
Monkeypatch the call to the remote metadata.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='2.0.0',
parse_remote_metadata=True)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 1
for m in mdrecords:
assert type(m) is owslib.iso.MD_Metadata
def test_wfs_200_remotemd_parse_single(self, mp_wfs_200, mp_remote_md):
"""Test the remote metadata parsing for WFS 2.0.0.
Tests parsing the remote metadata for a single layer.
Test whether the method is available and returns remote metadata
if MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wfs_200 : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
mp_remote_md : pytest.fixture
Monkeypatch the call to the remote metadata.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='2.0.0',
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
layer.parse_remote_metadata()
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 1
for m in mdrecords:
assert type(m) is owslib.iso.MD_Metadata
def test_wfs_200_remotemd_parse_none(self, mp_wfs_200):
"""Test the remote metadata parsing for WFS 2.0.0.
Tests the case when no remote metadata is parsed.
Test whether no remote metadata is returned.
Parameters
----------
mp_wfs_200 : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wfs = WebFeatureService(url='http://localhost/not_applicable',
version='2.0.0',
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wms_111_noremotemd_parse_all(self, mp_wms_111_nometadata):
"""Test the remote metadata parsing for WMS 1.1.1.
Tests parsing the remote metadata for all layers.
Test whether the method is available and returns no remote metadata
if no MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wms_111_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wms = WebMapService(url='http://localhost/not_applicable',
version='1.1.1',
parse_remote_metadata=True)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wms_111_noremotemd_parse_single(self, mp_wms_111_nometadata):
"""Test the remote metadata parsing for WMS 1.1.1.
Tests parsing the remote metadata for a single layer.
Test whether the method is available and returns no remote metadata
if no MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wms_111_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wms = WebMapService(url='http://localhost/not_applicable',
version='1.1.1',
parse_remote_metadata=False)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
layer.parse_remote_metadata()
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wms_111_noremotemd_parse_none(self, mp_wms_111_nometadata):
"""Test the remote metadata parsing for WMS 1.1.1.
Tests the case when no remote metadata is parsed.
Test whether no remote metadata is returned.
Parameters
----------
mp_wms_111_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wms = WebMapService(url='http://localhost/not_applicable',
version='1.1.1',
parse_remote_metadata=False)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wms_130_remotemd_parse_all(self, mp_wms_130):
"""Test the remote metadata parsing for WMS 1.3.0.
Tests parsing the remote metadata for all layers.
Test whether the method is available and returns remote metadata
if MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wms_130 : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wms = WebMapService(url='http://localhost/not_applicable',
version='1.3.0',
parse_remote_metadata=True)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 1
for m in mdrecords:
assert type(m) is owslib.iso.MD_Metadata
def test_wms_130_remotemd_parse_single(self, mp_wms_130):
"""Test the remote metadata parsing for WMS 1.3.0.
Tests parsing the remote metadata for a single layer.
Test whether the method is available and returns remote metadata
if MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wms_130 : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wms = WebMapService(url='http://localhost/not_applicable',
version='1.3.0',
parse_remote_metadata=False)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
layer.parse_remote_metadata()
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 1
for m in mdrecords:
assert type(m) is owslib.iso.MD_Metadata
def test_wms_130_remotemd_parse_none(self, mp_wms_130):
"""Test the remote metadata parsing for WMS 1.3.0.
Tests the case when no remote metadata is parsed.
Test whether no remote metadata is returned.
Parameters
----------
mp_wms_130 : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wms = WebMapService(url='http://localhost/not_applicable',
version='1.3.0',
parse_remote_metadata=False)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wms_130_noremotemd_parse_all(self, mp_wms_130_nometadata):
"""Test the remote metadata parsing for WMS 1.3.0.
Tests parsing the remote metadata for all layers.
Test whether the method is available and returns no remote metadata
if no MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wms_130_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wms = WebMapService(url='http://localhost/not_applicable',
version='1.3.0',
parse_remote_metadata=True)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wms_130_noremotemd_parse_single(self, mp_wms_130_nometadata):
"""Test the remote metadata parsing for WMS 1.3.0.
Tests parsing the remote metadata for a single layer.
Test whether the method is available and returns no remote metadata
if no MetadataURLs are available in the GetCapabilities.
Parameters
----------
mp_wms_130_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wms = WebMapService(url='http://localhost/not_applicable',
version='1.3.0',
parse_remote_metadata=False)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
layer.parse_remote_metadata()
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
def test_wms_130_noremotemd_parse_none(self, mp_wms_130_nometadata):
"""Test the remote metadata parsing for WMS 1.3.0.
Tests the case when no remote metadata is parsed.
Test whether no remote metadata is returned.
Parameters
----------
mp_wms_130_nometadata : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
"""
wms = WebMapService(url='http://localhost/not_applicable',
version='1.3.0',
parse_remote_metadata=False)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
class TestOnline(object):
@pytest.mark.xfail
@pytest.mark.online
@pytest.mark.skipif(not service_ok(WFS_SERVICE_URL),
reason="WFS service is unreachable")
@pytest.mark.parametrize("wfs_version", ["1.1.0", "2.0.0"])
def test_wfs_remotemd_parse_single(self, wfs_version):
"""Test the remote metadata parsing for WFS.
Tests parsing the remote metadata for a single layer.
Test whether the method is available and returns remote metadata.
"""
wfs = WebFeatureService(url=WFS_SERVICE_URL,
version=wfs_version,
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
layer.parse_remote_metadata()
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) > 0
for m in mdrecords:
assert type(m) is owslib.iso.MD_Metadata
@pytest.mark.xfail
@pytest.mark.online
@pytest.mark.skipif(not service_ok(WFS_SERVICE_URL),
reason="WFS service is unreachable")
@pytest.mark.parametrize("wfs_version", ["1.1.0", "2.0.0"])
def test_wfs_remotemd_parse_all(self, wfs_version):
"""Test the remote metadata parsing for WFS.
Tests parsing the remote metadata for all layers.
Test whether the method is available and returns remote metadata.
"""
wfs = WebFeatureService(url=WFS_SERVICE_URL,
version=wfs_version,
parse_remote_metadata=True)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) > 0
for m in mdrecords:
assert type(m) is owslib.iso.MD_Metadata
@pytest.mark.xfail
@pytest.mark.online
@pytest.mark.skipif(not service_ok(WFS_SERVICE_URL),
reason="WFS service is unreachable")
@pytest.mark.parametrize("wfs_version", ["1.1.0", "2.0.0"])
def test_wfs_remotemd_parse_none(self, wfs_version):
"""Test the remote metadata parsing for WFS.
Tests the case when no remote metadata is parsed.
Test whether no remote metadata is returned.
"""
wfs = WebFeatureService(url=WFS_SERVICE_URL,
version=wfs_version,
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
@pytest.mark.xfail
@pytest.mark.online
@pytest.mark.skipif(not service_ok(WFS_SERVICE_URL),
reason="WFS service is unreachable")
@pytest.mark.parametrize("wfs_version", ["1.0.0"])
def test_wfs_noremotemd_parse_single(self, wfs_version):
"""Test the remote metadata parsing for WFS.
Tests parsing the remote metadata for a single layer.
Test whether the method is available and returns no remote metadata
if no MetadataURLs are available in the GetCapabilities.
"""
wfs = WebFeatureService(url=WFS_SERVICE_URL,
version=wfs_version,
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
layer.parse_remote_metadata()
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
@pytest.mark.xfail
@pytest.mark.online
@pytest.mark.skipif(not service_ok(WFS_SERVICE_URL),
reason="WFS service is unreachable")
@pytest.mark.parametrize("wfs_version", ["1.0.0"])
def test_wfs_noremotemd_parse_all(self, wfs_version):
"""Test the remote metadata parsing for WFS.
Tests parsing the remote metadata for all layers.
Test whether the method is available and returns no remote
metadata if no MetadataURLs are available in the GetCapabilities.
"""
wfs = WebFeatureService(url=WFS_SERVICE_URL,
version=wfs_version,
parse_remote_metadata=True)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
@pytest.mark.xfail
@pytest.mark.online
@pytest.mark.skipif(not service_ok(WFS_SERVICE_URL),
reason="WFS service is unreachable")
@pytest.mark.parametrize("wfs_version", ["1.0.0"])
def test_wfs_noremotemd_parse_none(self, wfs_version):
"""Test the remote metadata parsing for WFS.
Tests the case when no remote metadata is parsed.
Test whether no remote metadata is returned.
"""
wfs = WebFeatureService(url=WFS_SERVICE_URL,
version=wfs_version,
parse_remote_metadata=False)
assert 'gw_meetnetten:meetnetten' in wfs.contents
layer = wfs.contents['gw_meetnetten:meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
@pytest.mark.xfail
@pytest.mark.online
@pytest.mark.skipif(not service_ok(WMS_SERVICE_URL),
reason="WMS service is unreachable")
@pytest.mark.parametrize("wms_version", ["1.3.0"])
def test_wms_remotemd_parse_single(self, wms_version):
"""Test the remote metadata parsing for WMS.
Tests parsing the remote metadata for a single layer.
Test whether the method is available and returns remote metadata.
"""
wms = WebMapService(url=WMS_SERVICE_URL,
version=wms_version,
parse_remote_metadata=False)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
layer.parse_remote_metadata()
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) > 0
for m in mdrecords:
assert type(m) is owslib.iso.MD_Metadata
@pytest.mark.xfail
@pytest.mark.online
@pytest.mark.skipif(not service_ok(WMS_SERVICE_URL),
reason="WMS service is unreachable")
@pytest.mark.parametrize("wms_version", ["1.3.0"])
def test_wms_remotemd_parse_all(self, wms_version):
"""Test the remote metadata parsing for WMS.
Tests parsing the remote metadata for all layers.
Test whether the method is available and returns remote metadata.
"""
wms = WebMapService(url=WMS_SERVICE_URL,
version=wms_version,
parse_remote_metadata=True)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) > 0
for m in mdrecords:
assert type(m) is owslib.iso.MD_Metadata
@pytest.mark.xfail
@pytest.mark.online
@pytest.mark.skipif(not service_ok(WMS_SERVICE_URL),
reason="WMS service is unreachable")
@pytest.mark.parametrize("wms_version", ["1.3.0"])
def test_wms_remotemd_parse_none(self, wms_version):
"""Test the remote metadata parsing for WMS.
Tests the case when no remote metadata is parsed.
Test whether no remote metadata is returned.
"""
wms = WebMapService(url=WMS_SERVICE_URL,
version=wms_version,
parse_remote_metadata=False)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
@pytest.mark.xfail
@pytest.mark.online
@pytest.mark.skipif(not service_ok(WMS_SERVICE_URL),
reason="WMS service is unreachable")
@pytest.mark.parametrize("wms_version", ["1.1.1"])
def test_wms_noremotemd_parse_single(self, wms_version):
"""Test the remote metadata parsing for WMS.
Tests parsing the remote metadata for a single layer.
Test whether the method is available and returns no remote metadata
if no MetadataURLs are available in the GetCapabilities.
"""
wms = WebMapService(url=WMS_SERVICE_URL,
version=wms_version,
parse_remote_metadata=False)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
layer.parse_remote_metadata()
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
@pytest.mark.xfail
@pytest.mark.online
@pytest.mark.skipif(not service_ok(WMS_SERVICE_URL),
reason="WMS service is unreachable")
@pytest.mark.parametrize("wms_version", ["1.1.1"])
def test_wms_noremotemd_parse_all(self, wms_version):
"""Test the remote metadata parsing for WMS.
Tests parsing the remote metadata for all layers.
Test whether the method is available and returns no remote
metadata if no MetadataURLs are available in the GetCapabilities.
"""
wms = WebMapService(url=WMS_SERVICE_URL,
version=wms_version,
parse_remote_metadata=True)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
@pytest.mark.xfail
@pytest.mark.online
@pytest.mark.skipif(not service_ok(WMS_SERVICE_URL),
reason="WMS service is unreachable")
@pytest.mark.parametrize("wms_version", ["1.1.1"])
def test_wms_noremotemd_parse_none(self, wms_version):
"""Test the remote metadata parsing for WMS.
Tests the case when no remote metadata is parsed.
Test whether no remote metadata is returned.
"""
wms = WebMapService(url=WMS_SERVICE_URL,
version=wms_version,
parse_remote_metadata=False)
assert 'meetnetten' in wms.contents
layer = wms.contents['meetnetten']
mdrecords = layer.get_metadata()
assert type(mdrecords) is list
assert len(mdrecords) == 0
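# Usage note (assumption, not from the OWSLib docs): the TestOffline cases rely
# only on the monkeypatched fixtures above, while the TestOnline cases are
# guarded by the ``online`` marker and a reachability check, e.g.
#
#   pytest tests/test_remote_metadata.py -m "not online"   # offline only
#   pytest tests/test_remote_metadata.py -m online         # hit the live services
#
# Exact marker selection depends on the project's pytest configuration.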
|
torchreid/components/dropout.py | Danish-VSL/deep-person-reid | 244 | 11197151 | import os
from torch.nn import functional as F
from torch import nn
class SimpleDropoutOptimizer(nn.Module):
def __init__(self, p):
super().__init__()
if p is not None:
self.dropout = nn.Dropout(p=p)
else:
self.dropout = None
def forward(self, x):
if self.dropout is not None:
x = self.dropout(x)
return x
class DropoutOptimizer(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
self.epoch = self.__p = 0
self.__training = not args.evaluate
def set_epoch(self, epoch):
self.epoch = epoch
def set_training(self, training):
self.__training = training
@property
def p(self):
if not self.__training:
return self.__p
dropout_settings = self.args.dropout
if dropout_settings == 'none':
return 0
try:
max_dropout = float(os.environ.get('max_dropout'))
except (ValueError, TypeError):
max_dropout = 0.5
if dropout_settings == 'fix':
return max_dropout
# print(self.epoch)
p = .2 + .1 * (self.epoch // 10)
p = min(p, max_dropout)
return p
def set_p(self, p):
if self.__training:
raise RuntimeError('Cannot explicitly set dropout during training')
assert isinstance(p, (int, float))
self.__p = p
def forward(self, x):
p = self.p
# print('Dropout p', p)
# print(p, self.__training)
if p > 0:
return F.dropout(x, p=p, training=self.__training)
else:
return x
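# Usage sketch (assumption, not part of the original module): exercise the
# epoch-based schedule p = 0.2 + 0.1 * (epoch // 10), capped at max_dropout
# (0.5 unless the ``max_dropout`` environment variable overrides it). The
# ``args`` namespace below is hypothetical.
if __name__ == '__main__':
    from argparse import Namespace

    optimizer = DropoutOptimizer(Namespace(dropout='scheduled', evaluate=False))
    for epoch in (0, 10, 25, 40):
        optimizer.set_epoch(epoch)
        print(epoch, optimizer.p)  # 0.2, 0.3, 0.4, 0.5 with the default cap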
|
chapter11/dags/02_dag_factory.py | add54/Data_PipeLine_Apache_Airflow | 303 | 11197153 | import os
import airflow.utils.dates
from airflow import DAG
from airflow.operators.bash import BashOperator
def generate_dag(dataset_name, raw_dir, processed_dir, preprocess_script):
with DAG(
dag_id=f"02_dag_factory_{dataset_name}",
start_date=airflow.utils.dates.days_ago(5),
schedule_interval="@daily",
) as dag:
raw_file_path = os.path.join(raw_dir, dataset_name, "{ds_nodash}.json")
processed_file_path = os.path.join(
processed_dir, dataset_name, "{ds_nodash}.json"
)
fetch_task = BashOperator(
task_id=f"fetch_{dataset_name}",
bash_command=f"echo 'curl http://example.com/{dataset_name}.json > {raw_file_path}.json'",
)
preprocess_task = BashOperator(
task_id=f"preprocess_{dataset_name}",
bash_command=f"echo '{preprocess_script} {raw_file_path} {processed_file_path}'",
)
fetch_task >> preprocess_task
return dag
for dataset in ["sales", "customers"]:
globals()[f"02_dag_factory_{dataset}"] = generate_dag(
dataset_name=dataset,
raw_dir="/data/raw",
processed_dir="/data/processed",
preprocess_script=f"preprocess_{dataset}.py",
)
|
apps/dash-vtk-explorer/explorer.py | JeroenvdSande/dash-sample-apps | 2,332 | 11197158 | <reponame>JeroenvdSande/dash-sample-apps
from importlib import import_module
from inspect import getsource
from copy import deepcopy
import json
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
def prepend_recursive(component, prefix: str) -> None:
"""in-place modifications"""
if hasattr(component, "id"):
if type(component.id) == str:
component.id = prefix + component.id
elif type(component.id) == dict:
key = "type"
if key in component.id:
component.id[key] = prefix + component.id[key]
if hasattr(component, "children") and component.children is not None:
for child in component.children:
prepend_recursive(child, prefix)
def prepend_list_of_dict(ls: list, prefix: str) -> list:
new_ls = []
for di in ls:
di = deepcopy(di)
try: # is a dictionary
di_id = json.loads(di["id"])
key = "type"
if key in di_id:
di_id[key] = prefix + di_id[key]
di["id"] = json.dumps(di_id).replace(" ", "")
except ValueError: # is a string
di["id"] = prefix + di["id"]
new_ls.append(di)
return new_ls
def prepend_callback_map(di: dict, prefix: str) -> dict:
new_di = {}
for k, v in di.items():
v = deepcopy(v)
v["inputs"] = prepend_list_of_dict(v["inputs"], prefix)
v["state"] = prepend_list_of_dict(v["state"], prefix)
new_di[prefix + k] = v
return new_di
def prepend_callback_list(ls: list, prefix: str) -> list:
new_ls = []
for di in ls:
di = deepcopy(di)
if type(di["output"]) == list:
di["output"] = prepend_list_of_dict(di["output"], prefix)
else:
di["output"] = prefix + di["output"]
di["inputs"] = prepend_list_of_dict(di["inputs"], prefix)
di["state"] = prepend_list_of_dict(di["state"], prefix)
new_ls.append(di)
return new_ls
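# Minimal sketch (assumption): how the prepend helpers fit together when a
# sub-app is namespaced under a prefix. The ``sub_app`` object and the
# ``"demo-"`` prefix are hypothetical; the commented-out block further down in
# this file applies the same pattern per registered app.
#
#   prepend_recursive(sub_app.layout, prefix="demo-")
#   callback_map = prepend_callback_map(sub_app.callback_map, prefix="demo-")
#   callback_list = prepend_callback_list(sub_app._callback_list, prefix="demo-")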
def Header(name, app):
title = html.H2(name, style={"display": "inline-flex"})
logo = html.Img(
src=app.get_asset_url("dash-logo.png"),
style={
"height": 60,
"display": "inline-flex",
"margin-top": "-10px",
"margin-right": "5px",
},
)
link = html.A(logo, href="https://plotly.com/dash/", target="_blank")
return html.Div([link, title])
def display_demo(name, layout, code):
download_btn = html.A(
html.Button(
"Download",
style={
"width": "90px",
"margin": "auto",
"padding": "0px",
"font-size": "10px",
"height": "35px",
"border-radius": "2px",
},
),
href=app.get_asset_url(name + ".py"),
download="app.py",
style={"position": "absolute", "top": "1.5em", "right": "1.5em"},
)
return html.Div(
[
html.Div(
[download_btn, dcc.Markdown(f"```\n{code}\n```"),],
style={
"float": "left",
"width": "49%",
"height": "85vh",
"overflow-y": "auto",
"position": "relative",
"background-color": "#F7FAFC",
"border": "1px solid #A1ACC3",
"border-right": "none",
},
),
html.Div(
layout,
style={
"float": "left",
"width": "48%",
"padding": "5px 1% 5px 1%",
"height": "calc(85vh - 10px)",
"overflow-y": "auto",
"border": "1px solid #A1ACC3",
},
),
]
)
prefix_ignored = []
ignored_pages = ["data", "requirements.txt"]
app = dash.Dash(
__name__,
suppress_callback_exceptions=True,
external_stylesheets=[dbc.themes.COSMO],
)
server = app.server
app_subdomain = os.getenv("APP_SUBDOMAIN", "dash-vtk-explorer")
pages = [p for p in sorted(os.listdir("demos")) if p not in ignored_pages]
print(pages)
modules = {p: import_module(f"demos.{p}.app") for p in pages}
apps = {p: m.app for p, m in modules.items()}
source_codes = {p: getsource(m) for p, m in modules.items()}
notfound_404 = html.Div(
[
html.H1("404"),
"Webpage not found. Please contact us if a page is supposed to be here.",
]
)
app.layout = dbc.Container(
children=[
dbc.Row(
style={"height": "10%", "align-items": "center"},
children=[
dbc.Col([Header("VTK Explorer", app),], width=8,),
dbc.Col(
dbc.Spinner(
dbc.Select(
id="app-choice",
placeholder="Please select an app...",
style={"width": "100%"},
options=[
{"label": x.replace("-", " ").capitalize(), "value": x}
for x in pages
],
),
),
width=4,
),
],
),
html.Div(id="display", style={"height": "90%"}),
dcc.Location(id="url", refresh=False),
],
style={"height": "calc(100vh - 15px)"},
fluid=True,
)
for k in apps:
new_callback_map = apps[k].callback_map
new_callback_list = apps[k]._callback_list
# Prepend to layout IDs recursively in-place
# if k in prefix_ignored:
# new_callback_map = apps[k].callback_map
# new_callback_list = apps[k]._callback_list
# else:
# prepend_recursive(apps[k].layout, prefix=k + "-")
# new_callback_map = prepend_callback_map(apps[k].callback_map, prefix=k + "-")
# new_callback_list = prepend_callback_list(apps[k]._callback_list, prefix=k + "-")
app.callback_map.update(new_callback_map)
app._callback_list.extend(new_callback_list)
@app.callback(
[Output("url", "pathname"), Output("url", "refresh")], Input("app-choice", "value")
)
def update_url(name):
if name is None:
return dash.no_update, dash.no_update
return f"/{app_subdomain}/{name}", name == "slice-rendering"
@app.callback(
[Output("display", "children"), Output("app-choice", "options")],
[Input("url", "pathname")],
)
def display_content(pathname):
if app_subdomain in pathname:
name = pathname.split("/")[-1]
if name == "":
return html.P("Please select an app from the dropdown"), dash.no_update
elif name in pages:
# return display_demo(
# name=name, layout=apps[name].layout, code=source_codes[name]
# )
return apps[name].layout.children, dash.no_update
else:
return notfound_404, dash.no_update
return dash.no_update, dash.no_update
if __name__ == "__main__":
app.run_server(debug=True)
|
python/ray/autoscaler/_private/docker.py | jamesliu/ray | 21,382 | 11197162 | from pathlib import Path
from typing import Any, Dict
try: # py3
from shlex import quote
except ImportError: # py2
from pipes import quote
from ray.autoscaler._private.cli_logger import cli_logger
def _check_docker_file_mounts(file_mounts: Dict[str, str]) -> None:
"""Checks if files are passed as file_mounts. This is a problem for Docker
based clusters because when a file is bind-mounted in Docker, updates to
the file on the host do not always propagate to the container. Using
directories is recommended.
"""
for remote, local in file_mounts.items():
if Path(local).is_file():
cli_logger.warning(
f"File Mount: ({remote}:{local}) refers to a file.\n To ensure"
" this mount updates properly, please use a directory.")
def validate_docker_config(config: Dict[str, Any]) -> None:
"""Checks whether the Docker configuration is valid."""
if "docker" not in config:
return
_check_docker_file_mounts(config.get("file_mounts", {}))
docker_image = config["docker"].get("image")
cname = config["docker"].get("container_name")
head_docker_image = config["docker"].get("head_image", docker_image)
worker_docker_image = config["docker"].get("worker_image", docker_image)
image_present = docker_image or (head_docker_image and worker_docker_image)
if (not cname) and (not image_present):
return
else:
assert cname and image_present, "Must provide a container & image name"
return None
def with_docker_exec(cmds,
container_name,
docker_cmd,
env_vars=None,
with_interactive=False):
assert docker_cmd, "Must provide docker command"
env_str = ""
if env_vars:
env_str = " ".join(
["-e {env}=${env}".format(env=env) for env in env_vars])
return [
"docker exec {interactive} {env} {container} /bin/bash -c {cmd} ".
format(
interactive="-it" if with_interactive else "",
env=env_str,
container=container_name,
cmd=quote(cmd)) for cmd in cmds
]
def _check_helper(cname, template, docker_cmd):
return " ".join([
docker_cmd, "inspect", "-f", "'{{" + template + "}}'", cname, "||",
"true"
])
def check_docker_running_cmd(cname, docker_cmd):
return _check_helper(cname, ".State.Running", docker_cmd)
def check_bind_mounts_cmd(cname, docker_cmd):
return _check_helper(cname, "json .Mounts", docker_cmd)
def check_docker_image(cname, docker_cmd):
return _check_helper(cname, ".Config.Image", docker_cmd)
def docker_start_cmds(user, image, mount_dict, container_name, user_options,
cluster_name, home_directory, docker_cmd):
# Imported here due to circular dependency.
from ray.autoscaler.sdk import get_docker_host_mount_location
docker_mount_prefix = get_docker_host_mount_location(cluster_name)
mount = {f"{docker_mount_prefix}/{dst}": dst for dst in mount_dict}
mount_flags = " ".join([
"-v {src}:{dest}".format(
src=k, dest=v.replace("~/", home_directory + "/"))
for k, v in mount.items()
])
# for click, used in ray cli
env_vars = {"LC_ALL": "C.UTF-8", "LANG": "C.UTF-8"}
env_flags = " ".join(
["-e {name}={val}".format(name=k, val=v) for k, v in env_vars.items()])
user_options_str = " ".join(user_options)
docker_run = [
docker_cmd, "run", "--rm", "--name {}".format(container_name), "-d",
"-it", mount_flags, env_flags, user_options_str, "--net=host", image,
"bash"
]
return " ".join(docker_run)
|
plugins/lookup/php_packages.py | manala/ansible-roles | 138 | 11197178 | <reponame>manala/ansible-roles
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: php_packages
author: Manala (@manala)
short_description: returns a curated packages list
description:
- Takes a package list and returns it curated.
'''
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
results = []
packages = self._flatten(terms[0])
version = terms[1]
for package in packages:
items = []
# Extensions
items.append('php%s-%s' % (version, package))
# Merge
for item in items:
if item not in results:
results.append(item)
return results
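# Usage sketch (assumption, not taken from the role's documentation): from a
# task or template, the lookup flattens the given package list and prefixes
# each entry with the PHP version, e.g.
#
#   {{ lookup('php_packages', ['cli', 'fpm'], '8.1') }}
#   # -> ['php8.1-cli', 'php8.1-fpm']
#
# The bare lookup name assumes the plugin directory is on the lookup path; a
# fully-qualified collection name may be needed in other setups.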
|
api/src/opentrons/protocol_engine/state/configs.py | mrod0101/opentrons | 235 | 11197212 | <gh_stars>100-1000
"""Configurations for the Engine."""
from dataclasses import dataclass
@dataclass(frozen=True)
class EngineConfigs:
"""Configurations for Protocol Engine."""
ignore_pause: bool = False
|
python-modules/twisted/twisted/conch/test/test_checkers.py | stormtheh4ck3r/python-for-android | 267 | 11197214 | <reponame>stormtheh4ck3r/python-for-android<filename>python-modules/twisted/twisted/conch/test/test_checkers.py
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.checkers}.
"""
try:
import pwd
except ImportError:
pwd = None
import os, base64
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.credentials import UsernamePassword, IUsernamePassword, \
SSHPrivateKey, ISSHPrivateKey
from twisted.cred.error import UnhandledCredentials, UnauthorizedLogin
from twisted.python.fakepwd import UserDatabase
from twisted.test.test_process import MockOS
try:
import Crypto.Cipher.DES3
import pyasn1
except ImportError:
SSHPublicKeyDatabase = None
else:
from twisted.conch.ssh import keys
from twisted.conch.checkers import SSHPublicKeyDatabase, SSHProtocolChecker
from twisted.conch.error import NotEnoughAuthentication, ValidPublicKey
from twisted.conch.test import keydata
class SSHPublicKeyDatabaseTestCase(TestCase):
"""
Tests for L{SSHPublicKeyDatabase}.
"""
if pwd is None:
skip = "Cannot run without pwd module"
elif SSHPublicKeyDatabase is None:
skip = "Cannot run without PyCrypto or PyASN1"
def setUp(self):
self.checker = SSHPublicKeyDatabase()
self.key1 = base64.encodestring("foobar")
self.key2 = base64.encodestring("eggspam")
self.content = "t1 %s foo\nt2 %s egg\n" % (self.key1, self.key2)
self.mockos = MockOS()
self.mockos.path = FilePath(self.mktemp())
self.mockos.path.makedirs()
self.sshDir = self.mockos.path.child('.ssh')
self.sshDir.makedirs()
userdb = UserDatabase()
userdb.addUser('user', 'password', 1, 2, 'first last',
self.mockos.path.path, '/bin/shell')
self.patch(pwd, "getpwnam", userdb.getpwnam)
self.patch(os, "seteuid", self.mockos.seteuid)
self.patch(os, "setegid", self.mockos.setegid)
def _testCheckKey(self, filename):
self.sshDir.child(filename).setContent(self.content)
user = UsernamePassword("user", "password")
user.blob = "foobar"
self.assertTrue(self.checker.checkKey(user))
user.blob = "eggspam"
self.assertTrue(self.checker.checkKey(user))
user.blob = "notallowed"
self.assertFalse(self.checker.checkKey(user))
def test_checkKey(self):
"""
L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
authorized_keys file and check the keys against that file.
"""
self._testCheckKey("authorized_keys")
self.assertEquals(self.mockos.seteuidCalls, [])
self.assertEquals(self.mockos.setegidCalls, [])
def test_checkKey2(self):
"""
L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
authorized_keys2 file and check the keys against that file.
"""
self._testCheckKey("authorized_keys2")
self.assertEquals(self.mockos.seteuidCalls, [])
self.assertEquals(self.mockos.setegidCalls, [])
def test_checkKeyAsRoot(self):
"""
If the key file is readable, L{SSHPublicKeyDatabase.checkKey} should
switch its uid/gid to the ones of the authenticated user.
"""
keyFile = self.sshDir.child("authorized_keys")
keyFile.setContent(self.content)
# Fake permission error by changing the mode
keyFile.chmod(0000)
self.addCleanup(keyFile.chmod, 0777)
# And restore the right mode when seteuid is called
savedSeteuid = os.seteuid
def seteuid(euid):
keyFile.chmod(0777)
return savedSeteuid(euid)
self.patch(os, "seteuid", seteuid)
user = UsernamePassword("user", "password")
user.blob = "foobar"
self.assertTrue(self.checker.checkKey(user))
self.assertEquals(self.mockos.seteuidCalls, [0, 1, 0, os.getuid()])
self.assertEquals(self.mockos.setegidCalls, [2, os.getgid()])
def test_requestAvatarId(self):
"""
L{SSHPublicKeyDatabase.requestAvatarId} should return the avatar id
passed in if its C{_checkKey} method returns True.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey('test', 'ssh-rsa', keydata.publicRSA_openssh,
'foo', keys.Key.fromString(keydata.privateRSA_openssh).sign('foo'))
d = self.checker.requestAvatarId(credentials)
def _verify(avatarId):
self.assertEquals(avatarId, 'test')
return d.addCallback(_verify)
def test_requestAvatarIdWithoutSignature(self):
"""
L{SSHPublicKeyDatabase.requestAvatarId} should raise L{ValidPublicKey}
if the credentials represent a valid key without a signature. This
tells the user that the key is valid for login, but does not actually
allow that user to do so without a signature.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey('test', 'ssh-rsa', keydata.publicRSA_openssh, None, None)
d = self.checker.requestAvatarId(credentials)
return self.assertFailure(d, ValidPublicKey)
def test_requestAvatarIdInvalidKey(self):
"""
If L{SSHPublicKeyDatabase.checkKey} returns False,
C{_cbRequestAvatarId} should raise L{UnauthorizedLogin}.
"""
def _checkKey(ignored):
return False
self.patch(self.checker, 'checkKey', _checkKey)
d = self.checker.requestAvatarId(None)
return self.assertFailure(d, UnauthorizedLogin)
def test_requestAvatarIdInvalidSignature(self):
"""
Valid keys with invalid signatures should cause
L{SSHPublicKeyDatabase.requestAvatarId} to return an L{UnauthorizedLogin}
failure.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey('test', 'ssh-rsa', keydata.publicRSA_openssh,
'foo', keys.Key.fromString(keydata.privateDSA_openssh).sign('foo'))
d = self.checker.requestAvatarId(credentials)
return self.assertFailure(d, UnauthorizedLogin)
def test_requestAvatarIdNormalizeException(self):
"""
Exceptions raised while verifying the key should be normalized into an
C{UnauthorizedLogin} failure.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey('test', None, 'blob', 'sigData', 'sig')
d = self.checker.requestAvatarId(credentials)
def _verifyLoggedException(failure):
errors = self.flushLoggedErrors(keys.BadKeyError)
self.assertEqual(len(errors), 1)
return failure
d.addErrback(_verifyLoggedException)
return self.assertFailure(d, UnauthorizedLogin)
class SSHProtocolCheckerTestCase(TestCase):
"""
Tests for L{SSHProtocolChecker}.
"""
if SSHPublicKeyDatabase is None:
skip = "Cannot run without PyCrypto"
def test_registerChecker(self):
"""
L{SSHProtocolChecker.registerChecker} should add the given checker to
the list of registered checkers.
"""
checker = SSHProtocolChecker()
self.assertEquals(checker.credentialInterfaces, [])
checker.registerChecker(SSHPublicKeyDatabase(), )
self.assertEquals(checker.credentialInterfaces, [ISSHPrivateKey])
self.assertIsInstance(checker.checkers[ISSHPrivateKey],
SSHPublicKeyDatabase)
def test_registerCheckerWithInterface(self):
"""
If a specific interface is passed into
L{SSHProtocolChecker.registerChecker}, that interface should be
registered instead of what the checker specifies in
credentialInterfaces.
"""
checker = SSHProtocolChecker()
self.assertEquals(checker.credentialInterfaces, [])
checker.registerChecker(SSHPublicKeyDatabase(), IUsernamePassword)
self.assertEquals(checker.credentialInterfaces, [IUsernamePassword])
self.assertIsInstance(checker.checkers[IUsernamePassword],
SSHPublicKeyDatabase)
def test_requestAvatarId(self):
"""
L{SSHProtocolChecker.requestAvatarId} should defer to one of its
registered checkers to authenticate a user.
"""
checker = SSHProtocolChecker()
passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
passwordDatabase.addUser('test', 'test')
checker.registerChecker(passwordDatabase)
d = checker.requestAvatarId(UsernamePassword('test', 'test'))
def _callback(avatarId):
self.assertEquals(avatarId, 'test')
return d.addCallback(_callback)
def test_requestAvatarIdWithNotEnoughAuthentication(self):
"""
If the client indicates that it is never satisfied, by always returning
False from _areDone, then L{SSHProtocolChecker} should raise
L{NotEnoughAuthentication}.
"""
checker = SSHProtocolChecker()
def _areDone(avatarId):
return False
self.patch(checker, 'areDone', _areDone)
passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
passwordDatabase.addUser('test', 'test')
checker.registerChecker(passwordDatabase)
d = checker.requestAvatarId(UsernamePassword('test', 'test'))
return self.assertFailure(d, NotEnoughAuthentication)
def test_requestAvatarIdInvalidCredential(self):
"""
If the passed credentials aren't handled by any registered checker,
L{SSHProtocolChecker} should raise L{UnhandledCredentials}.
"""
checker = SSHProtocolChecker()
d = checker.requestAvatarId(UsernamePassword('test', 'test'))
return self.assertFailure(d, UnhandledCredentials)
def test_areDone(self):
"""
The default L{SSHProtocolChecker.areDone} should simply return True.
"""
self.assertEquals(SSHProtocolChecker().areDone(None), True)
|