filename (stringlengths 13-19) | text (stringlengths 134-1.04M)
---|---|
the-stack_0_9336 | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from pandas import DataFrame
from collections import OrderedDict
from pyomo.environ import value
from pyomo.network import Arc, Port
import idaes.logger as idaeslog
from idaes.core.util.units_of_measurement import report_quantity
_log = idaeslog.getLogger(__name__)
__author__ = "John Eslick, Andrew Lee"
def arcs_to_stream_dict(
blk, additional=None, descend_into=True, sort=False, prepend=None, s=None
):
"""
Creates a stream dictionary from the Arcs in a model, using the Arc names as
keys. This can be used to automate the creation of the streams dictionary
needed for the ``create_stream_table_dataframe()`` and ``stream_states_dict()``
functions.
Args:
blk (pyomo.environ._BlockData): Pyomo model to search for Arcs
additional (dict): Additional states to add to the stream dictionary,
which aren't represented by arcs in blk, for example feed or
product streams without Arcs attached or states internal to a unit
model.
descend_into (bool): If True, search subblocks for Arcs as well. The
default is True.
sort (bool): If True sort keys and return an OrderedDict
prepend (str): Prepend a string to the arc name joined with a '.'.
This can be useful to prevent conflicting names when sub blocks
contain Arcs that have the same names when used in combination
with descend_into=False.
s (dict): Add streams to an existing stream dict.
Returns:
Dictionary with Arc names as keys and the Arcs as values.
"""
if s is None:
s = {}
for c in blk.component_objects(Arc, descend_into=descend_into):
key = c.getname()
if prepend is not None:
key = ".".join([prepend, key])
s[key] = c
if additional is not None:
s.update(additional)
if sort:
s = OrderedDict(sorted(s.items()))
return s
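# --- Illustrative usage sketch (not part of the original module) ---
# ``m.fs`` and the port name below are hypothetical; any Pyomo block that
# contains Arcs works the same way.
def _example_arcs_to_stream_dict(m):
    """Build a stream dict from a flowsheet and add an Arc-less feed port."""
    return arcs_to_stream_dict(
        m.fs,
        additional={"feed": m.fs.feed_outlet},  # a port with no Arc attached
        sort=True,
    )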
def stream_states_dict(streams, time_point=0):
"""
Method to create a dictionary of state blocks representing stream states.
This takes a dict with stream name keys and stream values.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
time_point : point in the time domain at which to generate stream table
(default = 0)
Returns:
An OrderedDict of stream states (StateBlock data objects) keyed by stream name.
"""
stream_dict = OrderedDict()
def _stream_dict_add(sb, n, i=None):
"""add a line to the stream table"""
if i is None:
key = n
else:
key = "{}[{}]".format(n, i)
stream_dict[key] = sb
for n in streams.keys():
if isinstance(streams[n], Arc):
for i, a in streams[n].items():
try:
# if getting the StateBlock from the destination port
# fails for any reason try the source port. This could
# happen if a port does not have an associated
# StateBlock. For example a surrogate model may not
# use state blocks, unit models may handle physical
# properties without state blocks, or the port could
# be used to serve the purpose of a translator block.
sb = _get_state_from_port(a.ports[1], time_point)
except:
sb = _get_state_from_port(a.ports[0], time_point)
_stream_dict_add(sb, n, i)
elif isinstance(streams[n], Port):
sb = _get_state_from_port(streams[n], time_point)
_stream_dict_add(sb, n)
else:
# _IndexedStateBlock is a private class, so cannot directly test
# whether streams[n] is one or not.
try:
sb = streams[n][time_point]
except KeyError as err:
raise TypeError(
f"Either component type of stream argument {streams[n]} "
f"is unindexed or {time_point} is not a member of its "
f"indexing set."
) from err
_stream_dict_add(sb, n)
return stream_dict
def tag_state_quantities(blocks, attributes, labels, exception=False):
"""Take a stream states dictionary, and return a tag dictionary for stream
quantities. This takes a dictionary (blocks) that has state block labels as
keys and state blocks as values. The attributes are a list of attributes to
tag. If an element of the attribute list is list-like, the first element is
the attribute and the remaining elements are indexes. Labels provides a list
of attribute labels to be used to create the tag. Tags are blk_key + label
for the attribute.
Args:
blocks (dict): Dictionary of state blocks. The key is the block label to
be used in the tag, and the value is a state block.
attributes (list-like): A list of attributes to tag. It is okay if a
particular attribute does not exist in a state block. This allows
you to mix state blocks with different sets of attributes. If an
attribute is indexed, the attribute can be specified as a list or
tuple where the first element is the attribute and the remaining
elements are indexes.
labels (list-like): These are attribute labels. The order corresponds to the
attribute list. They are used to create the tags. Tags are in the
form blk.key + label.
exception (bool): If True, raise exceptions related to invalid or
missing indexes. If False, missing or bad indexes are ignored and
None is used for the table value. Setting this to False allows
tables where some state blocks have the same attributes with different
indexing. (default is False)
Return:
(dict): Dictionary where the keys are tags and the values are model
attributes, usually Pyomo component data objects.
"""
tags = {}
if labels is None:
labels = attributes
for a in attributes:
if isinstance(a, (tuple, list)):
if len(a) == 2:
# in case there are multiple indexes and user gives tuple
label = f"{a[0]}[{a[1]}]"
elif len(a) > 2:
label = f"{a[0]}[{a[1:]}]"
else:
label = a[0]
for key, s in blocks.items():
for i, a in enumerate(attributes):
j = None
if isinstance(a, (list, tuple)):
# if a is list or tuple, the first element should be the
# attribute and the remaining elements should be indexes.
if len(a) == 2:
j = a[1] # catch user supplying list-like of indexes
if len(a) > 2:
j = a[1:]
# if len(a) == 1, we'll say that's fine here. Don't know why you
# would put the attribute in a list-like if not indexed, but I'll
# allow it.
a = a[0]
v = getattr(s, a, None)
if j is not None and v is not None:
try:
v = v[j]
except KeyError:
if not exception:
v = None
else:
_log.error(f"{j} is not a valid index of {a}")
raise KeyError(f"{j} is not a valid index of {a}")
try:
value(v, exception=False)
except TypeError:
if not exception:
v = None
else:
_log.error(f"Cannot calculate value of {a} (may be subscriptable)")
raise TypeError(
f"Cannot calculate value of {a} (may be subscriptable)"
)
except ZeroDivisionError:
pass # this one is okay
if v is not None:
tags[f"{key}{labels[i]}"] = v
return tags
def create_stream_table_dataframe(
streams, true_state=False, time_point=0, orient="columns"
):
"""
Method to create a stream table in the form of a pandas dataframe. Method
takes a dict with name keys and stream values. Use an OrderedDict to list
the streams in a specific order, otherwise the dataframe can be sorted
later.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
true_state : indicates whether the stream table should contain the
display variables defined in the StateBlock (False, default) or the
state variables (True).
time_point : point in the time domain at which to generate stream table
(default = 0)
orient : orientation of stream table. Accepted values are 'columns'
(default) where streams are displayed as columns, or 'index' where
streams are displayed as rows.
Returns:
A pandas DataFrame containing the stream table data.
"""
stream_attributes = OrderedDict()
stream_states = stream_states_dict(streams=streams, time_point=time_point)
full_keys = [] # List of all rows in dataframe to fill in missing data
stream_attributes["Units"] = {}
for key, sb in stream_states.items():
stream_attributes[key] = {}
if true_state:
disp_dict = sb.define_state_vars()
else:
disp_dict = sb.define_display_vars()
for k in disp_dict:
for i in disp_dict[k]:
stream_key = k if i is None else f"{k} {i}"
quant = report_quantity(disp_dict[k][i])
stream_attributes[key][stream_key] = quant.m
# TODO: Only need to do this once, as otherwise we are just
# repeatedly overwriting this
stream_attributes["Units"][stream_key] = quant.u
if stream_key not in full_keys:
full_keys.append(stream_key)
# Check for missing rows in any stream, and fill with "-" if needed
for k, v in stream_attributes.items():
for r in full_keys:
if r not in v.keys():
# Missing row, fill with placeholder
v[r] = "-"
return DataFrame.from_dict(stream_attributes, orient=orient)
def stream_table_dataframe_to_string(stream_table, **kwargs):
"""
Method to print a stream table from a dataframe. Method takes any argument
understood by DataFrame.to_string
"""
# Set some default values for keyword arguments
na_rep = kwargs.pop("na_rep", "-")
justify = kwargs.pop("justify", "center")
float_format = kwargs.pop("float_format", lambda x: "{:#.5g}".format(x))
# Print stream table
return stream_table.to_string(
na_rep=na_rep, justify=justify, float_format=float_format, **kwargs
)
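# --- Illustrative usage sketch (not part of the original module) ---
# ``m.fs`` is a hypothetical flowsheet; the calls below simply chain the
# helpers defined above to print a stream table at the first time point.
def _example_print_stream_table(m):
    streams = arcs_to_stream_dict(m.fs, sort=True)
    df = create_stream_table_dataframe(streams, true_state=False, time_point=0)
    print(stream_table_dataframe_to_string(df))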
def _get_state_from_port(port, time_point):
"""
Attempt to find a StateBlock-like object connected to a Port. If the
object is indexed both in space and time, assume that the time index
comes first. If no components are assigned to the Port, raise a
ValueError. If the first component's parent block has no index, raise an
AttributeError. If different variables on the port appear to be connected
to different state blocks, raise a RuntimeError.
Args:
port (pyomo.network.Port): a port with variables derived from some
single StateBlock
time_point : point in the time domain at which to index StateBlock
(default = 0)
Returns:
(StateBlock-like) : an object containing all the components contained
in the port.
"""
vlist = list(port.iter_vars())
states = [v.parent_block().parent_component() for v in vlist]
if len(vlist) == 0:
raise ValueError(
f"No block could be retrieved from Port {port.name} "
f"because it contains no components."
)
# Check the number of indices of the parent property block. If it's indexed
# both in space and time, keep the second, spatial index and throw out the
# first, temporal index. If that ordering is changed, this method will
# need to be changed as well.
try:
idx = vlist[0].parent_block().index()
except AttributeError as err:
raise AttributeError(
f"No block could be retrieved from Port {port.name} "
f"because block {vlist[0].parent_block().name} has no index."
) from err
# Assuming the time index is always first and the spatial indices are all
# the same
if isinstance(idx, tuple):
idx = (time_point,) + vlist[0].parent_block().index()[1:]
else:
idx = (time_point,)
# This method also assumes that ports with different spatial indices won't
# end up at the same port. Otherwise this check is insufficient.
if all(states[0] is s for s in states):
return states[0][idx]
raise RuntimeError(
f"No block could be retrieved from Port {port.name} "
f"because components are derived from multiple blocks."
)
def generate_table(blocks, attributes, heading=None, exception=True):
"""
Create a Pandas DataFrame that contains a list of user-defined attributes
from a set of Blocks.
Args:
blocks (dict): A dictionary with name keys and BlockData objects for
values. Any name can be associated with a block. Use an OrderedDict
to show the blocks in a specific order, otherwise the dataframe can
be sorted later.
attributes (list or tuple of strings): Attributes to report from a
Block, can be a Var, Param, or Expression. If an attribute doesn't
exist or doesn't have a valid value, it will be treated as missing
data.
heading (list or tuple of strings): A list of strings that will be used
as column headings. If None the attribute names will be used.
exception (bool): If True, raise exceptions related to invalid or
missing indexes. If False, missing or bad indexes are ignored and
None is used for the table value. Setting this to False allows
tables where some state blocks have the same attributes with different
indexing. (default is True)
Returns:
(DataFrame): A Pandas dataframe containing a data table
"""
if heading is None:
heading = attributes
st = DataFrame(columns=heading)
row = [None] * len(attributes) # not a big deal but save time on realloc
for key, s in blocks.items():
for i, a in enumerate(attributes):
j = None
if isinstance(a, (list, tuple)):
# if a is list or tuple, assume index supplied
try:
assert len(a) > 1
except AssertionError:
_log.error(f"An index must be supplided for attribute {a[0]}")
raise AssertionError(
f"An index must be supplided for attribute {a[0]}"
)
j = a[1:]
a = a[0]
v = getattr(s, a, None)
if j is not None and v is not None:
try:
v = v[j]
except KeyError:
if not exception:
v = None
else:
_log.error(f"{j} is not a valid index of {a}")
raise KeyError(f"{j} is not a valid index of {a}")
try:
v = value(v, exception=False)
except TypeError:
if not exception:
v = None
else:
_log.error(f"Cannot calculate value of {a} (may be subscriptable)")
raise TypeError(
f"Cannot calculate value of {a} (may be subscriptable)"
)
except ZeroDivisionError:
v = None
row[i] = v
st.loc[key] = row
return st
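# --- Illustrative usage sketch (not part of the original module) ---
# The block and attribute names below are hypothetical; (name, index) entries
# supply an index (here the time index 0) for indexed components.
def _example_generate_table(m):
    blocks = {"H101": m.fs.H101, "F102": m.fs.F102}
    attributes = [("heat_duty", 0), ("deltaP", 0)]
    return generate_table(
        blocks, attributes, heading=["Heat duty", "Pressure drop"], exception=False
    )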
|
the-stack_0_9340 | #!/usr/bin/env python
"""Execute a Rekall plugin on the client memory.
This module implements the Rekall enabled client actions.
"""
import json
import os
import pdb
import sys
# Initialize the Rekall plugins, so pylint: disable=unused-import
from rekall import addrspace
from rekall import constants
from rekall import io_manager
from rekall import obj
from rekall import plugins
from rekall import session
from rekall.plugins.addrspaces import standard
from rekall.plugins.renderers import data_export
# pylint: enable=unused-import
import logging
from grr.client import actions
from grr.client import vfs
from grr.client.client_actions import tempfiles
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import utils
class Error(Exception):
pass
class ProfileNotFoundError(ValueError):
pass
class GRRObjectRenderer(data_export.NativeDataExportObjectRenderer):
"""A default object renderer for the GRRRekallRenderer.
GRR Renders all Rekall objects using the Rekall DataExportRenderer. By default
we just delegate everything to DataExportRenderer.
"""
renders_type = "object"
renderers = ["GRRRekallRenderer"]
def _GetDelegateObjectRenderer(self, item):
return self.FromEncoded(item, "DataExportRenderer")(
renderer=self.renderer)
def EncodeToJsonSafe(self, item, **options):
return self._GetDelegateObjectRenderer(item).EncodeToJsonSafe(
item, **options)
def DecodeFromJsonSafe(self, value, options):
return self._GetDelegateObjectRenderer(value).DecodeFromJsonSafe(
value, options)
def RawHTML(self, item, **options):
return self._GetDelegateObjectRenderer(item).Summary(item, **options)
def Summary(self, item, **options):
return self._GetDelegateObjectRenderer(item).Summary(item, **options)
class GRRRekallRenderer(data_export.DataExportRenderer):
"""This renderer sends all messages to the server encoded as JSON.
Note that this renderer is used to encode and deliver Rekall objects to the
server. Additionally Rekall ObjectRenderer implementations specific to GRR
will be attached to this renderer.
"""
name = None
# Maximum number of statements to queue before sending a reply.
RESPONSE_CHUNK_SIZE = 1000
def __init__(self, rekall_session=None, action=None):
"""Collect Rekall rendering commands and send to the server.
Args:
rekall_session: The Rekall session object.
action: The GRR Client Action which owns this renderer. We will use it to
actually send messages back to the server.
"""
try:
sys.stdout.isatty()
except AttributeError:
sys.stdout.isatty = lambda: False
super(GRRRekallRenderer, self).__init__(session=rekall_session)
# A handle to the client action we can use for sending responses.
self.action = action
# The current plugin we are running.
self.plugin = None
self.context_messages = {}
self.new_context_messages = {}
def start(self, plugin_name=None, kwargs=None):
self.plugin = plugin_name
return super(GRRRekallRenderer, self).start(plugin_name=plugin_name,
kwargs=kwargs)
def write_data_stream(self):
"""Prepares a RekallResponse and send to the server."""
if self.data:
response_msg = rdfvalue.RekallResponse(
json_messages=json.dumps(self.data, separators=(",", ":")),
json_context_messages=json.dumps(self.context_messages.items(),
separators=(",", ":")),
plugin=self.plugin)
self.context_messages = self.new_context_messages
self.new_context_messages = {}
# Queue the response to the server.
self.action.SendReply(response_msg)
def SendMessage(self, statement):
super(GRRRekallRenderer, self).SendMessage(statement)
if statement[0] in ["s", "t"]:
self.new_context_messages[statement[0]] = statement[1]
if len(self.data) > self.RESPONSE_CHUNK_SIZE:
self.flush()
def open(self, directory=None, filename=None, mode="rb"):
result = tempfiles.CreateGRRTempFile(filename=filename, mode=mode)
# The tempfile library created an os path, we pass it through vfs to
# normalize it.
with vfs.VFSOpen(rdfvalue.PathSpec(
path=result.name,
pathtype=rdfvalue.PathSpec.PathType.OS)) as vfs_fd:
dict_pathspec = vfs_fd.pathspec.ToPrimitiveDict()
self.SendMessage(["file", dict_pathspec])
return result
def report_error(self, message):
super(GRRRekallRenderer, self).report_error(message)
if flags.FLAGS.debug:
pdb.post_mortem()
class GrrRekallSession(session.Session):
"""A GRR Specific Rekall session."""
def __init__(self, fhandle=None, action=None, **session_args):
super(GrrRekallSession, self).__init__(**session_args)
self.action = action
# Ensure the action's Progress() method is called when Rekall reports
# progress.
self.progress.Register(id(self), lambda *_, **__: self.action.Progress())
def LoadProfile(self, filename):
"""Wraps the Rekall profile's LoadProfile to fetch profiles from GRR."""
# If the user specified a special profile path we use their choice.
profile = super(GrrRekallSession, self).LoadProfile(filename)
if profile:
return profile
# Can't load the profile; we need to ask the server for it.
logging.debug("Asking server for profile %s" % filename)
self.action.SendReply(
rdfvalue.RekallResponse(
missing_profile="%s/%s" % (
constants.PROFILE_REPOSITORY_VERSION, filename)))
# Wait for the server to wake us up. When we wake up the server should
# have sent the profile over by calling the WriteRekallProfile.
self.action.Suspend()
# Now the server should have sent the data already. We try to load the
# profile one more time.
return super(GrrRekallSession, self).LoadProfile(
filename, use_cache=False)
def GetRenderer(self):
# We will use this renderer to push results to the server.
return GRRRekallRenderer(rekall_session=self, action=self.action)
class WriteRekallProfile(actions.ActionPlugin):
"""A client action to write a Rekall profile to the local cache."""
in_rdfvalue = rdfvalue.RekallProfile
def Run(self, args):
output_filename = utils.JoinPath(
config_lib.CONFIG["Client.rekall_profile_cache_path"], args.name)
try:
os.makedirs(os.path.dirname(output_filename))
except OSError:
pass
with open(output_filename, "wb") as fd:
fd.write(args.data)
class RekallAction(actions.SuspendableAction):
"""Runs a Rekall command on live memory."""
in_rdfvalue = rdfvalue.RekallRequest
out_rdfvalue = rdfvalue.RekallResponse
def Iterate(self):
"""Run a Rekall plugin and return the result."""
# Open the device pathspec as requested by the server.
with vfs.VFSOpen(self.request.device,
progress_callback=self.Progress) as fhandle:
# Create a session and run all the plugins with it.
session_args = self.request.session.ToDict()
# If the user has not specified a special profile path, we use the local
# cache directory.
if "profile_path" not in session_args:
session_args["profile_path"] = [config_lib.CONFIG[
"Client.rekall_profile_cache_path"]]
session_args.update(fhandle.GetMetadata())
rekal_session = GrrRekallSession(action=self, **session_args)
# Wrap GRR's VFS handler for the device in a Rekall FDAddressSpace so we
# can pass it directly to the Rekall session as the physical address
# space. This avoids the AS voting mechanism for Rekall's image format
# detection.
with rekal_session:
rekal_session.physical_address_space = standard.FDAddressSpace(
session=rekal_session, fhandle=fhandle)
# Autodetect the profile. Valid plugins for this profile will become
# available now.
rekal_session.GetParameter("profile")
for plugin_request in self.request.plugins:
# Get the keyword args to this plugin.
plugin_args = plugin_request.args.ToDict()
try:
rekal_session.RunPlugin(plugin_request.plugin, **plugin_args)
except Exception: # pylint: disable=broad-except
# Just ignore errors, and run the next plugin. Errors will be reported
# through the renderer.
pass
|
the-stack_0_9341 | import argparse
import cv2
import numpy
import PIL.Image
import torch
import torchvision.transforms as transforms
from PIL import Image
from torch.autograd import Variable
from models import *
from tools.canny import processing
from tools.picture2texture import estimate
def sample_images(generator,Tensor,imgs):
"""
save the processed pictures
Args:
generator: trained model
Tensor: tensor format
imgs: real picture
Author: Zhongqi Wang
"""
real_A = Variable(imgs.type(Tensor))
real_A = real_A.unsqueeze(0)
fake_B = generator(real_A)
cv2.imwrite("generate.png" ,255*fake_B[0].squeeze(0).cpu().swapaxes(0,2).swapaxes(0,1).numpy())
def process(opt,file_path):
"""
get the HED edge-painting
Args:
opt: opt file
file_path: the file path U want to process
Author: Zhongqi Wang
"""
arguments_strOut = "HED.jpg"
src = cv2.imread(file_path, 0)
src = cv2.resize(src, (opt.img_width,opt.img_height))
src_RGB = cv2.cvtColor(src, cv2.COLOR_GRAY2RGB)
a = PIL.Image.fromarray(src_RGB)
b = numpy.array(a)[:, :]
tenInput = torch.FloatTensor(numpy.ascontiguousarray(b.transpose(2, 0, 1).astype(numpy.float32) * (1.0 / 255.0)))
tenOutput = estimate(tenInput)
PIL.Image.fromarray((tenOutput.clip(0.0, 1.0).numpy().transpose(1, 2, 0)[:, :, 0] * 255.0).astype(numpy.uint8)).save(arguments_strOut)
def main(path):
parser = argparse.ArgumentParser()
parser.add_argument("--img_height", type=int, default=512, help="size of image height")
parser.add_argument("--img_width", type=int, default=512, help="size of image width")
opt = parser.parse_args()
transform=transforms.Compose([
transforms.ToTensor(),
])
cuda = True if torch.cuda.is_available() else False
generator = GeneratorUNet()
if cuda:
generator = generator.cuda() # use the GPU
generator.load_state_dict(torch.load("generator_45_canny.pth"))
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
process(opt,path) # produce the HED edge image
img = processing(path) # produce the Canny edge image
cv2.imwrite("canny.jpg",img)
pic1 = cv2.imread("HED.jpg")
pic1 = cv2.resize(pic1, (opt.img_width,opt.img_height))
pic2 = cv2.imread("canny.jpg")
pic2 = cv2.resize(pic2, (opt.img_width,opt.img_height))
train_data = pic1+pic2
cv2.imwrite("canny&HED.jpg",train_data) #得到二者叠加
frame = cv2.resize(train_data,(opt.img_width,opt.img_height))
frame = Image.fromarray(cv2.cvtColor(frame,cv2.COLOR_BGR2RGB))
frame = transform(frame)
sample_images(generator,Tensor,frame) # feed into the pix2pix model for inference
if __name__ == "__main__":
path = "test_pic/6.jpg" # 要处理的图片
main(path)
|
the-stack_0_9344 | import os
import functools
from flask import Flask
from flask import request
import redis
import hn_feeds
import logger_config
app = Flask(__name__)
logger = logger_config.get_logger()
@functools.lru_cache(None)
def _get_feed_generator():
redis_server = os.environ.get("REDIS_SERVER", None)
if redis_server:
host, port = redis_server.split(":")
redis_db = os.environ.get("REDIS_DB", 0)
redis_client = redis.Redis(host=host, port=int(port), db=redis_db)
redis_client.ping() # test connection
logger.info(f"Connected to Redis at {host}:{port}")
else:
redis_client = None
logger.warning("Not using Redis")
return hn_feeds.HNFeedsGenerator(
timeout_secs=int(os.environ.get("TIMEOUT_SECS", 5)),
max_workers=int(os.environ.get("MAX_WORKERS", 5)),
redis_client=redis_client,
redis_expire_secs=int(os.environ.get("REDIS_EXPIRE_SECS", 172800)),
fulltext_rss_url=os.environ.get("FULLTEXT_RSS_URL", None))
# global feed generator
_feed_generator = _get_feed_generator()
@app.route('/')
def base():
return '<p>Must pass a URL with a feed to parse!</p>'
@app.route('/favicon.ico')
def no_favicon():
"""Returns 404 if we pass a favicon request."""
return '', 404
@app.route('/<path:url>')
def main_entry(url):
del url # Unused since we need full path anyway.
full_path = request.full_path[1:] # Strip leading /.
base_rss = f'http://{full_path}'
logger.info(f'Got request for "{base_rss}". Creating feed.')
fg = _feed_generator.create_feed(base_rss=base_rss)
if not fg:
return '', 404
xml = fg.atom_str(pretty=True)
return xml, 200, {'Content-Type': 'text/xml; charset=utf-8'}
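# --- Illustrative usage sketch (not part of the original module) ---
# A typical local run might look like the following; the Redis address and
# the feed URL are examples only.
#
#   REDIS_SERVER=localhost:6379 FLASK_APP=app.py flask run
#   curl http://127.0.0.1:5000/news.ycombinator.com/rss
#
# The request path (minus the leading slash) is prefixed with "http://" and
# handed to the feed generator, which returns an Atom XML document.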
|
the-stack_0_9345 | #!/usr/bin/env python
import optparse
import os
import sys
chplenv_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(chplenv_dir))
import chpl_comm, chpl_compiler, chpl_platform, overrides
from compiler_utils import CompVersion, get_compiler_version
from utils import error, memoize
@memoize
def get(flag='target'):
if flag == 'network':
atomics_val = overrides.get('CHPL_NETWORK_ATOMICS')
if not atomics_val:
if chpl_comm.get() == 'ugni' and get('target') != 'locks':
atomics_val = 'ugni'
else:
atomics_val = 'none'
elif flag == 'target':
atomics_val = overrides.get('CHPL_ATOMICS')
if not atomics_val:
compiler_val = chpl_compiler.get('target')
platform_val = chpl_platform.get('target')
# we currently support intrinsics for gcc, intel, cray and clang.
# gcc added initial support in 4.1, and added support for 64 bit
# atomics on 32 bit platforms with 4.8. clang and intel also
# support 64 bit atomics on 32 bit platforms and the cray compiler
# will never run on a 32 bit machine. For pgi or 32 bit platforms
# with an older gcc, we fall back to locks
if compiler_val in ['gnu', 'cray-prgenv-gnu', 'mpi-gnu']:
version = get_compiler_version('gnu')
if version >= CompVersion('4.8'):
atomics_val = 'intrinsics'
elif version >= CompVersion('4.1') and not platform_val.endswith('32'):
atomics_val = 'intrinsics'
elif compiler_val == 'aarch64-gnu':
atomics_val = 'cstdlib'
elif compiler_val == 'intel' or compiler_val == 'cray-prgenv-intel':
atomics_val = 'intrinsics'
elif compiler_val == 'cray-prgenv-cray':
atomics_val = 'intrinsics'
elif compiler_val == 'clang':
atomics_val = 'intrinsics'
elif compiler_val == 'clang-included':
atomics_val = 'intrinsics'
# we can't use intrinsics, fall back to locks
if not atomics_val:
atomics_val = 'locks'
else:
error("Invalid flag: '{0}'".format(flag), ValueError)
return atomics_val
def _main():
parser = optparse.OptionParser(usage='usage: %prog [--network|--target]')
parser.add_option('--target', dest='flag', action='store_const',
const='target', default='target')
parser.add_option('--network', dest='flag', action='store_const',
const='network')
(options, args) = parser.parse_args()
atomics_val = get(options.flag)
sys.stdout.write("{0}\n".format(atomics_val))
if __name__ == '__main__':
_main()
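# --- Illustrative usage sketch (not part of the original script) ---
# The script prints the selected atomics implementation, for example:
#
#   $ python chpl_atomics.py            # same as --target
#   intrinsics
#   $ python chpl_atomics.py --network
#   none
#
# The file name and printed values here are examples only; the real output
# depends on the CHPL_ATOMICS/CHPL_NETWORK_ATOMICS overrides, the target
# compiler and platform, and the communication layer.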
|
the-stack_0_9346 | class Solution:
"""
@param digits: a number represented as an array of digits
@return: the result
"""
def plusOne(self, digits):
if len(digits) == 0:
return digits
digits[-1] += 1
for i in range(len(digits) - 1, 0, -1):
if digits[i] == 10:
digits[i] = 0
digits[i - 1] += 1
if digits[0] == 10:
digits[0] = 0
digits.insert(0, 1)
return digits
|
the-stack_0_9349 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.test_utils."""
import numpy as np
import tensorflow as tf
from object_detection.utils import test_utils
class TestUtilsTest(tf.test.TestCase):
def test_diagonal_gradient_image(self):
"""Tests if a good pyramid image is created."""
pyramid_image = test_utils.create_diagonal_gradient_image(3, 4, 2)
# Test which is easy to understand.
expected_first_channel = np.array([[3, 2, 1, 0],
[4, 3, 2, 1],
[5, 4, 3, 2]], dtype=np.float32)
self.assertAllEqual(np.squeeze(pyramid_image[:, :, 0]),
expected_first_channel)
# Actual test.
expected_image = np.array([[[3, 30],
[2, 20],
[1, 10],
[0, 0]],
[[4, 40],
[3, 30],
[2, 20],
[1, 10]],
[[5, 50],
[4, 40],
[3, 30],
[2, 20]]], dtype=np.float32)
self.assertAllEqual(pyramid_image, expected_image)
def test_random_boxes(self):
"""Tests if valid random boxes are created."""
num_boxes = 1000
max_height = 3
max_width = 5
boxes = test_utils.create_random_boxes(num_boxes,
max_height,
max_width)
true_column = np.ones(shape=(num_boxes)) == 1
self.assertAllEqual(boxes[:, 0] < boxes[:, 2], true_column)
self.assertAllEqual(boxes[:, 1] < boxes[:, 3], true_column)
self.assertTrue(boxes[:, 0].min() >= 0)
self.assertTrue(boxes[:, 1].min() >= 0)
self.assertTrue(boxes[:, 2].max() <= max_height)
self.assertTrue(boxes[:, 3].max() <= max_width)
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_9350 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Merge source maps to build composite sources
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import yaml
from astropy.io import fits
from fermipy.skymap import HpxMap
from fermipy.utils import load_yaml
from fermipy.jobs.scatter_gather import ScatterGather
from fermipy.jobs.slac_impl import make_nfs_path
from fermipy.jobs.link import Link
from fermipy.jobs.chain import Chain
from fermipy.diffuse.binning import Component
from fermipy.diffuse.name_policy import NameFactory
from fermipy.diffuse import defaults as diffuse_defaults
from fermipy.diffuse.model_manager import make_library
NAME_FACTORY = NameFactory()
class InitModel(Link):
"""Small class to preprate files fermipy analysis.
Specifically this create the srcmap_manifest and fermipy_config_yaml files
"""
appname = 'fermipy-init-model'
linkname_default = 'init-model'
usage = '%s [options]' % (appname)
description = "Initialize model fitting directory"
default_options = dict(comp=diffuse_defaults.diffuse['comp'],
data=diffuse_defaults.diffuse['data'],
library=diffuse_defaults.diffuse['library'],
models=diffuse_defaults.diffuse['models'],
hpx_order=diffuse_defaults.diffuse['hpx_order_fitting'])
def run_analysis(self, argv):
""" Build the manifest for all the models
"""
args = self._parser.parse_args(argv)
components = Component.build_from_yamlfile(args.comp)
NAME_FACTORY.update_base_dict(args.data)
model_dict = make_library(**args.__dict__)
model_manager = model_dict['ModelManager']
models = load_yaml(args.models)
data = args.data
hpx_order = args.hpx_order
for modelkey in models:
model_manager.make_srcmap_manifest(modelkey, components, data)
model_manager.make_fermipy_config_yaml(modelkey, components, data,
hpx_order=hpx_order,
irf_ver=NAME_FACTORY.irf_ver())
class AssembleModel(Link):
"""Small class to assemple source map files for fermipy analysis.
This is useful for re-merging after parallelizing source map creation.
"""
appname = 'fermipy-assemble-model'
linkname_default = 'assemble-model'
usage = '%s [options]' % (appname)
description = "Assemble sourcemaps for model fitting"
default_options = dict(input=(None, 'Input yaml file', str),
compname=(None, 'Component name.', str),
hpx_order=diffuse_defaults.diffuse['hpx_order_fitting'])
@staticmethod
def copy_ccube(ccube, outsrcmap, hpx_order):
"""Copy a counts cube into outsrcmap file
reducing the HEALPix order to hpx_order if needed.
"""
sys.stdout.write(" Copying counts cube from %s to %s\n" % (ccube, outsrcmap))
try:
hdulist_in = fits.open(ccube)
except IOError:
hdulist_in = fits.open("%s.gz" % ccube)
hpx_order_in = hdulist_in[1].header['ORDER']
if hpx_order_in > hpx_order:
hpxmap = HpxMap.create_from_hdulist(hdulist_in)
hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True)
hpxlist_out = hdulist_in
#hpxlist_out['SKYMAP'] = hpxmap_out.create_image_hdu()
hpxlist_out[1] = hpxmap_out.create_image_hdu()
hpxlist_out[1].name = 'SKYMAP'
hpxlist_out.writeto(outsrcmap)
return hpx_order
else:
os.system('cp %s %s' % (ccube, outsrcmap))
#os.system('cp %s.gz %s.gz' % (ccube, outsrcmap))
#os.system('gunzip -f %s.gz' % (outsrcmap))
return None
@staticmethod
def open_outsrcmap(outsrcmap):
"""Open and return the outsrcmap file in append mode """
outhdulist = fits.open(outsrcmap, 'append')
return outhdulist
@staticmethod
def append_hdus(hdulist, srcmap_file, source_names, hpx_order):
"""Append HEALPix maps to a list
Parameters
----------
hdulist : list
The list being appended to
srcmap_file : str
Path to the file containing the HDUs
source_names : list of str
Names of the sources to extract from srcmap_file
hpx_order : int
Maximum order for maps
"""
sys.stdout.write(" Extracting %i sources from %s" % (len(source_names), srcmap_file))
try:
hdulist_in = fits.open(srcmap_file)
except IOError:
try:
hdulist_in = fits.open('%s.gz' % srcmap_file)
except IOError:
sys.stdout.write(" Missing file %s\n" % srcmap_file)
return
for source_name in source_names:
sys.stdout.write('.')
sys.stdout.flush()
if hpx_order is None:
hdulist.append(hdulist_in[source_name])
else:
try:
hpxmap = HpxMap.create_from_hdulist(hdulist_in, hdu=source_name)
except IndexError:
print(" Index error on source %s in file %s" % (source_name, srcmap_file))
continue
except KeyError:
print(" Key error on source %s in file %s" % (source_name, srcmap_file))
continue
hpxmap_out = hpxmap.ud_grade(hpx_order, preserve_counts=True)
hdulist.append(hpxmap_out.create_image_hdu(name=source_name))
sys.stdout.write("\n")
hdulist.flush()
hdulist_in.close()
@staticmethod
def assemble_component(compname, compinfo, hpx_order):
"""Assemble the source map file for one binning component
Parameters
----------
compname : str
The key for this component (e.g., E0_PSF3)
compinfo : dict
Information about this component
hpx_order : int
Maximum order for maps
"""
sys.stdout.write("Working on component %s\n" % compname)
ccube = compinfo['ccube']
outsrcmap = compinfo['outsrcmap']
source_dict = compinfo['source_dict']
hpx_order = AssembleModel.copy_ccube(ccube, outsrcmap, hpx_order)
hdulist = AssembleModel.open_outsrcmap(outsrcmap)
for comp_name in sorted(source_dict.keys()):
source_info = source_dict[comp_name]
source_names = source_info['source_names']
srcmap_file = source_info['srcmap_file']
AssembleModel.append_hdus(hdulist, srcmap_file,
source_names, hpx_order)
sys.stdout.write("Done!\n")
def run_analysis(self, argv):
"""Assemble the source map file for one binning component
FIXME
"""
args = self._parser.parse_args(argv)
manifest = yaml.safe_load(open(args.input))
compname = args.compname
value = manifest[compname]
self.assemble_component(compname, value, args.hpx_order)
class AssembleModel_SG(ScatterGather):
"""Small class to generate configurations for this script
Parameters
----------
--comp : binning component definition yaml file
--data : dataset definition yaml file
--models : model definition yaml file
args : Names of models to assemble source maps for
"""
appname = 'fermipy-assemble-model-sg'
usage = "%s [options]" % (appname)
description = "Copy source maps from the library to a analysis directory"
clientclass = AssembleModel
job_time = 300
default_options = dict(comp=diffuse_defaults.diffuse['comp'],
data=diffuse_defaults.diffuse['data'],
hpx_order=diffuse_defaults.diffuse['hpx_order_fitting'],
models=diffuse_defaults.diffuse['models'])
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
components = Component.build_from_yamlfile(args['comp'])
NAME_FACTORY.update_base_dict(args['data'])
models = load_yaml(args['models'])
for modelkey in models:
manifest = os.path.join('analysis', 'model_%s' % modelkey,
'srcmap_manifest_%s.yaml' % modelkey)
for comp in components:
key = comp.make_key('{ebin_name}_{evtype_name}')
fullkey = "%s_%s" % (modelkey, key)
outfile = NAME_FACTORY.merged_srcmaps(modelkey=modelkey,
component=key,
coordsys=comp.coordsys,
mktime='none',
irf_ver=NAME_FACTORY.irf_ver())
logfile = make_nfs_path(outfile.replace('.fits', '.log'))
job_configs[fullkey] = dict(input=manifest,
compname=key,
hpx_order=args['hpx_order'],
logfile=logfile)
return job_configs
class AssembleModelChain(Chain):
"""Small class to split, apply mktime and bin data according to some user-provided specification
"""
appname = 'fermipy-assemble-model-chain'
linkname_default = 'assemble-model-chain'
usage = '%s [options]' % (appname)
description = 'Run init-model and assemble-model'
default_options = dict(data=diffuse_defaults.diffuse['data'],
comp=diffuse_defaults.diffuse['comp'],
library=diffuse_defaults.diffuse['library'],
models=diffuse_defaults.diffuse['models'],
hpx_order=diffuse_defaults.diffuse['hpx_order_fitting'],
dry_run=diffuse_defaults.diffuse['dry_run'])
def __init__(self, **kwargs):
"""C'tor
"""
super(AssembleModelChain, self).__init__(**kwargs)
self.comp_dict = None
def _register_link_classes(self):
InitModel.register_class()
AssembleModel_SG.register_class()
def _map_arguments(self, input_dict):
"""Map from the top-level arguments to the arguments provided to
the individual links """
data = input_dict.get('data')
comp = input_dict.get('comp')
library = input_dict.get('library')
models = input_dict.get('models')
hpx_order = input_dict.get('hpx_order')
dry_run = input_dict.get('dry_run', False)
self._set_link('init-model', InitModel,
comp=comp, data=data,
library=library,
models=models,
hpx_order=hpx_order,
dry_run=dry_run)
self._set_link('assemble-model', AssembleModel_SG,
comp=comp, data=data,
hpx_order=hpx_order,
models=models)
def register_classes():
"""Register these classes with the `LinkFactory` """
InitModel.register_class()
AssembleModel.register_class()
AssembleModel_SG.register_class()
AssembleModelChain.register_class()
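# --- Illustrative usage sketch (not part of the original module) ---
# The classes above register command-line entry points named by their
# ``appname`` attributes; a run might look like (yaml paths and option
# spelling are placeholders):
#
#   fermipy-init-model --comp comp.yaml --data data.yaml \
#       --library library.yaml --models models.yaml --hpx_order 9
#   fermipy-assemble-model-sg --comp comp.yaml --data data.yaml \
#       --models models.yaml --hpx_order 9
#
# or, equivalently, the two steps chained via fermipy-assemble-model-chain.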
|
the-stack_0_9352 | # -*- coding: utf-8 -*-
"""
database.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~
BatteryDataBase data structures.
"""
from chemdataextractor_batteries.chemdataextractor import Document
import json
import copy
class BatteryDataBase():
def __init__(self, paper_root, save_root, filename):
self.dic = None
self.filename = filename
self.paper_root = paper_root
self.count = 0
self.save_root = save_root
def write_into_file(self):
with open('{}/{}.json'.format(self.save_root, self.filename), 'a', encoding='utf-8') as json_file:
json.dump(self.dic, json_file, ensure_ascii=False)
json_file.write('\n')
return
def extract(self, file):
"""
:param file: The file to parse (HTML/XML/...)
:return: Write the record into the documents
"""
# try:
f = open(file, 'rb')
d = Document.from_file(f)
print('parsing ' + file)
rough = d.records.serialize()
print(rough)
data = []
for dic in rough:
if 'Compound' in dic:
continue
try:
dic['metadata'] = d.metadata[0].serialize()
if dic['metadata']['doi'] == "None":
pass
except BaseException:
pass
self.count += 1
if self.is_valid(dic):
dic_list = self.distribute(dic)
data += dic_list
if len(data) <= 3:
for i in data:
i['warning'] = 1
for new_dic in data:
self.dic = new_dic
self.write_into_file()
print(str(self.count) + ' relations in total')
print(file + ' is done')
f.close()
# except BaseException:
# pass
def is_valid(self, dic):
"""
Check if the data record is valid or not
:param dic:
:return:
"""
if "BatteryVolumeCapacity" in dic:
return False
else:
try:
if 'names' in next(iter(dic.values()))['compound']['Compound']:
return True
except BaseException:
return False
def distribute(self, dic):
"""
:param dic: A dictionary returned by CDE
:return: A list of dictionaries with valid records
"""
"""
Extract chemical names if a length of a list > 1
Create a new key: 'names' (list)
"""
# Create a key 'names' (list)
name_length = next(iter(dic.values()))['compound']['Compound']['names']
next(iter(dic.values()))['names'] = [name_length[0]]
if len(name_length) > 1:
for j in name_length[1:]:
if j.lower() not in [x.lower()
for x in next(iter(dic.values()))['names']]:
next(iter(dic.values()))['names'].append(j)
# Update the key 'value' as a list of float
next(iter(dic.values()))['value'] = json.loads(
next(iter(dic.values()))['value'])
# Distribute
dic_lists = self.distribute_value_and_names(dic)
return dic_lists
def distribute_value_and_names(self, dic):
"""
:param dic: A single dictionary, with keys 'names' and 'value' as 2 lists
:return: A list of dictionaries with single name and value
"""
dic_list = []
len_names = len(next(iter(dic.values()))['names'])
len_values = len(next(iter(dic.values()))['value'])
copydic = copy.deepcopy(dic)
if len_names == 1 and len_values == 1:
next(iter(copydic.values()))['value'] = next(
iter(dic.values()))['value'][0]
next(iter(copydic.values()))['names'] = next(
iter(dic.values()))['names'][0]
dic_list.append(copydic)
elif len_names == 1 and len_values > 1:
for j in range(len_values):
next(iter(copydic.values()))['value'] = float(
next(iter(dic.values()))['value'][j])
next(iter(copydic.values()))['names'] = next(
iter(dic.values()))['names'][0]
dic_list.append(copydic)
elif len_names > 1 and len_values == 1:
for j in range(len_names):
next(iter(copydic.values()))['value'] = float(
next(iter(dic.values()))['value'][0])
next(iter(copydic.values()))['names'] = next(
iter(dic.values()))['names'][j]
dic_list.append(copydic)
elif len_names == len_values and len_names > 1:
for j in range(len_names):
next(iter(copydic.values()))['value'] = float(
next(iter(dic.values()))['value'][j])
next(iter(copydic.values()))['names'] = next(
iter(dic.values()))['names'][j]
dic_list.append(copydic)
else:
for j in range(len_names):
for k in range(len_values):
next(iter(copydic.values()))['value'] = float(
next(iter(dic.values()))['value'][k])
next(
iter(
copydic.values()))['names'] = next(
iter(
dic.values()))['names'][j]
dic_list.append(copydic)
return dic_list
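# --- Illustrative sketch (not part of the original module) ---
# ``distribute_value_and_names`` fans a multi-name/multi-value record out into
# one record per combination: for example (the chemical name and number are
# made up), an inner dict holding
#   {'names': ['LiFePO4', 'lithium iron phosphate'], 'value': [160.0], ...}
# is intended to yield two records, one per name, each with value 160.0; equal
# multi-length lists pair up element-wise, and unequal lengths give the full
# cross product. Note that ``copydic`` is deep-copied only once, before the
# loops, so every appended entry is the same object; a per-iteration
# ``copy.deepcopy(dic)`` would be needed for truly independent records.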
|
the-stack_0_9356 | ##Elias Howell | 10/24/2019 | Homework #3
#Compares two lists and returns a list of items shared by the two
def similar_items(list1, list2):
listOfItems = []
for item in list1:
if item in list2:
listOfItems.append(item)
return listOfItems
#Compares two lists and returns a list of items not shared by the two
def unique_items(list1, list2):
listOfItems = []
for item in list1:
if item not in list2:
listOfItems.append(item)
return listOfItems
#Takes the sum of all items in a list
def sum_items(myList):
summationOfItems = 0
for item in myList:
summationOfItems += item
return summationOfItems
#Takes the product of all items in a list
def multiply_items(myList):
productOfItems = 1
for item in myList:
productOfItems *= item
return productOfItems
#Finds and returns the smallest value in a list
def minimum_item(myList):
minValue = myList[0]
for item in myList:
if item < minValue:
minValue = item
return minValue
#Finds and returns the largest value in a list
def maximum_item(myList):
maxValue = myList[0]
for item in myList:
if item > maxValue:
maxValue = item
return maxValue
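# --- Illustrative demo (not part of the original homework file) ---
# Expected output is shown in the trailing comments.
if __name__ == "__main__":
    print(similar_items([1, 2, 3], [2, 3, 4]))  # [2, 3]
    print(unique_items([1, 2, 3], [2, 3, 4]))   # [1]
    print(sum_items([1, 2, 3]))                 # 6
    print(multiply_items([2, 3, 4]))            # 24
    print(minimum_item([5, 2, 9]))              # 2
    print(maximum_item([5, 2, 9]))              # 9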
|
the-stack_0_9357 | from datetime import datetime
from datetime import date
from typing import Optional
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm import dynamic
from flask_atomic.orm.database import db
from flask_atomic.orm.mixins.core import CoreMixin
import logging
# ``logger`` is referenced in ``makequery`` below but was not defined in this
# snippet; a module-level standard-library logger is assumed here.
logger = logging.getLogger(__name__)
def extract(model, fields=None, exclude: Optional[set] = None) -> dict:
resp = dict()
if exclude is None:
exclude = set()
if fields is None:
fields = model.keys()
restricted_fields = getattr(model, 'RESTRICTED_FIELDS', set())
if restricted_fields:
fields.discard(restricted_fields)
exclude = exclude.union(restricted_fields or set())
for column in set(fields).difference(set(exclude)):
if isinstance(getattr(model, column), datetime) or isinstance(getattr(model, column), date):
resp[column] = str(getattr(model, column))
else:
resp[column] = getattr(model, column)
return resp
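# --- Illustrative sketch (not part of the original module) ---
# ``user`` is a hypothetical model instance with columns ``id``, ``email``
# and ``created`` (a DateTime):
#
#   extract(user, fields={"id", "email", "created"}, exclude={"email"})
#   # -> {"id": 1, "created": "2021-01-01 00:00:00"}
#
# Dates and datetimes are stringified, and anything listed in ``exclude`` or
# in the model's RESTRICTED_FIELDS is dropped from the result.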
class DeclarativeBase(db.Model, CoreMixin):
"""
Base model to be extended for use with Flask projects.
Core concept of the model is common functions to help wrap up database
interaction into a single interface. Testing can be rolled up easier this
way also. Inheriting from this class automatically sets id field and db
soft deletion field managed by active using the DYNA pattern (D, Y, N, A).
Basic usage::
from flask_atomic.sqlalchemy.declarative import DeclarativeBase
class MyNewModel(DeclarativeBase):
field_a = db.Column(db.String(256), nullable=True)
"""
__abstract__ = True
# active = db.Column(db.String(5), default='Y')
def __str__(self):
return self.whatami()
@classmethod
def identify_primary_key(cls):
return list(cls.__table__.primary_key).pop().name
@classmethod
def checkfilters(cls, filters):
resp = {}
for k, v in filters.items():
resp[cls.normalise(k)] = v
return resp
@classmethod
def getquery(cls):
return db.session.query
@classmethod
def makequery(cls, fields=None):
try:
# return db.session.query(cls, fields)
if not fields:
return cls.query
return db.session.query(cls, *fields)
except Exception as e:
logger.error(str(e))
db.session.rollback()
return db.session.query(cls, *fields)
@classmethod
def relations(cls, flag):
if flag == True:
return set(cls.__mapper__.relationships.keys())
elif isinstance(flag, list):
return set(flag)
return set()
@classmethod
def relationattrs(cls):
return set(cls.__mapper__.relationships.keys())
@classmethod
def objectcolumns(cls, include_relationships=False):
bound_columns = set(cls.__mapper__.columns)
if include_relationships:
rels = cls.__mapper__.relationships
return bound_columns.union(set([i.class_attribute for i in cls.__mapper__.relationships]))
return bound_columns
@classmethod
def keys(cls):
return set(cls.__table__.columns.keys())
@classmethod
def schema(cls, rel=True, exclude=None):
if exclude is None:
exclude = []
schema = []
for item in [key for key in cls.keys() if key not in exclude]:
schema.append(dict(name=item.replace('_', ' '), key=item))
return schema
@classmethod
def getkey(cls, field):
if isinstance(field, InstrumentedAttribute):
return getattr(cls, field.key)
return getattr(cls, field)
def relationships(self, root=''):
return list(filter(lambda r: r != root, self.__mapper__.relationships.keys()))
def columns(self, exc: Optional[list] = None) -> list:
"""
Gets a list of columns to work with, minus the excluded sublist (exc).
:param exc:
:return:
"""
if exc is None:
exc = list()
return [key for key in list(self.__table__.columns.keys()) if key not in exc]
def whatami(self) -> str:
"""
Self-describe the model.
:return: Descriptive name based on the tablename used at declaration.
"""
# I am not a number :)
return self.__tablename__
def process_relationships(self, root: str, exclude: set = None, rels=None):
resp = dict()
if rels is None or isinstance(rels, bool):
rels = self.relationships(root)
for idx, item in enumerate(rels):
# First check if it is a sub lookup
_lookup = None
if hasattr(self, '__i__' + item):
resp[item] = getattr(self, '__i__' + item)
continue
sublookup = False
if '.' in item:
sublookup = True
lookup = item.split('.')
_lookup = lookup.copy()
relationship_instance = getattr(getattr(self, lookup.pop(0), None), lookup.pop())
else:
relationship_instance = getattr(self, item, None)
if isinstance(relationship_instance, dynamic.AppenderMixin):
# TO handle dynamic relationships (lazy=dynamic)
fields = set(map(lambda x: x.key, relationship_instance._entity_zero().column_attrs)).difference(exclude)
resp[item] = []
if hasattr(self, '__i__' + item):
resp[item] = getattr(self, '__i__' + item)
else:
for index, entry in enumerate(relationship_instance.all()):
resp[item].append(extract(entry, fields))
elif isinstance(relationship_instance, list):
# if relationship_instance.uselist:
if sublookup:
parent = _lookup.pop(0)
attr = _lookup.pop()
else:
resp[item] = []
for index, entry in enumerate(relationship_instance):
fields = set(entry.keys()).difference(exclude)
if sublookup:
if not resp.get(parent, None):
resp[parent] = dict()
resp[parent].setdefault(attr, []).append(entry.extract(fields))
else:
resp[item].append(entry.extract(set(entry.keys()).difference(exclude)))
elif relationship_instance:
fields = set(relationship_instance.keys()).difference(exclude)
if _lookup:
resp[_lookup.pop(0)][_lookup.pop()] = relationship_instance.extract(fields)
else:
resp[item] = relationship_instance.extract(fields)
return resp
def extract(self, fields=None, exclude: Optional[set] = None, **kwargs) -> dict:
resp = dict()
if exclude is None:
exclude = set()
if fields is None:
fields = self.keys()
restricted_fields = getattr(self, 'RESTRICTED_FIELDS', set())
if restricted_fields and not kwargs.get('private', None):
fields.discard(restricted_fields)
exclude = exclude.union(restricted_fields or set())
for column in set(fields).difference(set(exclude)):
if isinstance(getattr(self, column), datetime) or isinstance(getattr(self, column), date):
resp[column] = str(getattr(self, column))
else:
resp[column] = getattr(self, column)
return resp
def serialize(self, fields=None, exc: Optional[set] = None, rels=False, root=None, exclude=None, functions=None,
**kwargs):
"""
This utility function dynamically converts Alchemy model classes into a
dict using introspective lookups. This saves on manually mapping each
model and all the fields. However, exclusions should be noted. Such as
passwords and protected properties.
:param functions:
:param fields: More of a whitelist of fields to include (preferred way)
:param rels: Whether or not to introspect to relationships
:param exc: Fields to exclude from query result set
:param root: Root model for processing relationships. This acts as a
recursive sentinel to prevent infinite recursion due to selecting oneself
as a related model, and then infinitely trying to traverse the roots
own relationships, from itself over and over.
:param exclude: Exclusion in set form. Currently in favour of exc param.
Only remedy to this is also to use one way relationships. Avoiding any
back referencing of models.
:return: json data structure of model
:rtype: dict
"""
if functions is None:
functions = {}
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
if not fields:
fields = set(self.fields())
if root is None:
root = self.whatami()
if exc is None:
exc = {'password'}
set(exclude).union(exc)
# Define our model properties here. Columns and Schema relationships
resp = self.extract(fields, exc, **kwargs)
if functions:
for key, value in functions.items():
resp[f'_{key}'] = value(getattr(self, key))
restricted_fields = set(fields).discard(getattr(self, 'RESTRICTED_FIELDS', set()))
if restricted_fields:
fields.discard(restricted_fields)
exclude = exclude.union(restricted_fields or set())
rels = rels or set(self.relationships()).intersection(fields)
if not rels or len(set(self.relationships())) < 1:
return resp
# for rel in rels:
# if rel in [i.split('__i__').pop() for i in self.__dict__ if '__i__' in i]:
# rels.remove(rel)
resp.update(self.process_relationships(root, rels=rels, exclude=exclude))
return resp
def __eq__(self, comparison):
if type(self) != type(comparison):
raise ValueError('Objects are not the same. Cannot compare')
base = self.columns()
base_dictionary = self.__dict__
comp_dictionary = self.__dict__
flag = True
for column_name in base:
if base_dictionary[column_name] != comp_dictionary[column_name]:
flag = False
break
return flag
|
the-stack_0_9360 | """models.cipher
This module contains the ciphers that are stored in the database
"""
import json
from app import db
from models import funcs
from sqlalchemy import sql
class Cipher(db.Model):
"""
The Cipher class stores the cipher string for an individual site's info.
This also contains an enumeration of the different types of cipher
Attributes:
id (int): The id of this cipher
user_id (Foreign Key): The user associated with this cipher
folder_id (Foreign Key): The folder that contains this cipher
organization_id (str): ID of the organization this is associated with
cipher_type (int): The type of cipher
favorite (bool): If this cipher is a favorite or not
data (str): JSON serialized data contained in this cipher
fields (str): JSON serialized fields contained in this cipher
name (str): JSON serialized name of cipher
notes (str): JSON serialized note on cipher
login (str): JSON serialized login
secure_note (str): JSON serialized secure note
card (str): JSON serialized card
identity (str): JSON serialized identity
attachments (str): JSON serialized attachments
create_date (DateTime): The creation time of this cipher
update_date (DateTime): The time of the last update to this cipher
"""
# Type enumeration
TYPE_LOGIN = 1
TYPE_NOTE = 2
TYPE_CARD = 3
TYPE_IDENTITY = 4
# Member variables
id = db.Column(
db.String(64), name='id', primary_key=True,
default=funcs.generateSecureUUID
)
user_id = db.Column(
db.String(64), db.ForeignKey('user.id', ondelete='CASCADE')
)
folder_id = db.Column(
db.String(64), db.ForeignKey('folder.id', ondelete='CASCADE'),
nullable=True
)
organization_id = db.Column(db.String(64), nullable=True)
cipher_type = db.Column(db.Integer, nullable=False)
favorite = db.Column(db.Boolean(), default=False, nullable=False)
data = db.Column(db.JSON(), nullable=True)
name = db.Column(db.JSON(), nullable=True)
notes = db.Column(db.JSON(), nullable=True)
fields = db.Column(db.JSON(), nullable=True)
login = db.Column(db.JSON(), nullable=True)
secure_note = db.Column(db.JSON(), nullable=True)
card = db.Column(db.JSON(), nullable=True)
identity = db.Column(db.JSON(), nullable=True)
attachments = db.Column(db.JSON(), nullable=True)
create_date = db.Column(db.DateTime(), server_default=sql.func.now())
update_date = db.Column(
db.DateTime(), server_default=sql.func.now(), onupdate=sql.func.now()
)
# Functions
@staticmethod
def type_str(in_type):
"""
Returns a string representation of the inputted type
Args:
:param in_type: The inputed type
Returns:
str: The string representation
"""
if(in_type is Cipher.TYPE_LOGIN):
return 'login'
elif(in_type is Cipher.TYPE_NOTE):
return 'note'
elif(in_type is Cipher.TYPE_CARD):
return 'card'
elif(in_type is Cipher.TYPE_IDENTITY):
return 'identity'
else:
return str(in_type)
def updateFromParams(self, params):
"""
This function will update a cipher based on the passed in parameters
Args:
:param self: This object
:param params: A dictionary of params
"""
self.folder_id = params['folderid']
self.organization_id = params['organizationid']
self.favorite = bool(params['favorite'])
self.cipher_type = int(params['type'])
self.name = params['name']
self.notes = params['notes']
self.fields = funcs.uppercaseFirstHash(params['fields'])
# Parse additional data based on cipher type
if(self.cipher_type is Cipher.TYPE_LOGIN):
login_data = funcs.uppercaseFirstHash(params['login'])
if(login_data['Uris'] and isinstance(login_data['Uris'], dict)):
login_data['Uris'] = funcs.uppercaseFirstHash(
login_data['Uris']
)
self.login = login_data
elif(self.cipher_type is Cipher.TYPE_NOTE):
self.secure_note = funcs.uppercaseFirstHash(params['securenote'])
elif(self.cipher_type is Cipher.TYPE_CARD):
self.card = funcs.uppercaseFirstHash(params['card'])
else:
# TODO: Implement more types
if(self.cipher_type is Cipher.TYPE_IDENTITY):
self.identity = funcs.uppercaseFirstHash(params['identity'])
def toHash(self):
"""
Returns the cipher as a hash.
Args:
:param self: The object
Returns:
dict: The hash representation of the object
"""
return {
'Id': self.id,
'Type': self.cipher_type,
'RevisionDate': self.update_date.strftime(
'%Y-%m-%dT%H:%M:%S.000000Z'
),
'FolderId': self.folder_id,
'Favorite': self.favorite,
'OrganizationId': self.organization_id,
'Attachments': self.attachments,
'OrganizationUserTotp': False,
'Object': 'cipher',
'Name': self.name,
'Notes': self.notes,
'Fields': self.fields,
'Login': self.login,
'Card': self.card,
'Identity': self.identity,
'SecureNote': self.secure_note
}
def migrateData(self):
"""
This function will migrate data from being an all in one and split it
into separate fields.
        If there is no data, this method simply returns False. If the data
        cannot be parsed as JSON, a ValueError is raised. If the data is
        neither a dict nor a string, a TypeError is raised.
        Args:
            :param self: The object
        Raises:
            TypeError: If this object's data is not a dict or string
            ValueError: If this object's data cannot be parsed as JSON
            NotImplementedError: If we try to migrate from an unsupported type
"""
        if self.data is None:
            return False
        if isinstance(self.data, str):
            try:
                data = json.loads(self.data)
            except Exception:
                raise ValueError('Cipher data is not valid JSON')
        elif isinstance(self.data, dict):
            data = self.data
        else:
            raise TypeError('Cipher data must be a dict or a JSON string')
self.name = data['Name']
del data['Name']
self.notes = data['Notes']
del data['Notes']
self.fields = data['Fields']
del data['Fields']
        if self.cipher_type == self.TYPE_LOGIN:
            data['Uris'] = {
                'Uri': data['Uri'],
                'Match': None
            }
            del data['Uri']
            self.login = data
        elif self.cipher_type == self.TYPE_NOTE:
            self.secure_note = data
        elif self.cipher_type == self.TYPE_CARD:
            self.card = data
        elif self.cipher_type == self.TYPE_IDENTITY:
            self.identity = data
        else:
            raise NotImplementedError
|
the-stack_0_9362 | """
Interpolation Search
An algorithm that improves on the inefficiency of binary search. Binary search
always halves the search range regardless of where the target lies, whereas
interpolation search estimates where the target is likely to be (for example,
near the front of the range) and probes there. Because the probe lands close
to the data being searched for, it is typically faster than binary search.
"""
from __future__ import print_function
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
# Interpolation search
def interpolation_search(sorted_collection, item):
    """
    The input must already be sorted;
    otherwise the result may be incorrect.
    :param sorted_collection: sorted collection to search
    :param item: key value to search for
    :return: index of the key value, or None if it is not present
    """
left = 0
right = len(sorted_collection) - 1
    while left <= right:
        # Guard against division by zero when the remaining slice has equal endpoint values
        if sorted_collection[right] == sorted_collection[left]:
            return left if sorted_collection[left] == item else None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left])
#out of range check
if point<0 or point>=len(sorted_collection):
return None
current_item = sorted_collection[point]
if current_item == item:
return point
else:
if item < current_item:
right = point - 1
else:
left = point + 1
return None
# Interpolation search using recursion
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """
    The initial call must use left = 0 and right = len(sorted_collection) - 1.
    :param left: start of the search range
    :param right: end of the search range
    """
    # Guard against an empty range or equal endpoint values (avoids division by zero)
    if left > right:
        return None
    if sorted_collection[right] == sorted_collection[left]:
        return left if sorted_collection[left] == item else None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left])
#out of range check
if point<0 or point>=len(sorted_collection):
return None
if sorted_collection[point] == item:
return point
elif sorted_collection[point] > item:
return interpolation_search_by_recursion(sorted_collection, item, left, point-1)
else:
return interpolation_search_by_recursion(sorted_collection, item, point+1, right)
# Helper function that checks whether the input is sorted
def __assert_sorted(collection):
if collection != sorted(collection):
print('error: Collection must be sorted')
raise ValueError('Collection must be sorted')
return True
if __name__ == '__main__':
import sys
user_input = raw_input('Enter numbers separated by comma:\n').strip()
collection = [int(item) for item in user_input.split(',')]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('Sequence must be sorted to apply interpolation search')
target_input = raw_input('Enter a single number to be found in the list:\n')
target = int(target_input)
    # Using the interpolation_search function
result = interpolation_search(collection, target)
if result is not None:
print('{} interpolation search found at positions: {}'.format(target, result))
else:
print('Not found')
    # Using the interpolation_search_by_recursion function
result = interpolation_search_by_recursion(collection, target, 0, len(collection)-1)
if result is not None:
print('{} interpolation search by recursion found at positions: {}'.format(target, result))
else:
print('Not found')
|
the-stack_0_9366 | import _thread
import contextlib
import socketserver
import time
from http.server import BaseHTTPRequestHandler
from onlinepayments.sdk.communicator import Communicator
from onlinepayments.sdk.defaultimpl.default_authenticator import DefaultAuthenticator
from onlinepayments.sdk.defaultimpl.default_connection import DefaultConnection
from onlinepayments.sdk.endpoint_configuration import EndpointConfiguration
from onlinepayments.sdk.factory import Factory
from onlinepayments.sdk.meta_data_provider import MetaDataProvider
def create_handler(call_able):
"""Creates a handler that serves requests by calling the callable object
with this handler as argument
"""
class RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
call_able(self)
time.sleep(0.1) # sleep to avoid dropping the client before it can read the response
def do_POST(self):
call_able(self)
time.sleep(0.1) # sleep to avoid dropping the client before it can read the response
def do_HEAD(self):
pass
def do_DELETE(self):
call_able(self)
time.sleep(0.1) # sleep to avoid dropping the client before it can read the response
return RequestHandler
@contextlib.contextmanager
def create_server_listening(call_able):
"""Context manager that creates a thread with a server at localhost which listens for requests
and responds by calling the *call_able* function.
:param call_able: a callable function to handle incoming requests, when a request comes in
the function will be called with a SimpleHTTPRequestHandler to handle the request
:return the url where the server is listening (http://localhost:port)
"""
server = socketserver.TCPServer(('localhost', 0), create_handler(call_able), bind_and_activate=True)
try:
# frequent polling server for a faster server shutdown and faster tests
_thread.start_new(server.serve_forever, (0.1,))
yield 'http://localhost:' + str(server.server_address[1])
finally:
server.shutdown()
server.server_close()
def create_client(http_host, connect_timeout=0.500, socket_timeout=0.500,
max_connections=EndpointConfiguration.DEFAULT_MAX_CONNECTIONS):
connection = DefaultConnection(connect_timeout, socket_timeout, max_connections)
authenticator = DefaultAuthenticator("apiKey", "secret")
meta_data_provider = MetaDataProvider("OnlinePayments")
communicator = Communicator(
api_endpoint=http_host,
authenticator=authenticator,
meta_data_provider=meta_data_provider,
connection=connection)
return Factory.create_client_from_communicator(communicator)
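# Hedged usage sketch (not part of the original helpers): start the local test
# server with a handler that answers every request with an empty JSON body,
# then point an SDK client at it. The handler and the canned 200/{} response
# are illustrative assumptions, not real Online Payments API behaviour.
if __name__ == "__main__":
    def _ok_handler(handler):
        body = b"{}"
        handler.send_response(200)
        handler.send_header("Content-Type", "application/json")
        handler.send_header("Content-Length", str(len(body)))
        handler.end_headers()
        handler.wfile.write(body)
    with create_server_listening(_ok_handler) as url:
        client = create_client(url)
        # SDK calls issued against `client` would now hit the local server,
        # which replies with the canned 200/{} response above.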
|
the-stack_0_9369 | """
Maximum likelihood covariance estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import check_array
from ..utils.extmath import fast_logdet
from ..metrics.pairwise import pairwise_distances
from ..utils.validation import _deprecate_positional_args
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : ndarray of shape (n_features, n_features)
Maximum Likelihood Estimator of covariance.
precision : ndarray of shape (n_features, n_features)
The precision matrix of the covariance model to be tested.
Returns
-------
log_likelihood_ : float
Sample mean of the log-likelihood.
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + fast_logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
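# Illustrative check (not from the scikit-learn source): for a 2x2 identity
# empirical covariance evaluated against an identity precision matrix, the
# formula above reduces to (-2 + 0 - 2*log(2*pi)) / 2, roughly -2.8379:
#   >>> import numpy as np
#   >>> round(log_likelihood(np.eye(2), np.eye(2)), 4)
#   -2.8379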
@_deprecate_positional_args
def empirical_covariance(X, *, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data will be centered before computation.
Returns
-------
covariance : ndarray of shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
Examples
--------
>>> from sklearn.covariance import empirical_covariance
>>> X = [[1,1,1],[1,1,1],[1,1,1],
... [0,0,0],[0,0,0],[0,0,0]]
>>> empirical_covariance(X)
array([[0.25, 0.25, 0.25],
[0.25, 0.25, 0.25],
[0.25, 0.25, 0.25]])
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool, default=True
Specifies if the estimated precision is stored.
assume_centered : bool, default=False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
location_ : ndarray of shape (n_features,)
Estimated location, i.e. the estimated mean.
covariance_ : ndarray of shape (n_features, n_features)
Estimated covariance matrix
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import EmpiricalCovariance
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> cov = EmpiricalCovariance().fit(X)
>>> cov.covariance_
array([[0.7569..., 0.2818...],
[0.2818..., 0.3928...]])
>>> cov.location_
array([0.0622..., 0.0193...])
"""
@_deprecate_positional_args
def __init__(self, *, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : array-like of shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like of shape (n_features, n_features)
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : Ignored
Not used, present for API consistence purpose.
Returns
-------
self : object
"""
X = self._validate_data(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like of shape (n_samples, n_features)
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
            X_test is assumed to be drawn from the same distribution as
            the data used in fit (including centering).
y : Ignored
Not used, present for API consistence purpose.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
----------
comp_cov : array-like of shape (n_features, n_features)
The covariance to compare with.
norm : {"frobenius", "spectral"}, default="frobenius"
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
            - 'spectral': sqrt(max(eigenvalues(A^t.A)))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool, default=True
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool, default=True
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
result : float
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
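    # Illustrative sketch (not from the scikit-learn source): with
    # self.covariance_ = [[1, 0], [0, 1]] and comp_cov = [[2, 0], [0, 1]],
    # the error matrix is [[1, 0], [0, 0]], so the default scaled, squared
    # Frobenius norm is sum(error**2) / n_features = 1 / 2 = 0.5, and
    # error_norm(comp_cov, squared=False) would return sqrt(0.5).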
def mahalanobis(self, X):
"""Computes the squared Mahalanobis distances of given observations.
Parameters
----------
X : array-like of shape (n_samples, n_features)
            The observations, the Mahalanobis distances of which we
            compute. Observations are assumed to be drawn from the same
            distribution as the data used in fit.
Returns
-------
dist : ndarray of shape (n_samples,)
Squared Mahalanobis distances of the observations.
"""
precision = self.get_precision()
# compute mahalanobis distances
dist = pairwise_distances(X, self.location_[np.newaxis, :],
metric='mahalanobis', VI=precision)
return np.reshape(dist, (len(X),)) ** 2
|
the-stack_0_9370 | # encoding: utf-8
# author: BrikerMan
# contact: [email protected]
# blog: https://eliyar.biz
# file: abs_task_model.py
# time: 1:43 下午
import json
import os
import pathlib
from abc import ABC, abstractmethod
from typing import Dict, Any, TYPE_CHECKING, Union
import tensorflow as tf
import kashgari
from kashgari.embeddings import ABCEmbedding
from kashgari.logger import logger
from kashgari.processors.abc_processor import ABCProcessor
from kashgari.utils import load_data_object
from kashgari.layers import KConditionalRandomField
if TYPE_CHECKING:
from kashgari.tasks.labeling import ABCLabelingModel
from kashgari.tasks.classification import ABCClassificationModel
class ABCTaskModel(ABC):
def __init__(self) -> None:
self.tf_model: tf.keras.Model = None
self.embedding: ABCEmbedding = None
self.hyper_parameters: Dict[str, Any]
self.sequence_length: int
self.text_processor: ABCProcessor
self.label_processor: ABCProcessor
def to_dict(self) -> Dict[str, Any]:
model_json_str = self.tf_model.to_json()
return {
'tf_version': tf.__version__, # type: ignore
'kashgari_version': kashgari.__version__,
'__class_name__': self.__class__.__name__,
'__module__': self.__class__.__module__,
'config': {
'hyper_parameters': self.hyper_parameters, # type: ignore
'sequence_length': self.sequence_length # type: ignore
},
'embedding': self.embedding.to_dict(), # type: ignore
'text_processor': self.text_processor.to_dict(),
'label_processor': self.label_processor.to_dict(),
'tf_model': json.loads(model_json_str)
}
@classmethod
def default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
The default hyper parameters of the model dict, **all models must implement this function.**
You could easily change model's hyper-parameters.
For example, change the LSTM unit in BiLSTM_Model from 128 to 32.
>>> from kashgari.tasks.classification import BiLSTM_Model
>>> hyper = BiLSTM_Model.default_hyper_parameters()
>>> print(hyper)
{'layer_bi_lstm': {'units': 128, 'return_sequences': False}, 'layer_output': {}}
>>> hyper['layer_bi_lstm']['units'] = 32
>>> model = BiLSTM_Model(hyper_parameters=hyper)
Returns:
hyper params dict
"""
raise NotImplementedError
def save(self, model_path: str, encoding='utf-8') -> str:
pathlib.Path(model_path).mkdir(exist_ok=True, parents=True)
model_path = os.path.abspath(model_path)
with open(os.path.join(model_path, 'model_config.json'), 'w', encoding=encoding) as f:
f.write(json.dumps(self.to_dict(), indent=2, ensure_ascii=False))
f.close()
self.embedding.embed_model.save_weights(os.path.join(model_path, 'embed_model_weights.h5'))
self.tf_model.save_weights(os.path.join(model_path, 'model_weights.h5')) # type: ignore
logger.info('model saved to {}'.format(os.path.abspath(model_path)))
return model_path
@classmethod
def load_model(cls, model_path: str, encoding='utf-8') -> Union["ABCLabelingModel", "ABCClassificationModel"]:
model_config_path = os.path.join(model_path, 'model_config.json')
model_config = json.loads(open(model_config_path, 'r', encoding=encoding).read())
model = load_data_object(model_config)
model.embedding = load_data_object(model_config['embedding'])
model.text_processor = load_data_object(model_config['text_processor'])
model.label_processor = load_data_object(model_config['label_processor'])
tf_model_str = json.dumps(model_config['tf_model'])
model.tf_model = tf.keras.models.model_from_json(tf_model_str,
custom_objects=kashgari.custom_objects)
if isinstance(model.tf_model.layers[-1], KConditionalRandomField):
model.crf_layer = model.tf_model.layers[-1]
model.tf_model.load_weights(os.path.join(model_path, 'model_weights.h5'))
model.embedding.embed_model.load_weights(os.path.join(model_path, 'embed_model_weights.h5'))
return model
@abstractmethod
def build_model(self,
x_data: Any,
y_data: Any) -> None:
raise NotImplementedError
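# Hedged usage sketch (illustrative; assumes a trained concrete subclass such
# as a classification or labeling model is available):
#   model.save('saved_model/')   # writes model_config.json plus the tf and
#                                # embedding weight files shown above
#   restored = ABCTaskModel.load_model('saved_model/')
# load_model() rebuilds the tf.keras graph from the stored JSON before loading
# weights, so any custom layers must be registered in kashgari.custom_objects.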
|
the-stack_0_9372 | # coding: utf-8
import pprint
import re
import six
class DeleteEdgeCloudRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'edgecloud_id': 'str'
}
attribute_map = {
'edgecloud_id': 'edgecloud_id'
}
def __init__(self, edgecloud_id=None):
"""DeleteEdgeCloudRequest - a model defined in huaweicloud sdk"""
self._edgecloud_id = None
self.discriminator = None
self.edgecloud_id = edgecloud_id
@property
def edgecloud_id(self):
"""Gets the edgecloud_id of this DeleteEdgeCloudRequest.
:return: The edgecloud_id of this DeleteEdgeCloudRequest.
:rtype: str
"""
return self._edgecloud_id
@edgecloud_id.setter
def edgecloud_id(self, edgecloud_id):
"""Sets the edgecloud_id of this DeleteEdgeCloudRequest.
:param edgecloud_id: The edgecloud_id of this DeleteEdgeCloudRequest.
:type: str
"""
self._edgecloud_id = edgecloud_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteEdgeCloudRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_9373 | #!/usr/bin/env python3
#
# Copyright (c) 2022 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Commissioning test.
import os
import sys
from optparse import OptionParser
from base import TestFail, TestTimeout, BaseTestHelper, FailIfNot, logger
from cluster_objects import NODE_ID, ClusterObjectTests
from network_commissioning import NetworkCommissioningTests
import asyncio
# The thread network dataset TLV for testing, split into T-L-V.
TEST_THREAD_NETWORK_DATASET_TLV = "0e080000000000010000" + \
"000300000c" + \
"35060004001fffe0" + \
"0208fedcba9876543210" + \
"0708fd00000000001234" + \
"0510ffeeddccbbaa99887766554433221100" + \
"030e54657374696e674e6574776f726b" + \
"0102d252" + \
"041081cb3b2efa781cc778397497ff520fa50c0302a0ff"
# Network id for the thread network; currently a constant value, to be changed to the XPANID of the thread network.
TEST_THREAD_NETWORK_ID = "fedcba9876543210"
TEST_DISCRIMINATOR = 3840
ENDPOINT_ID = 0
LIGHTING_ENDPOINT_ID = 1
GROUP_ID = 0
def main():
optParser = OptionParser()
optParser.add_option(
"-t",
"--timeout",
action="store",
dest="testTimeout",
default=75,
type='int',
help="The program will return with timeout after specified seconds.",
metavar="<timeout-second>",
)
optParser.add_option(
"-a",
"--address",
action="store",
dest="deviceAddress1",
default='',
type='str',
help="Address of the first device",
)
optParser.add_option(
'--paa-trust-store-path',
dest="paaPath",
default='',
type='str',
help="Path that contains valid and trusted PAA Root Certificates."
)
optParser.add_option(
'--fail-on-report',
action="store_true",
dest="report",
default=False,
        help='Use this flag to simulate a failure while handling the report. Without this flag, the failure is simulated on the commissioning stage itself.'
)
(options, remainingArgs) = optParser.parse_args(sys.argv[1:])
timeoutTicker = TestTimeout(options.testTimeout)
timeoutTicker.start()
test = BaseTestHelper(nodeid=112233, testCommissioner=True,
paaTrustStorePath=options.paaPath)
FailIfNot(test.SetNetworkCommissioningParameters(dataset=TEST_THREAD_NETWORK_DATASET_TLV),
"Failed to set network commissioning parameters")
logger.info("Testing PASE connection to device")
# TODO: Start at stage 2 once handling for arming failsafe on pase is done.
if options.report:
for testFailureStage in range(3, 17):
FailIfNot(test.TestPaseOnly(ip=options.deviceAddress1,
setuppin=20202021,
nodeid=1),
"Failed to establish PASE connection with device")
FailIfNot(test.TestCommissionFailureOnReport(1, testFailureStage),
"Commissioning failure tests failed for simulated report failure on stage {}".format(testFailureStage))
else:
for testFailureStage in range(3, 17):
FailIfNot(test.TestPaseOnly(ip=options.deviceAddress1,
setuppin=20202021,
nodeid=1),
"Failed to establish PASE connection with device")
FailIfNot(test.TestCommissionFailure(1, testFailureStage),
"Commissioning failure tests failed for simulated stage failure on stage {}".format(testFailureStage))
# Ensure we can still commission for real
FailIfNot(test.TestPaseOnly(ip=options.deviceAddress1,
setuppin=20202021,
nodeid=1),
"Failed to establish PASE connection with device")
FailIfNot(test.TestCommissionFailure(1, 0), "Failed to commission device")
logger.info("Testing on off cluster")
FailIfNot(test.TestOnOffCluster(nodeid=1,
endpoint=LIGHTING_ENDPOINT_ID,
group=GROUP_ID), "Failed to test on off cluster")
timeoutTicker.stop()
logger.info("Test finished")
# TODO: Python device controller cannot be shutdown clean sometimes and will block on AsyncDNSResolverSockets shutdown.
# Call os._exit(0) to force close it.
os._exit(0)
if __name__ == "__main__":
try:
main()
except Exception as ex:
logger.exception(ex)
TestFail("Exception occurred when running tests.")
|
the-stack_0_9374 | from model.contact import Contact
testdata = [
Contact(firstname="firstname1", middlename="middlename1", lastname="lastname1", nickname="nickname1",
email="email1", email2="email21", email3="email3", homephone="homephone",
workphone="workphone"),
Contact(firstname="firstname2", middlename="middlename2", lastname="lastname2", nickname="nickname2",
email="email12", email2="email22", email3="email32", homephone="homephone2",
workphone="workphone2")
]
|
the-stack_0_9375 | import base64
import datetime
import json
import urllib
import flask
import requests
import src.config
redirectdownloadBP = flask.Blueprint(
"redirectdownload", __name__, url_prefix="/api/v1/redirectdownload"
)
@redirectdownloadBP.route("/<name>")
async def redirectdownloadFunction(name):
id = flask.request.args.get("id")
itag = flask.request.args.get("itag")
config = src.config.readConfig()
if config.get("kill_switch") == True:
return
if (
datetime.datetime.strptime(
config.get("token_expiry", datetime.datetime.utcnow()),
"%Y-%m-%d %H:%M:%S.%f",
)
<= datetime.datetime.utcnow()
):
config, drive = src.credentials.refreshCredentials(config)
with open("config.json", "w+") as w:
json.dump(obj=config, fp=w, sort_keys=True, indent=4)
tmp_metadata = src.metadata.jsonExtract(
src.metadata.readMetadata(config), "id", id, False
)
if tmp_metadata:
name = tmp_metadata.get("name", name)
args = "?"
for arg in flask.request.args:
args += "%s=%s&" % (
arg,
urllib.parse.quote(flask.request.args.get(arg, "").encode("utf-8")),
)
session = {"access_token": config.get("access_token")}
session["url"] = "https://www.googleapis.com/drive/v3/files/%s?alt=media" % (id)
if itag and itag != "" and config.get("transcoded") == True:
req = requests.get(
"https://drive.google.com/get_video_info?docid=%s" % (id),
headers={"Authorization": "Bearer %s" % (config.get("access_token"))},
)
parsed = urllib.parse.parse_qs(urllib.parse.unquote(req.text))
if parsed.get("status") == ["ok"]:
            url = None
            for stream in parsed["url"]:
                if ("itag=%s" % (itag)) in stream:
                    url = stream
                    break
            cookie_string = "; ".join(
                [str(x) + "=" + str(y) for x, y in req.cookies.items()]
            )
            session["cookie"] = cookie_string
            session["transcoded"] = config.get("transcoded")
            # Only override the plain Drive URL if a matching transcoded stream was found
            if url is not None:
                session["url"] = url
sessionB64 = base64.b64encode(json.dumps(session).encode("ascii")).decode("ascii")
print(
"/api/v1/download/%s%ssession=%s&"
% (urllib.parse.quote(name.encode("utf-8")), args, sessionB64)
)
if config.get("cloudflare") and config.get("cloudflare") != "":
return flask.redirect(
config.get("cloudflare")
+ "/api/v1/download/%s%ssession=%s&" % (name, args, sessionB64),
code=302,
)
else:
return flask.redirect(
"/api/v1/download/%s%ssession=%s&"
% (urllib.parse.quote(name.encode("utf-8")), args, sessionB64),
code=302,
)
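# Hedged sketch (not part of this module): the `session` query parameter built
# above is just base64-encoded JSON, so the matching download endpoint can
# recover it with the reverse transformation, e.g.:
#   session = json.loads(base64.b64decode(sessionB64).decode("ascii"))
#   access_token = session.get("access_token")
#   upstream_url = session.get("url")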
|
the-stack_0_9376 | """
This creates and poulates directories for ROMS runs on gaggle. It is
designed to work with the "BLANK" version of the .in file,
replacing things like $whatever$ with meaningful values.
"""
import os
import sys
fpth = os.path.abspath('../../')
if fpth not in sys.path:
sys.path.append(fpth)
import forcing_functions as ffun
Ldir, Lfun = ffun.intro()
#import netCDF4 as nc
#import numpy as np
from datetime import datetime, timedelta
fdt = datetime.strptime(Ldir['date_string'], '%Y.%m.%d')
fdt_yesterday = fdt - timedelta(1)
print('- dot_in.py creating files for LiveOcean for ' + Ldir['date_string'])
gtag = Ldir['gtag']
gtagex = gtag + '_' + Ldir['ex_name']
EX_NAME = Ldir['ex_name'].upper()
#### USER DEFINED VALUES ####
# which ROMS code to use
roms_name = 'LO_ROMS'
# account for differences when using biology
do_bio = False
multi_core = True # use more than one core
if Ldir['run_type'] == 'backfill':
days_to_run = 1.0
elif Ldir['run_type'] == 'forecast':
days_to_run = float(Ldir['forecast_days'])
# time step in seconds (should fit evenly into 3600 sec)
if Ldir['blow_ups'] == 0:
dtsec = 60
elif Ldir['blow_ups'] == 1:
dtsec = 50
elif Ldir['blow_ups'] == 2:
dtsec = 40
elif Ldir['blow_ups'] == 3:
dtsec = 30
elif Ldir['blow_ups'] == 4:
dtsec = 20
elif Ldir['blow_ups'] == 5:
dtsec = 10
elif Ldir['blow_ups'] == 6:
dtsec = 8
elif Ldir['blow_ups'] == 7:
dtsec = 5
else:
    print('Unsupported number of blow ups: %d' % (Ldir['blow_ups']))
    sys.exit('dtsec is undefined for this number of blow ups')
ndtfast = 20
restart_nrrec = '-1' # '-1' for a non-crash restart file, otherwise '1' or '2'
his_interval = 3600 # seconds to define and write to history files
rst_interval = 10 # days between writing to the restart file (e.g. 5)
# which forcings to look for
atm_dir = 'BLANK/' # which atm forcing files to use
ocn_dir = 'ocnA/' # which ocn forcing files to use
riv_dir = 'rivE/' # which riv forcing files to use
tide_dir = 'tideA/' # which tide forcing files to use
#### END USER DEFINED VALUES ####
# DERIVED VALUES
if multi_core:
if Ldir['np_num'] == 64: # for new mox nodes 2*32=64 2019_02
ntilei = '8' # number of tiles in I-direction
ntilej = '8' # number of tiles in J-direction
elif Ldir['np_num'] == 72:
ntilei = '6' # number of tiles in I-direction
ntilej = '12' # number of tiles in J-direction
elif Ldir['np_num'] == 144:
ntilei = '8' # number of tiles in I-direction
ntilej = '18' # number of tiles in J-direction
elif Ldir['np_num'] == 196:
ntilei = '14' # number of tiles in I-direction
ntilej = '14' # number of tiles in J-direction
elif Ldir['np_num'] == 392:
ntilei = '14' # number of tiles in I-direction
ntilej = '28' # number of tiles in J-direction
elif Ldir['np_num'] == 588:
ntilei = '21' # number of tiles in I-direction
ntilej = '28' # number of tiles in J-direction
    else:
        print('Unsupported number of processors: %d' % (Ldir['np_num']))
        sys.exit('ntilei and ntilej are undefined for this processor count')
else:
ntilei = '1'
ntilej = '1'
# if np.mod(3600,dtsec) != 0:
# print('** WARNING: dtsec does not fit evenly into 1 hour **')
if dtsec == int(dtsec):
dt = str(dtsec) + '.0d0' # a string version of dtsec, for the .in file
else:
dt = str(dtsec) + 'd0' # a string version of dtsec, for the .in file
ninfo = int(his_interval/dtsec) # how often to write info to the log file (# of time steps)
nhis = int(his_interval/dtsec) # how often to write to the history files
ndefhis = int(nhis) # how often to create new history files
nrst = int(rst_interval*86400/dtsec)
ntimes = int(days_to_run*86400/dtsec)
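# Worked example (illustrative): with dtsec = 60, his_interval = 3600,
# rst_interval = 10 and days_to_run = 1.0, the derived values are
#   ninfo = nhis = ndefhis = 3600 / 60 = 60 time steps per history write,
#   nrst = 10 * 86400 / 60 = 14400 steps between restart-file writes,
#   ntimes = 1.0 * 86400 / 60 = 1440 steps for the one-day run.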
# file location stuff
date_string = Ldir['date_string']
date_string_yesterday = fdt_yesterday.strftime('%Y.%m.%d')
dstart = str(int(Lfun.datetime_to_modtime(fdt) / 86400.))
f_string = 'f' + date_string
f_string_yesterday = 'f'+ date_string_yesterday
# where forcing files live (fjord, as seen from gaggle)
# NOTE: eventually this should not be hard-wired.
lo_dir = Ldir['parent'] + 'LiveOcean/'
loo_dir = Ldir['parent'] + 'LiveOcean_output/'
grid_dir = Ldir['parent'] + 'LiveOcean_data/grids/' + Ldir['gridname'] + '/'
force_dir = loo_dir + gtag + '/' + f_string + '/'
roms_dir = Ldir['parent'] + 'LiveOcean_roms/'
# determine grid size
# gfn = grid_dir + 'grid.nc'
# ds = nc.Dataset(gfn)
# h = ds['h'][:]
# nrows0, ncols0 = h.shape
# nrows = nrows0 - 2
# ncols = ncols0 - 2
#ds.close()
# hardwired because we don't have netCDF4
nrows = 385 - 2
ncols = 142 - 2
# determine number of layers
s_dict = Lfun.csv_to_dict(grid_dir + 'S_COORDINATE_INFO.csv')
nlayers = str(s_dict['N'])
if do_bio:
bio_tag = ''
else:
bio_tag = ''
# the .in file
dot_in_name = 'liveocean.in' # name of the .in file
dot_in_dir00 = Ldir['roms'] + 'output/'
Lfun.make_dir(dot_in_dir00) # make sure it exists
dot_in_dir0 = Ldir['roms'] + 'output/' + gtagex + '/'
Lfun.make_dir(dot_in_dir0) # make sure it exists
dot_in_dir = dot_in_dir0 + f_string +'/'
Lfun.make_dir(dot_in_dir, clean=True) # make sure it exists and is empty
# where to put the output files according to the .in file
out_dir0 = roms_dir + 'output/' + gtagex + '/'
out_dir = out_dir0 + f_string + '/'
if Ldir['start_type'] == 'continuation':
nrrec = '0' # '-1' for a hot restart
#ininame = 'ocean_rst.nc' # for a hot perfect restart
ininame = 'ocean_his_0025.nc' # for a hot restart
ini_fullname = out_dir0 + f_string_yesterday + '/' + ininame
elif Ldir['start_type'] == 'new':
nrrec = '0' # '0' for a history or ini file
ininame = 'ocean_ini' + bio_tag + '.nc' # could be an ini or history file
ini_fullname = force_dir + ocn_dir + ininame
# END DERIVED VALUES
## create .in ##########################
f = open('BLANK.in','r')
f2 = open(dot_in_dir + dot_in_name,'w')
in_varlist = ['base_dir','ntilei','ntilej','ntimes','dt','nrrec','ninfo',
'nhis','dstart','ndefhis','nrst','force_dir','grid_dir','roms_dir',
'atm_dir','ocn_dir','riv_dir','tide_dir','dot_in_dir',
'ini_fullname','out_dir','EX_NAME','roms_name','bio_tag',
'nrows','ncols', 'nlayers', 'ndtfast']
for line in f:
for var in in_varlist:
if '$'+var+'$' in line:
line2 = line.replace('$'+var+'$', str(eval(var)))
line = line2
else:
line2 = line
f2.write(line2)
f.close()
f2.close()
## npzd2o_Banas.in ###########
f = open('npzd2o_Banas_BLANK.in','r')
bio_dot_in_name = 'npzd2o_Banas.in'
f3 = open(dot_in_dir + bio_dot_in_name,'w')
in_varlist = ['force_dir','riv_dir','bio_tag']
for line in f:
for var in in_varlist:
if '$'+var+'$' in line:
line2 = line.replace('$'+var+'$', str(eval(var)))
line = line2
else:
line2 = line
f3.write(line2)
f.close()
f3.close()
|
the-stack_0_9377 | import numpy
import sympy
from sympy.diffgeom import Manifold, Patch
from pystein import geodesic, metric, coords
from pystein.utilities import tensor_pow as tpow
class TestGeodesic:
def test_numerical(self):
M = Manifold('M', dim=2)
P = Patch('origin', M)
rho, phi, a = sympy.symbols('rho phi a', nonnegative=True)
cs = coords.CoordSystem('schw', P, [rho, phi])
drho, dphi = cs.base_oneforms()
ds2 = a ** 2 * ((1 / (1 - rho ** 2)) * tpow(drho, 2) + rho ** 2 * tpow(dphi, 2))
g = metric.Metric(twoform=ds2)
init = (0.01, 0.01, 0.000001, 0.1)
ts = numpy.arange(0, 1000, 0.1)
df = geodesic.numerical_geodesic(g, init, ts)
print('yay')
def test_parallel(self):
M = Manifold('M', dim=2)
P = Patch('origin', M)
theta, phi, a = sympy.symbols('theta phi a', nonnegative=True)
cs = coords.CoordSystem('spherical', P, [theta, phi])
dtheta, dphi = cs.base_oneforms()
ds2 = a ** 2 * (tpow(dtheta, 2) + sympy.sin(theta) ** 2 * tpow(dphi, 2))
g2 = metric.Metric(twoform=ds2)
param = sympy.symbols('lambda')
curve = [
2 * sympy.pi * param,
sympy.pi / 4,
]
lhs_0 = geodesic.parallel_transport_equation(0, curve, param, g2)
print(lhs_0)
|
the-stack_0_9379 | r"""
`\ZZ`-Filtered Vector Spaces
This module implements filtered vector spaces, that is, a descending
sequence of vector spaces
.. math::
\cdots \supset F_d \supset F_{d+1} \supset F_{d+2} \supset \cdots
with degrees `d\in \ZZ`. It is not required that `F_d` is the entire
ambient space for `d\ll 0` (see
:meth:`~FilteredVectorSpace_class.is_exhaustive`) nor that `F_d=0` for
`d\gg 0` (see :meth:`~FilteredVectorSpace_class.is_separating`). To
construct a filtered vector space, use the :func:`FilteredVectorSpace`
command. It supports easy creation of simple filtrations, for example
the trivial one::
sage: FilteredVectorSpace(2, base_ring=RDF)
RDF^2
The next-simplest filtration has a single non-trivial inclusion
between `V_d` and `V_{d+1}`::
sage: d = 1
sage: V = FilteredVectorSpace(2, d); V
QQ^2 >= 0
sage: [V.get_degree(i).dimension() for i in range(0,4)]
[2, 2, 0, 0]
To construct general filtrations, you need tell Sage about generating
vectors for the nested subspaces. For example, a dictionary whose keys
are the degrees and values are a list of generators::
sage: r1 = (1, 0, 5)
sage: r2 = (0, 1, 2)
sage: r3 = (1, 2, 1)
sage: V = FilteredVectorSpace({0:[r1, r2, r3], 1:[r1, r2], 3:[r1]}); V
QQ^3 >= QQ^2 >= QQ^1 >= QQ^1 >= 0
For degrees `d` that are not specified, the associated vector subspace
is the same as the next-lower degree, that is, `V_d \simeq
V_{d-1}`. In the above example, this means that
* `V_d \simeq \QQ^3` for `d<0`
* `V_0 = \mathop{span}(r_1, r_2) \simeq \QQ^2`
* `V_1 = V_2 = \mathop{span}(r_3) \simeq \QQ`
* `V_d = 0` for `d \geq 3`
That is::
sage: V.get_degree(0) == V
True
sage: V.get_degree(1) == V.span([r1, r2])
True
sage: V.get_degree(2) == V.get_degree(3) == V.span([r1])
True
sage: V.get_degree(4) == V.get_degree(5) == V.span([])
True
If you have many generators you can just pass the generators once and
then refer to them by index::
sage: FilteredVectorSpace([r1, r2, r3], {0:[0,1,2], 1:[1,2], 3:[1]})
QQ^3 >= QQ^2 >= QQ^1 >= QQ^1 >= 0
Note that generators for the degree-`d` subspace of the filtration are
automatically generators for all lower degrees. For example, here we
do not have to specify the ray `r_2` separately in degree 1::
sage: FilteredVectorSpace([r1, r2, r3], {0:[0 ], 1:[1]})
QQ^2 >= QQ^1 >= 0 in QQ^3
sage: FilteredVectorSpace([r1, r2, r3], {0:[0, 1], 1:[1]})
QQ^2 >= QQ^1 >= 0 in QQ^3
The degree can be infinite (plus infinity), this allows construction
of filtered vector spaces that are not eventually zero in high
degree::
sage: FilteredVectorSpace([r1, r2, r3], {0:[0,1], oo:[1]})
QQ^2 >= QQ^1 in QQ^3
Any field can be used as the vector space base. For example a finite
field::
sage: F.<a> = GF(5^3)
sage: r1 = (a, 0, F(5)); r1
(a, 0, 0)
sage: FilteredVectorSpace([r1, r2, r3], {0:[0,1], oo:[1]}, base_ring=F)
GF(125)^2 >= GF(125)^1 in GF(125)^3
Or the algebraic field::
sage: r1 = (1, 0, 1+QQbar(I)); r1
(1, 0, I + 1)
sage: FilteredVectorSpace([r1, r2, r3], {0:[0,1], oo:[1]}, base_ring=QQbar)
Vector space of dimension 2 over Algebraic Field
>= Vector space of dimension 1 over Algebraic Field
in Vector space of dimension 3 over Algebraic Field
"""
#*****************************************************************************
# Copyright (C) 2013 Volker Braun <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.rings.all import QQ, ZZ, RDF, RR, Integer
from sage.rings.infinity import InfinityRing, infinity, minus_infinity
from sage.categories.fields import Fields
from sage.modules.free_module import FreeModule_ambient_field, VectorSpace
from sage.matrix.constructor import vector, matrix
from sage.misc.all import uniq, cached_method
def is_FilteredVectorSpace(X):
"""
Test whether ``X`` is a filtered vector space.
This function is for library use only.
INPUT:
- ``X`` -- anything.
OUTPUT:
Boolean.
EXAMPLES::
sage: from sage.modules.filtered_vector_space import is_FilteredVectorSpace
sage: V = FilteredVectorSpace(2, 1)
sage: is_FilteredVectorSpace(V)
True
sage: is_FilteredVectorSpace('ceci n\'est pas une pipe')
False
"""
return isinstance(X, FilteredVectorSpace_class)
def FilteredVectorSpace(arg1, arg2=None, base_ring=QQ, check=True):
"""
Construct a filtered vector space.
INPUT:
This function accepts various input that determines the vector space and filtration.
    - Just the dimension: ``FilteredVectorSpace(dimension)``. Return the trivial filtration
(where all vector spaces are isomorphic).
- Dimension and maximal degree, see
:func:`constructor_from_dim_degree` for arguments. Construct a
filtration with only one non-trivial step `V\supset 0` at the
given cutoff degree.
- A dictionary containing the degrees as keys and a list of vector
space generators as values, see
:func:`FilteredVectorSpace_from_generators`
- Generators and a dictionary containing the degrees as keys and
the indices of vector space generators as values, see
:func:`FilteredVectorSpace_from_generators_indices`
In addition, the following keyword arguments are supported:
- ``base_ring`` -- a field (optional, default `\QQ`). The base
field of the vector space. Must be a field.
EXAMPLES:
Just the dimension for the trivial filtration::
sage: FilteredVectorSpace(2)
QQ^2
Dimension and degree::
sage: FilteredVectorSpace(2, 1)
QQ^2 >= 0
Dictionary of generators::
sage: FilteredVectorSpace({1:[(1,0), (0,1)], 3:[(1,0)]})
QQ^2 >= QQ^1 >= QQ^1 >= 0
Generators and a dictionary referring to them by index::
sage: FilteredVectorSpace([(1,0), (0,1)], {1:[0,1], 3:[0]})
QQ^2 >= QQ^1 >= QQ^1 >= 0
"""
if base_ring not in Fields():
raise ValueError('the base_ring argument must be a field')
if arg1 in ZZ:
return construct_from_dim_degree(arg1, arg2, base_ring, check)
elif arg2 is None:
return construct_from_generators(arg1, base_ring, check)
else:
return construct_from_generators_indices(arg1, arg2, base_ring, check)
def normalize_degree(deg):
"""
Normalized the degree
- ``deg`` -- something that defines the degree (either integer or
infinity).
OUTPUT:
Plus/minus infinity or a Sage integer.
EXAMPLES::
sage: from sage.modules.filtered_vector_space import normalize_degree
sage: type(normalize_degree(int(1)))
<type 'sage.rings.integer.Integer'>
sage: normalize_degree(oo)
+Infinity
"""
try:
return ZZ(deg)
except TypeError:
pass
deg = InfinityRing(deg)
if deg == infinity:
return infinity
if deg == minus_infinity:
return minus_infinity
raise ValueError('not integer or infinity')
def construct_from_dim_degree(dim, max_degree, base_ring, check):
"""
Construct a filtered vector space.
INPUT:
- ``dim`` -- integer. The dimension.
- ``max_degree`` -- integer or infinity. The maximal degree where
the vector subspace of the filtration is still the entire space.
EXAMPLES::
sage: V = FilteredVectorSpace(2, 5); V
QQ^2 >= 0
sage: V.get_degree(5)
Vector space of degree 2 and dimension 2 over Rational Field
Basis matrix:
[1 0]
[0 1]
sage: V.get_degree(6)
Vector space of degree 2 and dimension 0 over Rational Field
Basis matrix:
[]
sage: FilteredVectorSpace(2, oo)
QQ^2
sage: FilteredVectorSpace(2, -oo)
0 in QQ^2
TESTS::
sage: from sage.modules.filtered_vector_space import construct_from_dim_degree
sage: V = construct_from_dim_degree(2, 5, QQ, True); V
QQ^2 >= 0
"""
if dim not in ZZ:
raise ValueError('dimension must be an integer')
dim = ZZ(dim)
from sage.matrix.constructor import identity_matrix
generators = identity_matrix(base_ring, dim).columns()
filtration = dict()
if max_degree is None:
max_degree = infinity
filtration[normalize_degree(max_degree)] = range(dim)
return construct_from_generators_indices(generators, filtration, base_ring, check)
def construct_from_generators(filtration, base_ring, check):
"""
Construct a filtered vector space.
INPUT:
- ``filtration`` -- a dictionary of filtration steps. Each
filtration step is a pair consisting of an integer degree and a
list/tuple/iterable of vector space generators. The integer
``degree`` stipulates that all filtration steps of degree higher
or equal than ``degree`` (up to the next filtration step) are
said subspace.
EXAMPLES::
sage: from sage.modules.filtered_vector_space import construct_from_generators
sage: r = [1, 2]
sage: construct_from_generators({1:[r]}, QQ, True)
QQ^1 >= 0 in QQ^2
"""
def normalize_gen(v):
return tuple(map(base_ring, v))
# convert generator notation to generator+indices
if len(filtration) == 0:
raise ValueError('you need to specify at least one ray to deduce the dimension')
generators = []
for gens in filtration.values():
generators += map(normalize_gen, gens)
generators = tuple(uniq(generators))
# normalize filtration data
normalized = dict()
for deg, gens_deg in filtration.iteritems():
indices = [generators.index(normalize_gen(v)) for v in gens_deg]
normalized[deg] = tuple(indices)
return construct_from_generators_indices(generators, normalized, base_ring, check)
def construct_from_generators_indices(generators, filtration, base_ring, check):
"""
Construct a filtered vector space.
INPUT:
- ``generators`` -- a list/tuple/iterable of vectors, or something
convertible to them. The generators spanning various
subspaces.
- ``filtration`` -- a list or iterable of filtration steps. Each
filtration step is a pair ``(degree, ray_indices)``. The
``ray_indices`` are a list or iterable of ray indices, which
span a subspace of the vector space. The integer ``degree``
stipulates that all filtration steps of degree higher or equal
than ``degree`` (up to the next filtration step) are said
subspace.
EXAMPLES::
sage: from sage.modules.filtered_vector_space import construct_from_generators_indices
sage: gens = [(1,0), (0,1), (-1,-1)]
sage: V = construct_from_generators_indices(gens, {1:[0,1], 3:[1]}, QQ, True); V
QQ^2 >= QQ^1 >= QQ^1 >= 0
TESTS::
sage: gens = [(int(1),int(0)), (0,1), (-1,-1)]
sage: construct_from_generators_indices(iter(gens), {int(0):[0, int(1)], 2:[2]}, QQ, True)
QQ^2 >= QQ^1 >= QQ^1 >= 0
"""
# normalize generators
generators = map(list, generators)
# deduce dimension
if len(generators) == 0:
dim = ZZ(0)
else:
dim = ZZ(len(generators[0]))
ambient = VectorSpace(base_ring, dim)
# complete generators to a generating set
if matrix(base_ring, generators).rank() < dim:
complement = ambient.span(generators).complement()
generators = generators + list(complement.gens())
# normalize generators II
generators = tuple(ambient(v) for v in generators)
for v in generators:
v.set_immutable()
# normalize filtration data
normalized = dict()
for deg, gens in filtration.iteritems():
deg = normalize_degree(deg)
gens = map(ZZ, gens)
if any(i < 0 or i >= len(generators) for i in gens):
raise ValueError('generator index out of bounds')
normalized[deg] = tuple(sorted(gens))
try:
del normalized[minus_infinity]
except KeyError:
pass
filtration = normalized
return FilteredVectorSpace_class(base_ring, dim, generators, filtration, check=check)
class FilteredVectorSpace_class(FreeModule_ambient_field):
def __init__(self, base_ring, dim, generators, filtration, check=True):
r"""
A descending filtration of a vector space
INPUT:
- ``base_ring`` -- a field. The base field of the ambient vector space.
- ``dim`` -- integer. The dimension of the ambient vector space.
- ``generators`` -- tuple of generators for the ambient vector
space. These will be used to span the subspaces of the
filtration.
- ``filtration`` -- a dictionary of filtration steps in ray
index notation. See
:func:`construct_from_generators_indices` for details.
- ``check`` -- boolean (optional; default: ``True``). Whether
to perform consistency checks.
TESTS::
sage: from sage.modules.filtered_vector_space import FilteredVectorSpace_class
sage: gens = [(1,0,0), (1,1,0), (1,2,0), (-1,-1, 0), (0,0,1)]
sage: FilteredVectorSpace_class(QQ, 3, gens, {2:(0,1), oo:(4,)})
QQ^3 >= QQ^1
sage: FilteredVectorSpace_class(QQ, 3, gens, {2:(0,1), 3:(4,)})
QQ^3 >= QQ^1 >= 0
The trivial filtration::
sage: FilteredVectorSpace_class(QQ, 3, gens, {}, QQ)
0 in QQ^3
The empty vector space::
sage: FilteredVectorSpace_class(QQ, 0, [], {})
0
Higher-degree generators are automatically generators in lower degrees::
sage: FilteredVectorSpace_class(QQ, 3, gens, {2:(4,), 3:(1,)})
QQ^2 >= QQ^1 >= 0 in QQ^3
"""
if check:
assert isinstance(dim, Integer)
assert base_ring in Fields()
super(FilteredVectorSpace_class, self).__init__(base_ring, dim)
if check:
assert matrix(generators).rank() == self.dimension()
assert isinstance(filtration, dict)
for degree, indices in filtration.iteritems():
assert isinstance(degree, Integer) or degree == infinity
assert isinstance(indices, tuple)
assert all(isinstance(r, Integer) for r in indices)
# Construct subspaces from the generators and store in self._filt
def make_subspace(indices):
return self.span([generators[i] for i in indices])
indices = set(filtration.pop(infinity, []))
V = make_subspace(indices)
filtered_subspaces = [(infinity, V)]
for deg in reversed(sorted(filtration.keys())):
next_V = V
indices.update(filtration[deg])
V = make_subspace(indices)
if V == next_V: # skip trivial filtrations
continue
filtered_subspaces.append((deg, V))
filtered_subspaces.append((minus_infinity, V))
filtered_subspaces.reverse()
self._filt = tuple(filtered_subspaces)
assert self._filt[0][0] is minus_infinity
def change_ring(self, base_ring):
"""
Return the same filtration over a different base ring.
INPUT:
- ``base_ring`` -- a ring. The new base ring.
OUTPUT:
This method returns a new filtered vector space whose
subspaces are defined by the same generators but over a
different base ring.
EXAMPLES::
sage: V = FilteredVectorSpace(1, 0); V
QQ^1 >= 0
sage: V.change_ring(RDF)
RDF^1 >= 0
"""
generators, filtration = self.presentation()
return FilteredVectorSpace(generators, filtration, base_ring=base_ring)
def ambient_vector_space(self):
"""
Return the ambient (unfiltered) vector space.
OUTPUT:
A vector space.
EXAMPLES::
sage: V = FilteredVectorSpace(1, 0)
sage: V.ambient_vector_space()
Vector space of dimension 1 over Rational Field
"""
return VectorSpace(self.base_ring(), self.dimension())
@cached_method
def is_constant(self):
"""
Return whether the filtration is constant.
OUTPUT:
Boolean. Whether the filtered vector spaces are identical in
all degrees.
EXAMPLES::
sage: V = FilteredVectorSpace(2); V
QQ^2
sage: V.is_constant()
True
sage: V = FilteredVectorSpace(1, 0); V
QQ^1 >= 0
sage: V.is_constant()
False
sage: V = FilteredVectorSpace({0:[(1,)]}); V
QQ^1 >= 0
sage: V.is_constant()
False
"""
f = self._filt
return (len(f) == 1) or (len(f) == 2 and f[1][0] == infinity)
def is_exhaustive(self):
"""
Return whether the filtration is exhaustive.
A filtration $\{F_d\}$ in an ambient vector space $V$ is
exhaustive if $\cup F_d = V$. See also :meth:`is_separating`.
OUTPUT:
Boolean.
EXAMPLES::
sage: F = FilteredVectorSpace({0:[(1,1)]}); F
QQ^1 >= 0 in QQ^2
sage: F.is_exhaustive()
False
sage: G = FilteredVectorSpace(2, 0); G
QQ^2 >= 0
sage: G.is_exhaustive()
True
"""
return self.get_degree(minus_infinity).dimension() == \
self.ambient_vector_space().dimension()
def is_separating(self):
"""
Return whether the filtration is separating.
A filtration $\{F_d\}$ in an ambient vector space $V$ is
exhaustive if $\cap F_d = 0$. See also :meth:`is_exhaustive`.
OUTPUT:
Boolean.
EXAMPLES::
sage: F = FilteredVectorSpace({0:[(1,1)]}); F
QQ^1 >= 0 in QQ^2
sage: F.is_separating()
True
sage: G = FilteredVectorSpace({0:[(1,1,0)], oo:[(0,0,1)]}); G
QQ^2 >= QQ^1 in QQ^3
sage: G.is_separating()
False
"""
return self.get_degree(infinity).dimension() == 0
@cached_method
def support(self):
"""
Return the degrees in which there are non-trivial generators.
OUTPUT:
A tuple of integers (and plus infinity) in ascending
order. The last entry is plus infinity if and only if the
filtration is not separating (see :meth:`is_separating`).
EXAMPLES::
sage: G = FilteredVectorSpace({0:[(1,1,0)], 3:[(0,1,0)]}); G
QQ^2 >= QQ^1 >= QQ^1 >= QQ^1 >= 0 in QQ^3
sage: G.support()
(0, 3)
sage: G = FilteredVectorSpace({0:[(1,1,0)], 3:[(0,1,0)], oo:[(0,0,1)]}); G
QQ^3 >= QQ^2 >= QQ^2 >= QQ^2 >= QQ^1
sage: G.support()
(0, 3, +Infinity)
"""
if self.is_separating():
filt = self._filt[1:-1]
else:
filt = self._filt[1:]
return tuple(f[0] for f in filt)
@cached_method
def min_degree(self):
r"""
Return the lowest degree of the filtration.
OUTPUT:
Integer or plus infinity. The largest degree `d` of the
(descending) filtration such that the filtered vector space
`F_d` is still equal to `F_{-\infty}`.
EXAMPLES::
sage: FilteredVectorSpace(1, 3).min_degree()
3
sage: FilteredVectorSpace(2).min_degree()
+Infinity
"""
if self.is_constant():
return infinity
return self._filt[1][0]
@cached_method
def max_degree(self):
r"""
Return the highest degree of the filtration.
OUTPUT:
Integer or minus infinity. The smallest degree of the
filtration such that the filtration is constant to the right.
EXAMPLES::
sage: FilteredVectorSpace(1, 3).max_degree()
4
sage: FilteredVectorSpace({0:[[1]]}).max_degree()
1
sage: FilteredVectorSpace(3).max_degree()
-Infinity
"""
f = self._filt
if len(f) == 1:
return minus_infinity
d = f[-1][0]
if d == infinity:
if len(f) == 1:
return minus_infinity
else:
return f[-2][0] + 1
else:
return d + 1
def get_degree(self, d):
r"""
Return the degree-``d`` entry of the filtration.
INPUT:
- ``d`` -- Integer. The desired degree of the filtration.
OUTPUT:
The degree-``d`` vector space in the filtration as subspace of
the ambient space.
EXAMPLES::
sage: rays = [(1,0), (1,1), (1,2), (-1,-1)]
sage: F = FilteredVectorSpace(rays, {3:[1], 1:[1,2]})
sage: F.get_degree(2)
Vector space of degree 2 and dimension 1 over Rational Field
Basis matrix:
[1 1]
sage: F.get_degree(oo)
Vector space of degree 2 and dimension 0 over Rational Field
Basis matrix:
[]
sage: F.get_degree(-oo)
Vector space of degree 2 and dimension 2 over Rational Field
Basis matrix:
[1 0]
[0 1]
"""
d = normalize_degree(d)
for deg, Vdeg in self._filt:
if d <= deg:
return Vdeg
assert False # unreachable
def graded(self, d):
r"""
Return the associated graded vectorspace.
INPUT:
- ``d`` -- integer. The degree.
OUTPUT:
The quotient `G_d = F_d / F_{d+1}`.
EXAMPLES::
sage: rays = [(1,0), (1,1), (1,2)]
sage: F = FilteredVectorSpace(rays, {3:[1], 1:[1,2]})
sage: F.graded(1)
Vector space quotient V/W of dimension 1 over Rational Field where
V: Vector space of degree 2 and dimension 2 over Rational Field
Basis matrix:
[1 0]
[0 1]
W: Vector space of degree 2 and dimension 1 over Rational Field
Basis matrix:
[1 1]
"""
return self.get_degree(d).quotient(self.get_degree(d+1))
def presentation(self):
"""
Return a presentation in term of generators of various degrees.
OUTPUT:
A pair consisting of generators and a filtration suitable as
input to :func:`~construct_from_generators_indices`.
EXAMPLES::
sage: rays = [(1,0), (1,1), (1,2), (-1,-1)]
sage: F = FilteredVectorSpace(rays, {0:[1, 2], 2:[3]}); F
QQ^2 >= QQ^1 >= QQ^1 >= 0
sage: F.presentation()
(((0, 1), (1, 0), (1, 1)), {0: (1, 0), 2: (2,), +Infinity: ()})
"""
# this could be done more efficiently with (potentially) less generators
generators = set()
filt = self._filt[1:]
for d, V in filt:
generators.update(V.echelonized_basis())
generators = tuple(generators)
filtration = dict()
for d, V in filt:
indices = [ZZ(generators.index(v)) for v in V.echelonized_basis()]
filtration[d] = tuple(indices)
return generators, filtration
def _repr_field_name(self):
"""
Return an abbreviated field name as string
RAISES:
``NotImplementedError``: The field does not have an
abbreviated name defined.
EXAMPLES::
sage: FilteredVectorSpace(2, base_ring=QQ)._repr_field_name()
'QQ'
sage: F.<a> = GF(9)
sage: FilteredVectorSpace(2, base_ring=F)._repr_field_name()
'GF(9)'
sage: FilteredVectorSpace(2, base_ring=AA)._repr_field_name()
Traceback (most recent call last):
...
NotImplementedError
"""
if self.base_ring() == QQ:
return 'QQ'
elif self.base_ring() == RDF:
return 'RDF'
elif self.base_ring() == RR:
return 'RR'
from sage.categories.finite_fields import FiniteFields
if self.base_ring() in FiniteFields():
return 'GF({0})'.format(len(self.base_ring()))
else:
raise NotImplementedError()
def _repr_vector_space(self, dim):
"""
Return a string representation of the vector space of given dimension
INPUT:
- ``dim`` -- integer.
OUTPUT:
String representation of the vector space of dimension ``dim``.
EXAMPLES::
sage: F = FilteredVectorSpace(3, base_ring=RDF)
sage: F._repr_vector_space(1234)
'RDF^1234'
sage: F3 = FilteredVectorSpace(3, base_ring=GF(3))
sage: F3._repr_vector_space(1234)
'GF(3)^1234'
sage: F3 = FilteredVectorSpace(3, base_ring=AA)
sage: F3._repr_vector_space(1234)
'Vector space of dimension 1234 over Algebraic Real Field'
"""
if dim == 0:
return '0'
try:
return self._repr_field_name() + '^' + str(dim)
except NotImplementedError:
return repr(VectorSpace(self.base_ring(), dim))
def _repr_degrees(self, min_deg, max_deg):
"""
Return a string representation
This method is like :meth:`_repr_` except that the user can
select the range of degrees to be shown in the output.
INPUT:
- ``min_deg``, ``max_deg`` -- two integers.
EXAMPLES::
sage: rays = [(1,0), (1,1), (1,2), (-1,-1)]
sage: F = FilteredVectorSpace(rays, {0:[1, 2], 2:[3]})
sage: F._repr_degrees(-2, 4)
['QQ^2', 'QQ^2', 'QQ^2', 'QQ^1', 'QQ^1', '0', '0', '0']
"""
degrees = range(min_deg, max_deg+1)
dims = []
for i in degrees + [infinity]:
d = self.get_degree(i).dimension()
dims.append(self._repr_vector_space(d))
return dims
def _repr_(self):
r"""
Return as string representation of ``self``.
OUTPUT:
A string.
EXAMPLES::
sage: rays = [(1,0), (1,1), (1,2), (-1,-1)]
sage: FilteredVectorSpace(rays, {0:[1, 2], 2:[3]})._repr_()
'QQ^2 >= QQ^1 >= QQ^1 >= 0'
sage: FilteredVectorSpace(rays, {0:[1, 2], oo:[3]})
QQ^2 >= QQ^1
sage: FilteredVectorSpace(rays, {oo:[3]})
QQ^1 in QQ^2
sage: FilteredVectorSpace(rays, {0:[3]})
QQ^1 >= 0 in QQ^2
sage: FilteredVectorSpace({1:[(1,0), (-1,1)], 3:[(1,0)]}, base_ring=GF(3))
GF(3)^2 >= GF(3)^1 >= GF(3)^1 >= 0
sage: FilteredVectorSpace({1:[(1,0), (-1,1)], 3:[(1,0)]}, base_ring=AA)
Vector space of dimension 2 over Algebraic Real Field
>= Vector space of dimension 1 over Algebraic Real Field
>= Vector space of dimension 1 over Algebraic Real Field >= 0
"""
finite_support = [d for d in self.support() if d != infinity]
if len(finite_support) == 0:
dims = self._repr_degrees(0, -1)
else:
min_deg = finite_support[0]
max_deg = finite_support[-1]
dims = self._repr_degrees(min_deg, max_deg)
s = ' >= '.join(dims)
if not self.is_exhaustive():
s += ' in ' + self._repr_vector_space(self.degree())
return s
def __cmp__(self, other):
"""
Compare two filtered vector spaces.
EXAMPLES::
sage: V = FilteredVectorSpace(2, 0)
sage: W = FilteredVectorSpace([(1,0),(0,1)], {0:[0, 1]})
sage: V == W
True
sage: V is W
False
sage: W = FilteredVectorSpace([(1,0),(1,1)], {0:[1]})
sage: V == W
False
TESTS::
sage: P = toric_varieties.P2()
sage: T_P = P.sheaves.tangent_bundle()
sage: O_P = P.sheaves.trivial_bundle(1)
sage: S1 = T_P + O_P
sage: S2 = O_P + T_P
sage: S1._filt[0].is_isomorphic(S2._filt[0]) # known bug
True
sage: FilteredVectorSpace(2, base_ring=QQ) == FilteredVectorSpace(2, base_ring=GF(5))
False
"""
        c = cmp(type(self), type(other))
        if c != 0:
            return c
        c = cmp(self.base_ring(), other.base_ring())
        if c != 0:
            return c
        c = cmp(self.dimension(), other.dimension())
        if c != 0:
            return c
        c = cmp(len(self._filt), len(other._filt))
        if c != 0:
            return c
        for self_filt, other_filt in zip(self._filt, other._filt):
            c = cmp(self_filt[0], other_filt[0])  # compare degree
            if c != 0:
                return c
            c = cmp(self_filt[1].echelonized_basis_matrix(),  # compare vector subspace
                    other_filt[1].echelonized_basis_matrix())
            if c != 0:
                return c
        return 0
def direct_sum(self, other):
"""
Return the direct sum.
INPUT:
- ``other`` -- a filtered vector space.
OUTPUT:
The direct sum as a filtered vector space.
EXAMPLES::
sage: V = FilteredVectorSpace(2, 0)
sage: W = FilteredVectorSpace({0:[(1,-1),(2,1)], 1:[(1,1)]})
sage: V.direct_sum(W)
QQ^4 >= QQ^1 >= 0
sage: V + W # syntactic sugar
QQ^4 >= QQ^1 >= 0
sage: V + V == FilteredVectorSpace(4, 0)
True
sage: W = FilteredVectorSpace([(1,-1),(2,1)], {1:[0,1], 2:[1]})
sage: V + W
QQ^4 >= QQ^2 >= QQ^1 >= 0
A suitable base ring is chosen if they do not match::
sage: v = [(1,0), (0,1)]
sage: F1 = FilteredVectorSpace(v, {0:[0], 1:[1]}, base_ring=QQ)
sage: F2 = FilteredVectorSpace(v, {0:[0], 1:[1]}, base_ring=RDF)
sage: F1 + F2
RDF^4 >= RDF^2 >= 0
"""
from sage.structure.element import get_coercion_model
base_ring = get_coercion_model().common_parent(self.base_ring(), other.base_ring())
# construct the generators
self_gens, self_filt = self.presentation()
other_gens, other_filt = other.presentation()
generators = \
[ list(v) + [base_ring.zero()]*other.dimension() for v in self_gens ] + \
[ [base_ring.zero()]*self.dimension() + list(v) for v in other_gens ]
# construct the filtration dictionary
def join_indices(self_indices, other_indices):
self_indices = tuple(self_indices)
other_indices = tuple(i + len(self_gens) for i in other_indices)
return self_indices + other_indices
filtration = dict()
self_indices = set()
other_indices = set()
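        # Walk the degrees from highest to lowest and accumulate indices:
        # the filtration is decreasing, so a generator present at some
        # degree is also present at every lower degree.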
        for deg in sorted(set(self_filt) | set(other_filt), reverse=True):
self_indices.update(self_filt.get(deg, []))
other_indices.update(other_filt.get(deg, []))
gens = join_indices(self_indices, other_indices)
filtration[deg] = gens
return FilteredVectorSpace(generators, filtration, base_ring=base_ring)
__add__ = direct_sum
def tensor_product(self, other):
r"""
Return the graded tensor product.
INPUT:
- ``other`` -- a filtered vector space.
OUTPUT:
The graded tensor product, that is, the tensor product of a
generator of degree `d_1` with a generator in degree `d_2` has
degree `d_1 + d_2`.
EXAMPLES::
sage: F1 = FilteredVectorSpace(1, 1)
sage: F2 = FilteredVectorSpace(1, 2)
sage: F1.tensor_product(F2)
QQ^1 >= 0
sage: F1 * F2
QQ^1 >= 0
sage: F1.min_degree()
1
sage: F2.min_degree()
2
sage: (F1*F2).min_degree()
3
A suitable base ring is chosen if they do not match::
sage: v = [(1,0), (0,1)]
sage: F1 = FilteredVectorSpace(v, {0:[0], 1:[1]}, base_ring=QQ)
sage: F2 = FilteredVectorSpace(v, {0:[0], 1:[1]}, base_ring=RDF)
sage: F1 * F2
RDF^4 >= RDF^3 >= RDF^1 >= 0
"""
V = self
W = other
from sage.structure.element import get_coercion_model
base_ring = get_coercion_model().common_parent(V.base_ring(), W.base_ring())
from sage.modules.tensor_operations import VectorCollection, TensorOperation
V_generators, V_indices = V.presentation()
W_generators, W_indices = W.presentation()
V_coll = VectorCollection(V_generators, base_ring, V.dimension())
W_coll = VectorCollection(W_generators, base_ring, W.dimension())
T = TensorOperation([V_coll, W_coll], 'product')
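        # T.index_map(i, j) gives the position of the product of the i-th
        # and j-th generators inside T.vectors(); that product generator is
        # filed below under degree V_deg + W_deg.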
filtration = dict()
for V_deg in V.support():
for W_deg in W.support():
deg = V_deg + W_deg
indices = filtration.get(deg, set())
for i in V_indices[V_deg]:
for j in W_indices[W_deg]:
i_tensor_j = T.index_map(i, j)
indices.add(i_tensor_j)
filtration[deg] = indices
return FilteredVectorSpace(T.vectors(), filtration, base_ring=base_ring)
__mul__ = tensor_product
def _power_operation(self, n, operation):
"""
Return tensor power operation.
INPUT:
        - ``n`` -- integer. The number of factors of ``self``.
- ``operation`` -- string. See
:class:`~sage.modules.tensor_operations.TensorOperation` for
details.
EXAMPLES::
sage: F = FilteredVectorSpace(1, 1) + FilteredVectorSpace(1, 2); F
QQ^2 >= QQ^1 >= 0
sage: F._power_operation(2, 'symmetric')
QQ^3 >= QQ^2 >= QQ^1 >= 0
sage: F._power_operation(2, 'antisymmetric')
QQ^1 >= 0
"""
from sage.modules.tensor_operations import VectorCollection, TensorOperation
generators, indices = self.presentation()
V = VectorCollection(generators, self.base_ring(), self.dimension())
T = TensorOperation([V] * n, operation)
iters = [self.support()] * n
filtration = dict()
from sage.categories.cartesian_product import cartesian_product
for degrees in cartesian_product(iters):
deg = sum(degrees)
filt_deg = filtration.get(deg, set())
for i in cartesian_product([indices.get(d) for d in degrees]):
pow_i = T.index_map(*i)
if pow_i is not None:
filt_deg.add(pow_i)
filtration[deg] = filt_deg
return FilteredVectorSpace(T.vectors(), filtration, base_ring=self.base_ring())
def exterior_power(self, n):
"""
Return the `n`-th graded exterior power.
INPUT:
- ``n`` -- integer. Exterior product of how many copies of
``self``.
OUTPUT:
The graded exterior product, that is, the wedge product of a
generator of degree `d_1` with a generator in degree `d_2` has
degree `d_1 + d_2`.
EXAMPLES::
sage: F = FilteredVectorSpace(1, 1) + FilteredVectorSpace(1, 2); F
QQ^2 >= QQ^1 >= 0
sage: F.exterior_power(1)
QQ^2 >= QQ^1 >= 0
sage: F.exterior_power(2)
QQ^1 >= 0
sage: F.exterior_power(3)
0
sage: F.wedge(2)
QQ^1 >= 0
"""
return self._power_operation(n, 'antisymmetric')
wedge = exterior_power
def symmetric_power(self, n):
"""
Return the `n`-th graded symmetric power.
INPUT:
- ``n`` -- integer. Symmetric product of how many copies of
``self``.
OUTPUT:
The graded symmetric product, that is, the symmetrization of a
generator of degree `d_1` with a generator in degree `d_2` has
degree `d_1 + d_2`.
EXAMPLES::
sage: F = FilteredVectorSpace(1, 1) + FilteredVectorSpace(1, 2); F
QQ^2 >= QQ^1 >= 0
sage: F.symmetric_power(2)
QQ^3 >= QQ^2 >= QQ^1 >= 0
"""
return self._power_operation(n, 'symmetric')
def dual(self):
"""
Return the dual filtered vector space.
OUTPUT:
The graded dual, that is, the dual of a degree-`d` subspace is
a set of linear constraints in degree `-d+1`. That is, the
dual generators live in degree `-d`.
EXAMPLES::
sage: gens = identity_matrix(3).rows()
sage: F = FilteredVectorSpace(gens, {0:[0,1,2], 2:[0]}); F
QQ^3 >= QQ^1 >= QQ^1 >= 0
sage: F.support()
(0, 2)
sage: F.dual()
QQ^3 >= QQ^2 >= QQ^2 >= 0
sage: F.dual().support()
(-2, 0)
"""
filtration = dict()
prev_deg = minus_infinity
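        # The complement of each filtration step becomes a step of the dual,
        # recorded at the negated degree of the *previous* step; this is the
        # degree shift described in the docstring above.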
for deg, V in self._filt[1:]:
filtration[-prev_deg] = V.complement().echelonized_basis()
prev_deg = deg
return FilteredVectorSpace(filtration, base_ring=self.base_ring())
def shift(self, deg):
"""
Return a filtered vector space with degrees shifted by a constant.
EXAMPLES::
sage: gens = identity_matrix(3).rows()
sage: F = FilteredVectorSpace(gens, {0:[0,1,2], 2:[0]}); F
QQ^3 >= QQ^1 >= QQ^1 >= 0
sage: F.support()
(0, 2)
sage: F.shift(-5).support()
(-5, -3)
"""
generators, filtration = self.presentation()
shifted = dict()
        for d, indices in filtration.items():
shifted[d + deg] = indices
return FilteredVectorSpace(generators, shifted, base_ring=self.base_ring())
def random_deformation(self, epsilon=None):
"""
Return a random deformation
INPUT:
- ``epsilon`` -- a number in the base ring.
OUTPUT:
A new filtered vector space where the generators of the
subspaces are moved by ``epsilon`` times a random vector.
EXAMPLES::
sage: gens = identity_matrix(3).rows()
sage: F = FilteredVectorSpace(gens, {0:[0,1,2], 2:[0]}); F
QQ^3 >= QQ^1 >= QQ^1 >= 0
sage: F.get_degree(2)
Vector space of degree 3 and dimension 1 over Rational Field
Basis matrix:
[1 0 0]
sage: G = F.random_deformation(1/50); G
QQ^3 >= QQ^1 >= QQ^1 >= 0
sage: G.get_degree(2)
Vector space of degree 3 and dimension 1 over Rational Field
Basis matrix:
[ 1 -15/304 0]
"""
from sage.modules.free_module_element import random_vector
R = self.base_ring()
if epsilon is None:
epsilon = R.one()
filtration = dict()
for deg, filt in self._filt[1:]:
generators = [v + epsilon * random_vector(R, self.rank())
for v in filt.echelonized_basis()]
filtration[deg] = generators
return FilteredVectorSpace(filtration, base_ring=R, check=True)
the-stack_0_9383 | """ test to_datetime """
import calendar
from collections import deque
from datetime import (
datetime,
timedelta,
)
from decimal import Decimal
import locale
from dateutil.parser import parse
from dateutil.tz.tz import tzoffset
import numpy as np
import pytest
import pytz
from pandas._libs import tslib
from pandas._libs.tslibs import (
iNaT,
parsing,
)
from pandas.errors import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
)
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_datetime64_ns_dtype
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
NaT,
Series,
Timestamp,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
from pandas.core.tools import datetimes as tools
class TestTimeConversionFormats:
@pytest.mark.parametrize("readonly", [True, False])
def test_to_datetime_readonly(self, readonly):
# GH#34857
arr = np.array([], dtype=object)
if readonly:
arr.setflags(write=False)
result = to_datetime(arr)
expected = to_datetime([])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format(self, cache):
values = ["1/1/2000", "1/2/2000", "1/3/2000"]
results1 = [Timestamp("20000101"), Timestamp("20000201"), Timestamp("20000301")]
results2 = [Timestamp("20000101"), Timestamp("20000102"), Timestamp("20000103")]
for vals, expecteds in [
(values, (Index(results1), Index(results2))),
(Series(values), (Series(results1), Series(results2))),
(values[0], (results1[0], results2[0])),
(values[1], (results1[1], results2[1])),
(values[2], (results1[2], results2[2])),
]:
for i, fmt in enumerate(["%d/%m/%Y", "%m/%d/%Y"]):
result = to_datetime(vals, format=fmt, cache=cache)
expected = expecteds[i]
if isinstance(expected, Series):
tm.assert_series_equal(result, Series(expected))
elif isinstance(expected, Timestamp):
assert result == expected
else:
tm.assert_index_equal(result, expected)
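    # Illustrative sketch, not part of the upstream suite: the same
    # format-driven parsing as above, reduced to one scalar and one list,
    # to document the expected day-first vs. month-first round trip.
    def test_to_datetime_format_sketch(self):
        assert to_datetime("01/02/2000", format="%d/%m/%Y") == Timestamp("2000-02-01")
        result = to_datetime(["01/02/2000", "03/04/2000"], format="%m/%d/%Y")
        expected = DatetimeIndex([Timestamp("2000-01-02"), Timestamp("2000-03-04")])
        tm.assert_index_equal(result, expected)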
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_YYYYMMDD(self, cache):
s = Series([19801222, 19801222] + [19810105] * 5)
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
result = to_datetime(s.apply(str), format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# with NaT
expected = Series(
[Timestamp("19801222"), Timestamp("19801222")] + [Timestamp("19810105")] * 5
)
expected[2] = np.nan
s[2] = np.nan
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# string with NaT
s = s.apply(str)
s[2] = "nat"
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# coercion
# GH 7930
s = Series([20121231, 20141231, 99991231])
result = pd.to_datetime(s, format="%Y%m%d", errors="ignore", cache=cache)
expected = Series(
[datetime(2012, 12, 31), datetime(2014, 12, 31), datetime(9999, 12, 31)],
dtype=object,
)
tm.assert_series_equal(result, expected)
result = pd.to_datetime(s, format="%Y%m%d", errors="coerce", cache=cache)
expected = Series(["20121231", "20141231", "NaT"], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_s",
[
# Null values with Strings
["19801222", "20010112", None],
["19801222", "20010112", np.nan],
["19801222", "20010112", pd.NaT],
["19801222", "20010112", "NaT"],
# Null values with Integers
[19801222, 20010112, None],
[19801222, 20010112, np.nan],
[19801222, 20010112, pd.NaT],
[19801222, 20010112, "NaT"],
],
)
def test_to_datetime_format_YYYYMMDD_with_none(self, input_s):
# GH 30011
# format='%Y%m%d'
# with None
expected = Series([Timestamp("19801222"), Timestamp("20010112"), pd.NaT])
result = Series(pd.to_datetime(input_s, format="%Y%m%d"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_s, expected",
[
# NaN before strings with invalid date values
[
Series(["19801222", np.nan, "20010012", "10019999"]),
Series([Timestamp("19801222"), np.nan, np.nan, np.nan]),
],
# NaN after strings with invalid date values
[
Series(["19801222", "20010012", "10019999", np.nan]),
Series([Timestamp("19801222"), np.nan, np.nan, np.nan]),
],
# NaN before integers with invalid date values
[
Series([20190813, np.nan, 20010012, 20019999]),
Series([Timestamp("20190813"), np.nan, np.nan, np.nan]),
],
# NaN after integers with invalid date values
[
Series([20190813, 20010012, np.nan, 20019999]),
Series([Timestamp("20190813"), np.nan, np.nan, np.nan]),
],
],
)
def test_to_datetime_format_YYYYMMDD_overflow(self, input_s, expected):
# GH 25512
# format='%Y%m%d', errors='coerce'
result = pd.to_datetime(input_s, format="%Y%m%d", errors="coerce")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_integer(self, cache):
# GH 10178
s = Series([2000, 2001, 2002])
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format="%Y", cache=cache)
tm.assert_series_equal(result, expected)
s = Series([200001, 200105, 200206])
expected = Series([Timestamp(x[:4] + "-" + x[4:]) for x in s.apply(str)])
result = to_datetime(s, format="%Y%m", cache=cache)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"int_date, expected",
[
# valid date, length == 8
[20121030, datetime(2012, 10, 30)],
# short valid date, length == 6
[199934, datetime(1999, 3, 4)],
# long integer date partially parsed to datetime(2012,1,1), length > 8
[2012010101, 2012010101],
# invalid date partially parsed to datetime(2012,9,9), length == 8
[20129930, 20129930],
# short integer date partially parsed to datetime(2012,9,9), length < 8
[2012993, 2012993],
# short invalid date, length == 4
[2121, 2121],
],
)
def test_int_to_datetime_format_YYYYMMDD_typeerror(self, int_date, expected):
# GH 26583
result = to_datetime(int_date, format="%Y%m%d", errors="ignore")
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_microsecond(self, cache):
# these are locale dependent
lang, _ = locale.getlocale()
month_abbr = calendar.month_abbr[4]
val = f"01-{month_abbr}-2011 00:00:01.978"
format = "%d-%b-%Y %H:%M:%S.%f"
result = to_datetime(val, format=format, cache=cache)
exp = datetime.strptime(val, format)
assert result == exp
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_time(self, cache):
data = [
["01/10/2010 15:20", "%m/%d/%Y %H:%M", Timestamp("2010-01-10 15:20")],
["01/10/2010 05:43", "%m/%d/%Y %I:%M", Timestamp("2010-01-10 05:43")],
[
"01/10/2010 13:56:01",
"%m/%d/%Y %H:%M:%S",
Timestamp("2010-01-10 13:56:01"),
] # ,
# ['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 20:14')],
# ['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 07:40')],
# ['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p',
# Timestamp('2010-01-10 09:12:56')]
]
for s, format, dt in data:
assert to_datetime(s, format=format, cache=cache) == dt
@td.skip_if_has_locale
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_non_exact(self, cache):
# GH 10834
# 8904
# exact kw
s = Series(
["19MAY11", "foobar19MAY11", "19MAY11:00:00:00", "19MAY11 00:00:00Z"]
)
result = to_datetime(s, format="%d%b%y", exact=False, cache=cache)
expected = to_datetime(
s.str.extract(r"(\d+\w+\d+)", expand=False), format="%d%b%y", cache=cache
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_parse_nanoseconds_with_formula(self, cache):
# GH8989
# truncating the nanoseconds when a format was provided
for v in [
"2012-01-01 09:00:00.000000001",
"2012-01-01 09:00:00.000001",
"2012-01-01 09:00:00.001",
"2012-01-01 09:00:00.001000",
"2012-01-01 09:00:00.001000000",
]:
expected = pd.to_datetime(v, cache=cache)
result = pd.to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f", cache=cache)
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_weeks(self, cache):
data = [
["2009324", "%Y%W%w", Timestamp("2009-08-13")],
["2013020", "%Y%U%w", Timestamp("2013-01-13")],
]
for s, format, dt in data:
assert to_datetime(s, format=format, cache=cache) == dt
@pytest.mark.parametrize(
"fmt,dates,expected_dates",
[
[
"%Y-%m-%d %H:%M:%S %Z",
["2010-01-01 12:00:00 UTC"] * 2,
[Timestamp("2010-01-01 12:00:00", tz="UTC")] * 2,
],
[
"%Y-%m-%d %H:%M:%S %Z",
[
"2010-01-01 12:00:00 UTC",
"2010-01-01 12:00:00 GMT",
"2010-01-01 12:00:00 US/Pacific",
],
[
Timestamp("2010-01-01 12:00:00", tz="UTC"),
Timestamp("2010-01-01 12:00:00", tz="GMT"),
Timestamp("2010-01-01 12:00:00", tz="US/Pacific"),
],
],
[
"%Y-%m-%d %H:%M:%S%z",
["2010-01-01 12:00:00+0100"] * 2,
[Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2,
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 +0100"] * 2,
[Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2,
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 +0100", "2010-01-01 12:00:00 -0100"],
[
Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60)),
Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(-60)),
],
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 Z", "2010-01-01 12:00:00 Z"],
[
Timestamp(
"2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)
), # pytz coerces to UTC
Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)),
],
],
],
)
def test_to_datetime_parse_tzname_or_tzoffset(self, fmt, dates, expected_dates):
# GH 13486
result = pd.to_datetime(dates, format=fmt)
expected = Index(expected_dates)
tm.assert_equal(result, expected)
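    # Illustrative sketch, not part of the upstream suite: the %z directive
    # also applies to a single scalar string and yields a tz-aware Timestamp.
    def test_to_datetime_scalar_tzoffset_sketch(self):
        result = to_datetime("2010-01-01 12:00:00 +0100", format="%Y-%m-%d %H:%M:%S %z")
        assert result == Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))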
def test_to_datetime_parse_tzname_or_tzoffset_different_tz_to_utc(self):
# GH 32792
dates = [
"2010-01-01 12:00:00 +0100",
"2010-01-01 12:00:00 -0100",
"2010-01-01 12:00:00 +0300",
"2010-01-01 12:00:00 +0400",
]
expected_dates = [
"2010-01-01 11:00:00+00:00",
"2010-01-01 13:00:00+00:00",
"2010-01-01 09:00:00+00:00",
"2010-01-01 08:00:00+00:00",
]
fmt = "%Y-%m-%d %H:%M:%S %z"
result = pd.to_datetime(dates, format=fmt, utc=True)
expected = DatetimeIndex(expected_dates)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"offset", ["+0", "-1foo", "UTCbar", ":10", "+01:000:01", ""]
)
def test_to_datetime_parse_timezone_malformed(self, offset):
fmt = "%Y-%m-%d %H:%M:%S %z"
date = "2010-01-01 12:00:00 " + offset
msg = "does not match format|unconverted data remains"
with pytest.raises(ValueError, match=msg):
pd.to_datetime([date], format=fmt)
def test_to_datetime_parse_timezone_keeps_name(self):
# GH 21697
fmt = "%Y-%m-%d %H:%M:%S %z"
arg = Index(["2010-01-01 12:00:00 Z"], name="foo")
result = pd.to_datetime(arg, format=fmt)
expected = DatetimeIndex(["2010-01-01 12:00:00"], tz="UTC", name="foo")
tm.assert_index_equal(result, expected)
class TestToDatetime:
@pytest.mark.parametrize(
"s, _format, dt",
[
["2015-1-1", "%G-%V-%u", datetime(2014, 12, 29, 0, 0)],
["2015-1-4", "%G-%V-%u", datetime(2015, 1, 1, 0, 0)],
["2015-1-7", "%G-%V-%u", datetime(2015, 1, 4, 0, 0)],
],
)
def test_to_datetime_iso_week_year_format(self, s, _format, dt):
# See GH#16607
assert to_datetime(s, format=_format) == dt
@pytest.mark.parametrize(
"msg, s, _format",
[
[
"ISO week directive '%V' must be used with the ISO year directive "
"'%G' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 50",
"%Y %V",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 51",
"%G %V",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 Monday",
"%G %A",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 Mon",
"%G %a",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 6",
"%G %w",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 6",
"%G %u",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"2051",
"%G",
],
[
"Day of the year directive '%j' is not compatible with ISO year "
"directive '%G'. Use '%Y' instead.",
"1999 51 6 256",
"%G %V %u %j",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 Sunday",
"%Y %V %A",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 Sun",
"%Y %V %a",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 1",
"%Y %V %w",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 1",
"%Y %V %u",
],
[
"ISO week directive '%V' must be used with the ISO year directive "
"'%G' and a weekday directive '%A', '%a', '%w', or '%u'.",
"20",
"%V",
],
],
)
def test_error_iso_week_year(self, msg, s, _format):
# See GH#16607
# This test checks for errors thrown when giving the wrong format
# However, as discussed on PR#25541, overriding the locale
# causes a different error to be thrown due to the format being
        # locale specific, but the test data is in English.
# Therefore, the tests only run when locale is not overwritten,
# as a sort of solution to this problem.
if locale.getlocale() != ("zh_CN", "UTF-8") and locale.getlocale() != (
"it_IT",
"UTF-8",
):
with pytest.raises(ValueError, match=msg):
to_datetime(s, format=_format)
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_to_datetime_dtarr(self, tz):
# DatetimeArray
dti = date_range("1965-04-03", periods=19, freq="2W", tz=tz)
arr = DatetimeArray(dti)
result = to_datetime(arr)
assert result is arr
result = to_datetime(arr)
assert result is arr
def test_to_datetime_pydatetime(self):
actual = pd.to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
def test_to_datetime_YYYYMMDD(self):
actual = pd.to_datetime("20080115")
assert actual == datetime(2008, 1, 15)
def test_to_datetime_unparseable_ignore(self):
# unparseable
s = "Month 1, 1999"
assert pd.to_datetime(s, errors="ignore") == s
@td.skip_if_windows # `tm.set_timezone` does not work in windows
def test_to_datetime_now(self):
# See GH#18666
with tm.set_timezone("US/Eastern"):
npnow = np.datetime64("now").astype("datetime64[ns]")
pdnow = pd.to_datetime("now")
pdnow2 = pd.to_datetime(["now"])[0]
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdnow.value - npnow.astype(np.int64)) < 1e10
assert abs(pdnow2.value - npnow.astype(np.int64)) < 1e10
assert pdnow.tzinfo is None
assert pdnow2.tzinfo is None
@td.skip_if_windows # `tm.set_timezone` does not work in windows
def test_to_datetime_today(self):
# See GH#18666
# Test with one timezone far ahead of UTC and another far behind, so
# one of these will _almost_ always be in a different day from UTC.
        # Unfortunately, when this test runs between 12 and 1 AM Samoa time,
        # both of these timezones _and_ UTC will all be in the same day,
        # so this test will not detect the regression introduced in #18666.
with tm.set_timezone("Pacific/Auckland"): # 12-13 hours ahead of UTC
nptoday = np.datetime64("today").astype("datetime64[ns]").astype(np.int64)
pdtoday = pd.to_datetime("today")
pdtoday2 = pd.to_datetime(["today"])[0]
tstoday = Timestamp("today")
tstoday2 = Timestamp.today()
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdtoday.normalize().value - nptoday) < 1e10
assert abs(pdtoday2.normalize().value - nptoday) < 1e10
assert abs(pdtoday.value - tstoday.value) < 1e10
assert abs(pdtoday.value - tstoday2.value) < 1e10
assert pdtoday.tzinfo is None
assert pdtoday2.tzinfo is None
with tm.set_timezone("US/Samoa"): # 11 hours behind UTC
nptoday = np.datetime64("today").astype("datetime64[ns]").astype(np.int64)
pdtoday = pd.to_datetime("today")
pdtoday2 = pd.to_datetime(["today"])[0]
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdtoday.normalize().value - nptoday) < 1e10
assert abs(pdtoday2.normalize().value - nptoday) < 1e10
assert pdtoday.tzinfo is None
assert pdtoday2.tzinfo is None
def test_to_datetime_today_now_unicode_bytes(self):
to_datetime(["now"])
to_datetime(["today"])
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_dt64s(self, cache):
in_bound_dts = [np.datetime64("2000-01-01"), np.datetime64("2000-01-02")]
for dt in in_bound_dts:
assert pd.to_datetime(dt, cache=cache) == Timestamp(dt)
@pytest.mark.parametrize(
"dt", [np.datetime64("1000-01-01"), np.datetime64("5000-01-02")]
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_dt64s_out_of_bounds(self, cache, dt):
msg = f"Out of bounds nanosecond timestamp: {dt}"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dt, errors="raise")
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp(dt)
assert pd.to_datetime(dt, errors="coerce", cache=cache) is NaT
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize("unit", ["s", "D"])
def test_to_datetime_array_of_dt64s(self, cache, unit):
# https://github.com/pandas-dev/pandas/issues/31491
# Need at least 50 to ensure cache is used.
dts = [
np.datetime64("2000-01-01", unit),
np.datetime64("2000-01-02", unit),
] * 30
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
tm.assert_index_equal(
pd.to_datetime(dts, cache=cache),
DatetimeIndex([Timestamp(x).asm8 for x in dts]),
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64("9999-01-01")]
msg = "Out of bounds nanosecond timestamp: 9999-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dts_with_oob, errors="raise")
tm.assert_index_equal(
pd.to_datetime(dts_with_oob, errors="coerce", cache=cache),
DatetimeIndex(
[Timestamp(dts_with_oob[0]).asm8, Timestamp(dts_with_oob[1]).asm8] * 30
+ [pd.NaT],
),
)
# With errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
tm.assert_index_equal(
pd.to_datetime(dts_with_oob, errors="ignore", cache=cache),
Index([dt.item() for dt in dts_with_oob]),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_tz(self, cache):
# xref 8260
# uniform returns a DatetimeIndex
arr = [
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
result = pd.to_datetime(arr, cache=cache)
expected = DatetimeIndex(
["2013-01-01 13:00:00", "2013-01-02 14:00:00"], tz="US/Pacific"
)
tm.assert_index_equal(result, expected)
# mixed tzs will raise
arr = [
Timestamp("2013-01-01 13:00:00", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00", tz="US/Eastern"),
]
msg = (
"Tz-aware datetime.datetime cannot be "
"converted to datetime64 unless utc=True"
)
with pytest.raises(ValueError, match=msg):
pd.to_datetime(arr, cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_different_offsets(self, cache):
# inspired by asv timeseries.ToDatetimeNONISO8601 benchmark
# see GH-26097 for more
ts_string_1 = "March 1, 2018 12:00:00+0400"
ts_string_2 = "March 1, 2018 12:00:00+0500"
arr = [ts_string_1] * 5 + [ts_string_2] * 5
expected = Index([parse(x) for x in arr])
result = pd.to_datetime(arr, cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_tz_pytz(self, cache):
# see gh-8260
us_eastern = pytz.timezone("US/Eastern")
arr = np.array(
[
us_eastern.localize(
datetime(year=2000, month=1, day=1, hour=3, minute=0)
),
us_eastern.localize(
datetime(year=2000, month=6, day=1, hour=3, minute=0)
),
],
dtype=object,
)
result = pd.to_datetime(arr, utc=True, cache=cache)
expected = DatetimeIndex(
["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"],
dtype="datetime64[ns, UTC]",
freq=None,
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"init_constructor, end_constructor, test_method",
[
(Index, DatetimeIndex, tm.assert_index_equal),
(list, DatetimeIndex, tm.assert_index_equal),
(np.array, DatetimeIndex, tm.assert_index_equal),
(Series, Series, tm.assert_series_equal),
],
)
def test_to_datetime_utc_true(
self, cache, init_constructor, end_constructor, test_method
):
# See gh-11934 & gh-6415
data = ["20100102 121314", "20100102 121315"]
expected_data = [
Timestamp("2010-01-02 12:13:14", tz="utc"),
Timestamp("2010-01-02 12:13:15", tz="utc"),
]
result = pd.to_datetime(
init_constructor(data), format="%Y%m%d %H%M%S", utc=True, cache=cache
)
expected = end_constructor(expected_data)
test_method(result, expected)
# Test scalar case as well
for scalar, expected in zip(data, expected_data):
result = pd.to_datetime(
scalar, format="%Y%m%d %H%M%S", utc=True, cache=cache
)
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_utc_true_with_series_single_value(self, cache):
# GH 15760 UTC=True with Series
ts = 1.5e18
result = pd.to_datetime(Series([ts]), utc=True, cache=cache)
expected = Series([Timestamp(ts, tz="utc")])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_utc_true_with_series_tzaware_string(self, cache):
ts = "2013-01-01 00:00:00-01:00"
expected_ts = "2013-01-01 01:00:00"
data = Series([ts] * 3)
result = pd.to_datetime(data, utc=True, cache=cache)
expected = Series([Timestamp(expected_ts, tz="utc")] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"date, dtype",
[
("2013-01-01 01:00:00", "datetime64[ns]"),
("2013-01-01 01:00:00", "datetime64[ns, UTC]"),
],
)
def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype):
expected = Series([Timestamp("2013-01-01 01:00:00", tz="UTC")])
result = pd.to_datetime(Series([date], dtype=dtype), utc=True, cache=cache)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@td.skip_if_no("psycopg2")
def test_to_datetime_tz_psycopg2(self, cache):
# xref 8260
import psycopg2
# misc cases
tz1 = psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)
tz2 = psycopg2.tz.FixedOffsetTimezone(offset=-240, name=None)
arr = np.array(
[
datetime(2000, 1, 1, 3, 0, tzinfo=tz1),
datetime(2000, 6, 1, 3, 0, tzinfo=tz2),
],
dtype=object,
)
result = pd.to_datetime(arr, errors="coerce", utc=True, cache=cache)
expected = DatetimeIndex(
["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"],
dtype="datetime64[ns, UTC]",
freq=None,
)
tm.assert_index_equal(result, expected)
# dtype coercion
i = DatetimeIndex(
["2000-01-01 08:00:00"],
tz=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None),
)
assert is_datetime64_ns_dtype(i)
# tz coercion
result = pd.to_datetime(i, errors="coerce", cache=cache)
tm.assert_index_equal(result, i)
result = pd.to_datetime(i, errors="coerce", utc=True, cache=cache)
expected = DatetimeIndex(["2000-01-01 13:00:00"], dtype="datetime64[ns, UTC]")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_datetime_bool(self, cache):
# GH13176
msg = r"dtype bool cannot be converted to datetime64\[ns\]"
with pytest.raises(TypeError, match=msg):
to_datetime(False)
assert to_datetime(False, errors="coerce", cache=cache) is NaT
assert to_datetime(False, errors="ignore", cache=cache) is False
with pytest.raises(TypeError, match=msg):
to_datetime(True)
assert to_datetime(True, errors="coerce", cache=cache) is NaT
assert to_datetime(True, errors="ignore", cache=cache) is True
msg = f"{type(cache)} is not convertible to datetime"
with pytest.raises(TypeError, match=msg):
to_datetime([False, datetime.today()], cache=cache)
with pytest.raises(TypeError, match=msg):
to_datetime(["20130101", True], cache=cache)
tm.assert_index_equal(
to_datetime([0, False, NaT, 0.0], errors="coerce", cache=cache),
DatetimeIndex(
[to_datetime(0, cache=cache), NaT, NaT, to_datetime(0, cache=cache)]
),
)
def test_datetime_invalid_datatype(self):
# GH13176
msg = "is not convertible to datetime"
with pytest.raises(TypeError, match=msg):
pd.to_datetime(bool)
with pytest.raises(TypeError, match=msg):
pd.to_datetime(pd.to_datetime)
@pytest.mark.parametrize("value", ["a", "00:01:99"])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_invalid_scalar(self, value, format, infer):
# GH24763
res = pd.to_datetime(
value, errors="ignore", format=format, infer_datetime_format=infer
)
assert res == value
res = pd.to_datetime(
value, errors="coerce", format=format, infer_datetime_format=infer
)
assert res is pd.NaT
msg = (
"is a bad directive in format|"
"second must be in 0..59|"
"Given date string not likely a datetime"
)
with pytest.raises(ValueError, match=msg):
pd.to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("value", ["3000/12/11 00:00:00"])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_outofbounds_scalar(self, value, format, infer):
# GH24763
res = pd.to_datetime(
value, errors="ignore", format=format, infer_datetime_format=infer
)
assert res == value
res = pd.to_datetime(
value, errors="coerce", format=format, infer_datetime_format=infer
)
assert res is pd.NaT
if format is not None:
msg = "is a bad directive in format|Out of bounds nanosecond timestamp"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
else:
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("values", [["a"], ["00:01:99"], ["a", "b", "99:00:00"]])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_invalid_index(self, values, format, infer):
# GH24763
res = pd.to_datetime(
values, errors="ignore", format=format, infer_datetime_format=infer
)
tm.assert_index_equal(res, Index(values))
res = pd.to_datetime(
values, errors="coerce", format=format, infer_datetime_format=infer
)
tm.assert_index_equal(res, DatetimeIndex([pd.NaT] * len(values)))
msg = (
"is a bad directive in format|"
"Given date string not likely a datetime|"
"second must be in 0..59"
)
with pytest.raises(ValueError, match=msg):
pd.to_datetime(
values, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None])
@pytest.mark.parametrize("constructor", [list, tuple, np.array, Index, deque])
def test_to_datetime_cache(self, utc, format, constructor):
date = "20130101 00:00:00"
test_dates = [date] * 10 ** 5
data = constructor(test_dates)
result = pd.to_datetime(data, utc=utc, format=format, cache=True)
expected = pd.to_datetime(data, utc=utc, format=format, cache=False)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"listlike",
[
(deque([Timestamp("2010-06-02 09:30:00")] * 51)),
([Timestamp("2010-06-02 09:30:00")] * 51),
(tuple([Timestamp("2010-06-02 09:30:00")] * 51)),
],
)
def test_no_slicing_errors_in_should_cache(self, listlike):
# GH 29403
assert tools.should_cache(listlike) is True
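    # Illustrative sketch, not part of the upstream suite: short inputs skip
    # the cache entirely (assuming the 50-element threshold that the
    # 51-element cases above are built around).
    def test_should_cache_short_input_sketch(self):
        assert tools.should_cache([Timestamp("2010-06-02 09:30:00")] * 2) is False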
def test_to_datetime_from_deque(self):
# GH 29403
result = pd.to_datetime(deque([Timestamp("2010-06-02 09:30:00")] * 51))
expected = pd.to_datetime([Timestamp("2010-06-02 09:30:00")] * 51)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None])
def test_to_datetime_cache_series(self, utc, format):
date = "20130101 00:00:00"
test_dates = [date] * 10 ** 5
data = Series(test_dates)
result = pd.to_datetime(data, utc=utc, format=format, cache=True)
expected = pd.to_datetime(data, utc=utc, format=format, cache=False)
tm.assert_series_equal(result, expected)
def test_to_datetime_cache_scalar(self):
date = "20130101 00:00:00"
result = pd.to_datetime(date, cache=True)
expected = Timestamp("20130101 00:00:00")
assert result == expected
@pytest.mark.parametrize(
"date, format",
[
("2017-20", "%Y-%W"),
("20 Sunday", "%W %A"),
("20 Sun", "%W %a"),
("2017-21", "%Y-%U"),
("20 Sunday", "%U %A"),
("20 Sun", "%U %a"),
],
)
def test_week_without_day_and_calendar_year(self, date, format):
# GH16774
msg = "Cannot use '%W' or '%U' without day and year"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(date, format=format)
def test_to_datetime_coerce(self):
# GH 26122
ts_strings = [
"March 1, 2018 12:00:00+0400",
"March 1, 2018 12:00:00+0500",
"20100240",
]
result = to_datetime(ts_strings, errors="coerce")
expected = Index(
[
datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 14400)),
datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 18000)),
NaT,
]
)
tm.assert_index_equal(result, expected)
def test_to_datetime_coerce_malformed(self):
# GH 28299
ts_strings = ["200622-12-31", "111111-24-11"]
result = to_datetime(ts_strings, errors="coerce")
expected = Index([NaT, NaT])
tm.assert_index_equal(result, expected)
def test_iso_8601_strings_with_same_offset(self):
# GH 17697, 11736
ts_str = "2015-11-18 15:30:00+05:30"
result = to_datetime(ts_str)
expected = Timestamp(ts_str)
assert result == expected
expected = DatetimeIndex([Timestamp(ts_str)] * 2)
result = to_datetime([ts_str] * 2)
tm.assert_index_equal(result, expected)
result = DatetimeIndex([ts_str] * 2)
tm.assert_index_equal(result, expected)
def test_iso_8601_strings_with_different_offsets(self):
# GH 17697, 11736
ts_strings = ["2015-11-18 15:30:00+05:30", "2015-11-18 16:30:00+06:30", NaT]
result = to_datetime(ts_strings)
expected = np.array(
[
datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 19800)),
datetime(2015, 11, 18, 16, 30, tzinfo=tzoffset(None, 23400)),
NaT,
],
dtype=object,
)
# GH 21864
expected = Index(expected)
tm.assert_index_equal(result, expected)
result = to_datetime(ts_strings, utc=True)
expected = DatetimeIndex(
[Timestamp(2015, 11, 18, 10), Timestamp(2015, 11, 18, 10), NaT], tz="UTC"
)
tm.assert_index_equal(result, expected)
def test_iso8601_strings_mixed_offsets_with_naive(self):
# GH 24992
result = pd.to_datetime(
[
"2018-11-28T00:00:00",
"2018-11-28T00:00:00+12:00",
"2018-11-28T00:00:00",
"2018-11-28T00:00:00+06:00",
"2018-11-28T00:00:00",
],
utc=True,
)
expected = pd.to_datetime(
[
"2018-11-28T00:00:00",
"2018-11-27T12:00:00",
"2018-11-28T00:00:00",
"2018-11-27T18:00:00",
"2018-11-28T00:00:00",
],
utc=True,
)
tm.assert_index_equal(result, expected)
items = ["2018-11-28T00:00:00+12:00", "2018-11-28T00:00:00"]
result = pd.to_datetime(items, utc=True)
expected = pd.to_datetime(list(reversed(items)), utc=True)[::-1]
tm.assert_index_equal(result, expected)
def test_mixed_offsets_with_native_datetime_raises(self):
# GH 25978
s = Series(
[
"nan",
Timestamp("1990-01-01"),
"2015-03-14T16:15:14.123-08:00",
"2019-03-04T21:56:32.620-07:00",
None,
]
)
with pytest.raises(ValueError, match="Tz-aware datetime.datetime"):
pd.to_datetime(s)
def test_non_iso_strings_with_tz_offset(self):
result = to_datetime(["March 1, 2018 12:00:00+0400"] * 2)
expected = DatetimeIndex(
[datetime(2018, 3, 1, 12, tzinfo=pytz.FixedOffset(240))] * 2
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"ts, expected",
[
(Timestamp("2018-01-01"), Timestamp("2018-01-01", tz="UTC")),
(
Timestamp("2018-01-01", tz="US/Pacific"),
Timestamp("2018-01-01 08:00", tz="UTC"),
),
],
)
def test_timestamp_utc_true(self, ts, expected):
# GH 24415
result = to_datetime(ts, utc=True)
assert result == expected
@pytest.mark.parametrize("dt_str", ["00010101", "13000101", "30000101", "99990101"])
def test_to_datetime_with_format_out_of_bounds(self, dt_str):
# GH 9107
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dt_str, format="%Y%m%d")
def test_to_datetime_utc(self):
arr = np.array([parse("2012-06-13T01:39:00Z")], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
def test_to_datetime_fixed_offset(self):
from pandas.tests.indexes.datetimes.test_timezones import fixed_off
dates = [
datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off),
]
result = to_datetime(dates)
assert result.tz == fixed_off
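    # Illustrative sketch, not part of the upstream suite: combining a fixed
    # offset in the input string with utc=True normalises the result to UTC.
    def test_to_datetime_fixed_offset_utc_sketch(self):
        result = to_datetime(["2012-06-13 01:39:00+02:00"], utc=True)
        expected = DatetimeIndex(["2012-06-12 23:39:00"], tz="UTC")
        tm.assert_index_equal(result, expected)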
class TestToDatetimeUnit:
@pytest.mark.parametrize("cache", [True, False])
def test_unit(self, cache):
# GH 11758
# test proper behavior with errors
msg = "cannot specify both format and unit"
with pytest.raises(ValueError, match=msg):
to_datetime([1], unit="D", format="%Y%m%d", cache=cache)
values = [11111111, 1, 1.0, iNaT, NaT, np.nan, "NaT", ""]
result = to_datetime(values, unit="D", errors="ignore", cache=cache)
expected = Index(
[
11111111,
Timestamp("1970-01-02"),
Timestamp("1970-01-02"),
NaT,
NaT,
NaT,
NaT,
NaT,
],
dtype=object,
)
tm.assert_index_equal(result, expected)
result = to_datetime(values, unit="D", errors="coerce", cache=cache)
expected = DatetimeIndex(
["NaT", "1970-01-02", "1970-01-02", "NaT", "NaT", "NaT", "NaT", "NaT"]
)
tm.assert_index_equal(result, expected)
msg = "cannot convert input 11111111 with the unit 'D'"
with pytest.raises(tslib.OutOfBoundsDatetime, match=msg):
to_datetime(values, unit="D", errors="raise", cache=cache)
values = [1420043460000, iNaT, NaT, np.nan, "NaT"]
result = to_datetime(values, errors="ignore", unit="s", cache=cache)
expected = Index([1420043460000, NaT, NaT, NaT, NaT], dtype=object)
tm.assert_index_equal(result, expected)
result = to_datetime(values, errors="coerce", unit="s", cache=cache)
expected = DatetimeIndex(["NaT", "NaT", "NaT", "NaT", "NaT"])
tm.assert_index_equal(result, expected)
msg = "cannot convert input 1420043460000 with the unit 's'"
with pytest.raises(tslib.OutOfBoundsDatetime, match=msg):
to_datetime(values, errors="raise", unit="s", cache=cache)
# if we have a string, then we raise a ValueError
# and NOT an OutOfBoundsDatetime
for val in ["foo", Timestamp("20130101")]:
try:
to_datetime(val, errors="raise", unit="s", cache=cache)
except tslib.OutOfBoundsDatetime as err:
raise AssertionError("incorrect exception raised") from err
except ValueError:
pass
@pytest.mark.parametrize("cache", [True, False])
def test_unit_consistency(self, cache):
# consistency of conversions
expected = Timestamp("1970-05-09 14:25:11")
result = pd.to_datetime(11111111, unit="s", errors="raise", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
result = pd.to_datetime(11111111, unit="s", errors="coerce", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
result = pd.to_datetime(11111111, unit="s", errors="ignore", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_with_numeric(self, cache):
# GH 13180
# coercions from floats/ints are ok
expected = DatetimeIndex(["2015-06-19 05:33:20", "2015-05-27 22:33:20"])
arr1 = [1.434692e18, 1.432766e18]
arr2 = np.array(arr1).astype("int64")
for errors in ["ignore", "raise", "coerce"]:
result = pd.to_datetime(arr1, errors=errors, cache=cache)
tm.assert_index_equal(result, expected)
result = pd.to_datetime(arr2, errors=errors, cache=cache)
tm.assert_index_equal(result, expected)
# but we want to make sure that we are coercing
# if we have ints/strings
expected = DatetimeIndex(["NaT", "2015-06-19 05:33:20", "2015-05-27 22:33:20"])
arr = ["foo", 1.434692e18, 1.432766e18]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
expected = DatetimeIndex(
["2015-06-19 05:33:20", "2015-05-27 22:33:20", "NaT", "NaT"]
)
arr = [1.434692e18, 1.432766e18, "foo", "NaT"]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_mixed(self, cache):
# mixed integers/datetimes
expected = DatetimeIndex(["2013-01-01", "NaT", "NaT"])
arr = [Timestamp("20130101"), 1.434692e18, 1.432766e18]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
msg = "mixed datetimes and integers in passed array"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(arr, errors="raise", cache=cache)
expected = DatetimeIndex(["NaT", "NaT", "2013-01-01"])
arr = [1.434692e18, 1.432766e18, Timestamp("20130101")]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
pd.to_datetime(arr, errors="raise", cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_rounding(self, cache):
# GH 14156 & GH 20445: argument will incur floating point errors
# but no premature rounding
result = pd.to_datetime(1434743731.8770001, unit="s", cache=cache)
expected = Timestamp("2015-06-19 19:55:31.877000192")
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_unit_ignore_keeps_name(self, cache):
# GH 21697
expected = Index([15e9] * 2, name="name")
result = pd.to_datetime(expected, errors="ignore", unit="s", cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_dataframe(self, cache):
df = DataFrame(
{
"year": [2015, 2016],
"month": [2, 3],
"day": [4, 5],
"hour": [6, 7],
"minute": [58, 59],
"second": [10, 11],
"ms": [1, 1],
"us": [2, 2],
"ns": [3, 3],
}
)
result = to_datetime(
{"year": df["year"], "month": df["month"], "day": df["day"]}, cache=cache
)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:0:00")]
)
tm.assert_series_equal(result, expected)
# dict-like
result = to_datetime(df[["year", "month", "day"]].to_dict(), cache=cache)
tm.assert_series_equal(result, expected)
# dict but with constructable
df2 = df[["year", "month", "day"]].to_dict()
df2["month"] = 2
result = to_datetime(df2, cache=cache)
expected2 = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160205 00:0:00")]
)
tm.assert_series_equal(result, expected2)
# unit mappings
units = [
{
"year": "years",
"month": "months",
"day": "days",
"hour": "hours",
"minute": "minutes",
"second": "seconds",
},
{
"year": "year",
"month": "month",
"day": "day",
"hour": "hour",
"minute": "minute",
"second": "second",
},
]
for d in units:
result = to_datetime(df[list(d.keys())].rename(columns=d), cache=cache)
expected = Series(
[Timestamp("20150204 06:58:10"), Timestamp("20160305 07:59:11")]
)
tm.assert_series_equal(result, expected)
d = {
"year": "year",
"month": "month",
"day": "day",
"hour": "hour",
"minute": "minute",
"second": "second",
"ms": "ms",
"us": "us",
"ns": "ns",
}
result = to_datetime(df.rename(columns=d), cache=cache)
expected = Series(
[
Timestamp("20150204 06:58:10.001002003"),
Timestamp("20160305 07:59:11.001002003"),
]
)
tm.assert_series_equal(result, expected)
# coerce back to int
result = to_datetime(df.astype(str), cache=cache)
tm.assert_series_equal(result, expected)
# passing coerce
df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]})
msg = (
"cannot assemble the datetimes: time data .+ does not "
r"match format '%Y%m%d' \(match\)"
)
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
result = to_datetime(df2, errors="coerce", cache=cache)
expected = Series([Timestamp("20150204 00:00:00"), NaT])
tm.assert_series_equal(result, expected)
# extra columns
msg = r"extra keys have been passed to the datetime assemblage: \[foo\]"
with pytest.raises(ValueError, match=msg):
df2 = df.copy()
df2["foo"] = 1
to_datetime(df2, cache=cache)
# not enough
msg = (
r"to assemble mappings requires at least that \[year, month, "
r"day\] be specified: \[.+\] is missing"
)
for c in [
["year"],
["year", "month"],
["year", "month", "second"],
["month", "day"],
["year", "day", "second"],
]:
with pytest.raises(ValueError, match=msg):
to_datetime(df[c], cache=cache)
# duplicates
msg = "cannot assemble with duplicate keys"
df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]})
df2.columns = ["year", "year", "day"]
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
df2 = DataFrame(
{"year": [2015, 2016], "month": [2, 20], "day": [4, 5], "hour": [4, 5]}
)
df2.columns = ["year", "month", "day", "day"]
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_dataframe_dtypes(self, cache):
# #13451
df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
# int16
result = to_datetime(df.astype("int16"), cache=cache)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")]
)
tm.assert_series_equal(result, expected)
# mixed dtypes
df["month"] = df["month"].astype("int8")
df["day"] = df["day"].astype("int8")
result = to_datetime(df, cache=cache)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")]
)
tm.assert_series_equal(result, expected)
# float
df = DataFrame({"year": [2000, 2001], "month": [1.5, 1], "day": [1, 1]})
msg = "cannot assemble the datetimes: unconverted data remains: 1"
with pytest.raises(ValueError, match=msg):
to_datetime(df, cache=cache)
def test_dataframe_utc_true(self):
# GH 23760
df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
result = pd.to_datetime(df, utc=True)
expected = Series(
np.array(["2015-02-04", "2016-03-05"], dtype="datetime64[ns]")
).dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_to_datetime_errors_ignore_utc_true(self):
# GH 23758
result = pd.to_datetime([1], unit="s", utc=True, errors="ignore")
expected = DatetimeIndex(["1970-01-01 00:00:01"], tz="UTC")
tm.assert_index_equal(result, expected)
# TODO: this is moved from tests.series.test_timeseries, may be redundant
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([epoch + t for t in range(20)])
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT])
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
# GH13834
s = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[
Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t)
for t in np.arange(0, 2, 0.25)
]
+ [NaT]
)
# GH20455 argument will incur floating point errors but no premature rounding
result = result.round("ms")
tm.assert_series_equal(result, expected)
s = pd.concat(
[Series([epoch + t for t in range(20)]).astype(float), Series([np.nan])],
ignore_index=True,
)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
result = to_datetime([1, 2, "NaT", pd.NaT, np.nan], unit="D")
expected = DatetimeIndex(
[Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3
)
tm.assert_index_equal(result, expected)
msg = "non convertible value foo with the unit 'D'"
with pytest.raises(ValueError, match=msg):
to_datetime([1, 2, "foo"], unit="D")
msg = "cannot convert input 111111111 with the unit 'D'"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime([1, 2, 111111111], unit="D")
# coerce we can process
expected = DatetimeIndex(
[Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1
)
result = to_datetime([1, 2, "foo"], unit="D", errors="coerce")
tm.assert_index_equal(result, expected)
result = to_datetime([1, 2, 111111111], unit="D", errors="coerce")
tm.assert_index_equal(result, expected)
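    # Illustrative sketch, not part of the upstream suite: unit= scales the
    # epoch value, so 1000 ms and 1 s name the same instant.
    def test_unit_equivalence_sketch(self):
        assert to_datetime(1000, unit="ms") == to_datetime(1, unit="s")
        assert to_datetime(1, unit="s") == Timestamp("1970-01-01 00:00:01")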
class TestToDatetimeMisc:
def test_to_datetime_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object)
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(arr)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_iso8601(self, cache):
result = to_datetime(["2012-01-01 00:00:00"], cache=cache)
exp = Timestamp("2012-01-01 00:00:00")
assert result[0] == exp
result = to_datetime(["20121001"], cache=cache) # bad iso 8601
exp = Timestamp("2012-10-01")
assert result[0] == exp
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_default(self, cache):
rs = to_datetime("2001", cache=cache)
xp = datetime(2001, 1, 1)
assert rs == xp
# dayfirst is essentially broken
# to_datetime('01-13-2012', dayfirst=True)
# pytest.raises(ValueError, to_datetime('01-13-2012',
# dayfirst=True))
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_on_datetime64_series(self, cache):
# #2699
s = Series(date_range("1/1/2000", periods=10))
result = to_datetime(s, cache=cache)
assert result[0] == s[0]
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_space_in_series(self, cache):
# GH 6428
s = Series(["10/18/2006", "10/18/2008", " "])
msg = r"(\(')?String does not contain a date(:', ' '\))?"
with pytest.raises(ValueError, match=msg):
to_datetime(s, errors="raise", cache=cache)
result_coerce = to_datetime(s, errors="coerce", cache=cache)
expected_coerce = Series([datetime(2006, 10, 18), datetime(2008, 10, 18), NaT])
tm.assert_series_equal(result_coerce, expected_coerce)
result_ignore = to_datetime(s, errors="ignore", cache=cache)
tm.assert_series_equal(result_ignore, s)
@td.skip_if_has_locale
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_apply(self, cache):
# this is only locale tested with US/None locales
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(["May 04", "Jun 02", "Dec 11"], index=[1, 2, 3])
expected = pd.to_datetime(td, format="%b %y", cache=cache)
result = td.apply(pd.to_datetime, format="%b %y", cache=cache)
tm.assert_series_equal(result, expected)
td = Series(["May 04", "Jun 02", ""], index=[1, 2, 3])
msg = r"time data '' does not match format '%b %y' \(match\)"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(td, format="%b %y", errors="raise", cache=cache)
with pytest.raises(ValueError, match=msg):
td.apply(pd.to_datetime, format="%b %y", errors="raise", cache=cache)
expected = pd.to_datetime(td, format="%b %y", errors="coerce", cache=cache)
result = td.apply(
lambda x: pd.to_datetime(x, format="%b %y", errors="coerce", cache=cache)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_types(self, cache):
# empty string
result = to_datetime("", cache=cache)
assert result is NaT
result = to_datetime(["", ""], cache=cache)
assert isna(result).all()
# ints
result = Timestamp(0)
expected = to_datetime(0, cache=cache)
assert result == expected
# GH 3888 (strings)
expected = to_datetime(["2012"], cache=cache)[0]
result = to_datetime("2012", cache=cache)
assert result == expected
# array = ['2012','20120101','20120101 12:01:01']
array = ["20120101", "20120101 12:01:01"]
expected = list(to_datetime(array, cache=cache))
result = [Timestamp(date_str) for date_str in array]
tm.assert_almost_equal(result, expected)
# currently fails ###
# result = Timestamp('2012')
# expected = to_datetime('2012')
# assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_unprocessable_input(self, cache):
# GH 4928
# GH 21864
result = to_datetime([1, "1"], errors="ignore", cache=cache)
expected = Index(np.array([1, "1"], dtype="O"))
tm.assert_equal(result, expected)
msg = "invalid string coercion to datetime"
with pytest.raises(TypeError, match=msg):
to_datetime([1, "1"], errors="raise", cache=cache)
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view("M8[us]")
as_obj = scalar.astype("O")
index = DatetimeIndex([scalar])
assert index[0] == scalar.astype("O")
value = Timestamp(scalar)
assert value == as_obj
def test_to_datetime_list_of_integers(self):
rng = date_range("1/1/2000", periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
tm.assert_index_equal(rng, result)
def test_to_datetime_overflow(self):
# gh-17637
# we are overflowing Timedelta range here
msg = "|".join(
[
"Python int too large to convert to C long",
"long too big to convert",
"int too big to convert",
]
)
with pytest.raises(OutOfBoundsTimedelta, match=msg):
date_range(start="1/1/1700", freq="B", periods=100000)
@pytest.mark.parametrize("cache", [True, False])
def test_string_na_nat_conversion(self, cache):
# GH #999, #858
strings = np.array(
["1/1/2000", "1/2/2000", np.nan, "1/4/2000, 12:34:56"], dtype=object
)
expected = np.empty(4, dtype="M8[ns]")
for i, val in enumerate(strings):
if isna(val):
expected[i] = iNaT
else:
expected[i] = parse(val)
result = tslib.array_to_datetime(strings)[0]
tm.assert_almost_equal(result, expected)
result2 = to_datetime(strings, cache=cache)
assert isinstance(result2, DatetimeIndex)
tm.assert_numpy_array_equal(result, result2.values)
malformed = np.array(["1/100/2000", np.nan], dtype=object)
# GH 10636, default is now 'raise'
msg = r"Unknown string format:|day is out of range for month"
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors="raise", cache=cache)
result = to_datetime(malformed, errors="ignore", cache=cache)
# GH 21864
expected = Index(malformed)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors="raise", cache=cache)
idx = ["a", "b", "c", "d", "e"]
series = Series(
["1/1/2000", np.nan, "1/3/2000", np.nan, "1/5/2000"], index=idx, name="foo"
)
dseries = Series(
[
to_datetime("1/1/2000", cache=cache),
np.nan,
to_datetime("1/3/2000", cache=cache),
np.nan,
to_datetime("1/5/2000", cache=cache),
],
index=idx,
name="foo",
)
result = to_datetime(series, cache=cache)
dresult = to_datetime(dseries, cache=cache)
expected = Series(np.empty(5, dtype="M8[ns]"), index=idx)
for i in range(5):
x = series[i]
if isna(x):
expected[i] = pd.NaT
else:
expected[i] = to_datetime(x, cache=cache)
tm.assert_series_equal(result, expected, check_names=False)
assert result.name == "foo"
tm.assert_series_equal(dresult, expected, check_names=False)
assert dresult.name == "foo"
@pytest.mark.parametrize(
"dtype",
[
"datetime64[h]",
"datetime64[m]",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
@pytest.mark.parametrize("cache", [True, False])
def test_dti_constructor_numpy_timeunits(self, cache, dtype):
# GH 9114
base = pd.to_datetime(
["2000-01-01T00:00", "2000-01-02T00:00", "NaT"], cache=cache
)
values = base.values.astype(dtype)
tm.assert_index_equal(DatetimeIndex(values), base)
tm.assert_index_equal(to_datetime(values, cache=cache), base)
@pytest.mark.parametrize("cache", [True, False])
def test_dayfirst(self, cache):
# GH 5917
arr = ["10/02/2014", "11/02/2014", "12/02/2014"]
expected = DatetimeIndex(
[datetime(2014, 2, 10), datetime(2014, 2, 11), datetime(2014, 2, 12)]
)
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True, cache=cache)
idx4 = to_datetime(np.array(arr), dayfirst=True, cache=cache)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
tm.assert_index_equal(expected, idx1)
tm.assert_index_equal(expected, idx2)
tm.assert_index_equal(expected, idx3)
tm.assert_index_equal(expected, idx4)
tm.assert_index_equal(expected, idx5)
tm.assert_index_equal(expected, idx6)
@pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray])
def test_to_datetime_dta_tz(self, klass):
# GH#27733
dti = date_range("2015-04-05", periods=3).rename("foo")
expected = dti.tz_localize("UTC")
obj = klass(dti)
expected = klass(expected)
result = to_datetime(obj, utc=True)
tm.assert_equal(result, expected)
class TestGuessDatetimeFormat:
@td.skip_if_not_us_locale
def test_guess_datetime_format_for_array(self):
expected_format = "%Y-%m-%d %H:%M:%S.%f"
dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format)
test_arrays = [
np.array([dt_string, dt_string, dt_string], dtype="O"),
np.array([np.nan, np.nan, dt_string], dtype="O"),
np.array([dt_string, "random_string"], dtype="O"),
]
for test_array in test_arrays:
assert tools._guess_datetime_format_for_array(test_array) == expected_format
format_for_string_of_nans = tools._guess_datetime_format_for_array(
np.array([np.nan, np.nan, np.nan], dtype="O")
)
assert format_for_string_of_nans is None
class TestToDatetimeInferFormat:
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_consistent_format(self, cache):
s = Series(pd.date_range("20000101", periods=50, freq="H"))
test_formats = ["%m-%d-%Y", "%m/%d/%Y %H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S.%f"]
for test_format in test_formats:
s_as_dt_strings = s.apply(lambda x: x.strftime(test_format))
with_format = pd.to_datetime(
s_as_dt_strings, format=test_format, cache=cache
)
no_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=False, cache=cache
)
yes_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=True, cache=cache
)
            # Whether the format is explicitly passed, inferred, or not
            # inferred, the results should all be the same
tm.assert_series_equal(with_format, no_infer)
tm.assert_series_equal(no_infer, yes_infer)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache):
s = Series(
np.array(
["01/01/2011 00:00:00", "01-02-2011 00:00:00", "2011-01-03T00:00:00"]
)
)
# When the format is inconsistent, infer_datetime_format should just
# fallback to the default parsing
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
s = Series(np.array(["Jan/01/2011", "Feb/01/2011", "Mar/01/2011"]))
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_series_with_nans(self, cache):
s = Series(
np.array(["01/01/2011 00:00:00", np.nan, "01/03/2011 00:00:00", np.nan])
)
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache):
s = Series(
np.array(
[
np.nan,
np.nan,
"01/01/2011 00:00:00",
"01/02/2011 00:00:00",
"01/03/2011 00:00:00",
]
)
)
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize(
"tz_name, offset", [("UTC", 0), ("UTC-3", 180), ("UTC+3", -180)]
)
def test_infer_datetime_format_tz_name(self, tz_name, offset):
# GH 33133
s = Series([f"2019-02-02 08:07:13 {tz_name}"])
result = to_datetime(s, infer_datetime_format=True)
expected = Series(
[Timestamp("2019-02-02 08:07:13").tz_localize(pytz.FixedOffset(offset))]
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_iso8601_noleading_0s(self, cache):
# GH 11871
s = Series(["2014-1-1", "2014-2-2", "2015-3-3"])
expected = Series(
[
Timestamp("2014-01-01"),
Timestamp("2014-02-02"),
Timestamp("2015-03-03"),
]
)
tm.assert_series_equal(pd.to_datetime(s, cache=cache), expected)
tm.assert_series_equal(
pd.to_datetime(s, format="%Y-%m-%d", cache=cache), expected
)
class TestDaysInMonth:
# tests for issue #10154
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_coerce(self, cache):
assert isna(to_datetime("2015-02-29", errors="coerce", cache=cache))
assert isna(
to_datetime("2015-02-29", format="%Y-%m-%d", errors="coerce", cache=cache)
)
assert isna(
to_datetime("2015-02-32", format="%Y-%m-%d", errors="coerce", cache=cache)
)
assert isna(
to_datetime("2015-04-31", format="%Y-%m-%d", errors="coerce", cache=cache)
)
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_raise(self, cache):
msg = "day is out of range for month"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-29", errors="raise", cache=cache)
msg = "time data 2015-02-29 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-29", errors="raise", format="%Y-%m-%d", cache=cache)
msg = "time data 2015-02-32 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-32", errors="raise", format="%Y-%m-%d", cache=cache)
msg = "time data 2015-04-31 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-04-31", errors="raise", format="%Y-%m-%d", cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_ignore(self, cache):
assert to_datetime("2015-02-29", errors="ignore", cache=cache) == "2015-02-29"
assert (
to_datetime("2015-02-29", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-02-29"
)
assert (
to_datetime("2015-02-32", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-02-32"
)
assert (
to_datetime("2015-04-31", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-04-31"
)
class TestDatetimeParsingWrappers:
@pytest.mark.parametrize(
"date_str,expected",
list(
{
"2011-01-01": datetime(2011, 1, 1),
"2Q2005": datetime(2005, 4, 1),
"2Q05": datetime(2005, 4, 1),
"2005Q1": datetime(2005, 1, 1),
"05Q1": datetime(2005, 1, 1),
"2011Q3": datetime(2011, 7, 1),
"11Q3": datetime(2011, 7, 1),
"3Q2011": datetime(2011, 7, 1),
"3Q11": datetime(2011, 7, 1),
# quarterly without space
"2000Q4": datetime(2000, 10, 1),
"00Q4": datetime(2000, 10, 1),
"4Q2000": datetime(2000, 10, 1),
"4Q00": datetime(2000, 10, 1),
"2000q4": datetime(2000, 10, 1),
"2000-Q4": datetime(2000, 10, 1),
"00-Q4": datetime(2000, 10, 1),
"4Q-2000": datetime(2000, 10, 1),
"4Q-00": datetime(2000, 10, 1),
"00q4": datetime(2000, 10, 1),
"2005": datetime(2005, 1, 1),
"2005-11": datetime(2005, 11, 1),
"2005 11": datetime(2005, 11, 1),
"11-2005": datetime(2005, 11, 1),
"11 2005": datetime(2005, 11, 1),
"200511": datetime(2020, 5, 11),
"20051109": datetime(2005, 11, 9),
"20051109 10:15": datetime(2005, 11, 9, 10, 15),
"20051109 08H": datetime(2005, 11, 9, 8, 0),
"2005-11-09 10:15": datetime(2005, 11, 9, 10, 15),
"2005-11-09 08H": datetime(2005, 11, 9, 8, 0),
"2005/11/09 10:15": datetime(2005, 11, 9, 10, 15),
"2005/11/09 08H": datetime(2005, 11, 9, 8, 0),
"Thu Sep 25 10:36:28 2003": datetime(2003, 9, 25, 10, 36, 28),
"Thu Sep 25 2003": datetime(2003, 9, 25),
"Sep 25 2003": datetime(2003, 9, 25),
"January 1 2014": datetime(2014, 1, 1),
                # GH 10537
"2014-06": datetime(2014, 6, 1),
"06-2014": datetime(2014, 6, 1),
"2014-6": datetime(2014, 6, 1),
"6-2014": datetime(2014, 6, 1),
"20010101 12": datetime(2001, 1, 1, 12),
"20010101 1234": datetime(2001, 1, 1, 12, 34),
"20010101 123456": datetime(2001, 1, 1, 12, 34, 56),
}.items()
),
)
@pytest.mark.parametrize("cache", [True, False])
def test_parsers(self, date_str, expected, cache):
# dateutil >= 2.5.0 defaults to yearfirst=True
# https://github.com/dateutil/dateutil/issues/217
yearfirst = True
result1, _ = parsing.parse_time_string(date_str, yearfirst=yearfirst)
result2 = to_datetime(date_str, yearfirst=yearfirst)
result3 = to_datetime([date_str], yearfirst=yearfirst)
# result5 is used below
result4 = to_datetime(
np.array([date_str], dtype=object), yearfirst=yearfirst, cache=cache
)
result6 = DatetimeIndex([date_str], yearfirst=yearfirst)
# result7 is used below
result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst)
result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst)
for res in [result1, result2]:
assert res == expected
for res in [result3, result4, result6, result8, result9]:
exp = DatetimeIndex([Timestamp(expected)])
tm.assert_index_equal(res, exp)
        # these really need to have yearfirst, but we don't support it
if not yearfirst:
result5 = Timestamp(date_str)
assert result5 == expected
result7 = date_range(date_str, freq="S", periods=1, yearfirst=yearfirst)
assert result7 == expected
@pytest.mark.parametrize("cache", [True, False])
def test_na_values_with_cache(
self, cache, unique_nulls_fixture, unique_nulls_fixture2
):
# GH22305
expected = Index([NaT, NaT], dtype="datetime64[ns]")
result = to_datetime([unique_nulls_fixture, unique_nulls_fixture2], cache=cache)
tm.assert_index_equal(result, expected)
def test_parsers_nat(self):
# Test that each of several string-accepting methods return pd.NaT
result1, _ = parsing.parse_time_string("NaT")
result2 = to_datetime("NaT")
result3 = Timestamp("NaT")
result4 = DatetimeIndex(["NaT"])[0]
assert result1 is NaT
assert result2 is NaT
assert result3 is NaT
assert result4 is NaT
@pytest.mark.parametrize("cache", [True, False])
def test_parsers_dayfirst_yearfirst(self, cache):
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# bug fix in 2.5.2
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# revert of bug in 2.5.2
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must be in 1..12
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# str : dayfirst, yearfirst, expected
cases = {
"10-11-12": [
(False, False, datetime(2012, 10, 11)),
(True, False, datetime(2012, 11, 10)),
(False, True, datetime(2010, 11, 12)),
(True, True, datetime(2010, 12, 11)),
],
"20/12/21": [
(False, False, datetime(2021, 12, 20)),
(True, False, datetime(2021, 12, 20)),
(False, True, datetime(2020, 12, 21)),
(True, True, datetime(2020, 12, 21)),
],
}
for date_str, values in cases.items():
for dayfirst, yearfirst, expected in values:
# compare with dateutil result
dateutil_result = parse(
date_str, dayfirst=dayfirst, yearfirst=yearfirst
)
assert dateutil_result == expected
result1, _ = parsing.parse_time_string(
date_str, dayfirst=dayfirst, yearfirst=yearfirst
)
# we don't support dayfirst/yearfirst here:
if not dayfirst and not yearfirst:
result2 = Timestamp(date_str)
assert result2 == expected
result3 = to_datetime(
date_str, dayfirst=dayfirst, yearfirst=yearfirst, cache=cache
)
result4 = DatetimeIndex(
[date_str], dayfirst=dayfirst, yearfirst=yearfirst
)[0]
assert result1 == expected
assert result3 == expected
assert result4 == expected
@pytest.mark.parametrize("cache", [True, False])
def test_parsers_timestring(self, cache):
# must be the same as dateutil result
cases = {
"10:15": (parse("10:15"), datetime(1, 1, 1, 10, 15)),
"9:05": (parse("9:05"), datetime(1, 1, 1, 9, 5)),
}
for date_str, (exp_now, exp_def) in cases.items():
result1, _ = parsing.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = Timestamp(date_str)
result5 = DatetimeIndex([date_str])[0]
            # parse_time_string returns a time based on the default date
            # (year 1); the others use today's date, which can't be changed
            # because it is relied upon by time series plotting
assert result1 == exp_def
assert result2 == exp_now
assert result3 == exp_now
assert result4 == exp_now
assert result5 == exp_now
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"dt_string, tz, dt_string_repr",
[
(
"2013-01-01 05:45+0545",
pytz.FixedOffset(345),
"Timestamp('2013-01-01 05:45:00+0545', tz='pytz.FixedOffset(345)')",
),
(
"2013-01-01 05:30+0530",
pytz.FixedOffset(330),
"Timestamp('2013-01-01 05:30:00+0530', tz='pytz.FixedOffset(330)')",
),
],
)
def test_parsers_timezone_minute_offsets_roundtrip(
self, cache, dt_string, tz, dt_string_repr
):
# GH11708
base = to_datetime("2013-01-01 00:00:00", cache=cache)
base = base.tz_localize("UTC").tz_convert(tz)
dt_time = to_datetime(dt_string, cache=cache)
assert base == dt_time
assert dt_string_repr == repr(dt_time)
@pytest.fixture(params=["D", "s", "ms", "us", "ns"])
def units(request):
"""Day and some time units.
* D
* s
* ms
* us
* ns
"""
return request.param
@pytest.fixture
def epoch_1960():
"""Timestamp at 1960-01-01."""
return Timestamp("1960-01-01")
@pytest.fixture
def units_from_epochs():
return list(range(5))
@pytest.fixture(params=["timestamp", "pydatetime", "datetime64", "str_1960"])
def epochs(epoch_1960, request):
"""Timestamp at 1960-01-01 in various forms.
* Timestamp
* datetime.datetime
* numpy.datetime64
* str
"""
assert request.param in {"timestamp", "pydatetime", "datetime64", "str_1960"}
if request.param == "timestamp":
return epoch_1960
elif request.param == "pydatetime":
return epoch_1960.to_pydatetime()
elif request.param == "datetime64":
return epoch_1960.to_datetime64()
else:
return str(epoch_1960)
@pytest.fixture
def julian_dates():
return pd.date_range("2014-1-1", periods=10).to_julian_date().values
class TestOrigin:
def test_to_basic(self, julian_dates):
# gh-11276, gh-11745
# for origin as julian
result = Series(pd.to_datetime(julian_dates, unit="D", origin="julian"))
expected = Series(
pd.to_datetime(julian_dates - Timestamp(0).to_julian_date(), unit="D")
)
tm.assert_series_equal(result, expected)
result = Series(pd.to_datetime([0, 1, 2], unit="D", origin="unix"))
expected = Series(
[Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")]
)
tm.assert_series_equal(result, expected)
# default
result = Series(pd.to_datetime([0, 1, 2], unit="D"))
expected = Series(
[Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")]
)
tm.assert_series_equal(result, expected)
def test_julian_round_trip(self):
result = pd.to_datetime(2456658, origin="julian", unit="D")
assert result.to_julian_date() == 2456658
# out-of-bounds
msg = "1 is Out of Bounds for origin='julian'"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(1, origin="julian", unit="D")
def test_invalid_unit(self, units, julian_dates):
# checking for invalid combination of origin='julian' and unit != D
if units != "D":
msg = "unit must be 'D' for origin='julian'"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(julian_dates, unit=units, origin="julian")
def test_invalid_origin(self):
# need to have a numeric specified
msg = "it must be numeric with a unit specified"
with pytest.raises(ValueError, match=msg):
pd.to_datetime("2005-01-01", origin="1960-01-01")
with pytest.raises(ValueError, match=msg):
pd.to_datetime("2005-01-01", origin="1960-01-01", unit="D")
def test_epoch(self, units, epochs, epoch_1960, units_from_epochs):
expected = Series(
[pd.Timedelta(x, unit=units) + epoch_1960 for x in units_from_epochs]
)
result = Series(pd.to_datetime(units_from_epochs, unit=units, origin=epochs))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"origin, exc",
[
("random_string", ValueError),
("epoch", ValueError),
("13-24-1990", ValueError),
(datetime(1, 1, 1), tslib.OutOfBoundsDatetime),
],
)
def test_invalid_origins(self, origin, exc, units, units_from_epochs):
msg = f"origin {origin} (is Out of Bounds|cannot be converted to a Timestamp)"
with pytest.raises(exc, match=msg):
pd.to_datetime(units_from_epochs, unit=units, origin=origin)
def test_invalid_origins_tzinfo(self):
# GH16842
with pytest.raises(ValueError, match="must be tz-naive"):
pd.to_datetime(1, unit="D", origin=datetime(2000, 1, 1, tzinfo=pytz.utc))
@pytest.mark.parametrize("format", [None, "%Y-%m-%d %H:%M:%S"])
def test_to_datetime_out_of_bounds_with_format_arg(self, format):
# see gh-23830
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime("2417-10-27 00:00:00", format=format)
def test_processing_order(self):
# make sure we handle out-of-bounds *before*
# constructing the dates
result = pd.to_datetime(200 * 365, unit="D")
expected = Timestamp("2169-11-13 00:00:00")
assert result == expected
result = pd.to_datetime(200 * 365, unit="D", origin="1870-01-01")
expected = Timestamp("2069-11-13 00:00:00")
assert result == expected
result = pd.to_datetime(300 * 365, unit="D", origin="1870-01-01")
expected = Timestamp("2169-10-20 00:00:00")
assert result == expected
@pytest.mark.parametrize(
"offset,utc,exp",
[
["Z", True, "2019-01-01T00:00:00.000Z"],
["Z", None, "2019-01-01T00:00:00.000Z"],
["-01:00", True, "2019-01-01T01:00:00.000Z"],
["-01:00", None, "2019-01-01T00:00:00.000-01:00"],
],
)
def test_arg_tz_ns_unit(self, offset, utc, exp):
# GH 25546
arg = "2019-01-01T00:00:00.000" + offset
result = to_datetime([arg], unit="ns", utc=utc)
expected = to_datetime([exp])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"listlike,do_caching",
[([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], False), ([1, 1, 1, 1, 4, 5, 6, 7, 8, 9], True)],
)
def test_should_cache(listlike, do_caching):
assert (
tools.should_cache(listlike, check_count=len(listlike), unique_share=0.7)
== do_caching
)
@pytest.mark.parametrize(
"unique_share,check_count, err_message",
[
(0.5, 11, r"check_count must be in next bounds: \[0; len\(arg\)\]"),
(10, 2, r"unique_share must be in next bounds: \(0; 1\)"),
],
)
def test_should_cache_errors(unique_share, check_count, err_message):
arg = [5] * 10
with pytest.raises(AssertionError, match=err_message):
tools.should_cache(arg, unique_share, check_count)
def test_nullable_integer_to_datetime():
# Test for #30050
ser = Series([1, 2, None, 2 ** 61, None])
ser = ser.astype("Int64")
ser_copy = ser.copy()
res = pd.to_datetime(ser, unit="ns")
expected = Series(
[
np.datetime64("1970-01-01 00:00:00.000000001"),
np.datetime64("1970-01-01 00:00:00.000000002"),
np.datetime64("NaT"),
np.datetime64("2043-01-25 23:56:49.213693952"),
np.datetime64("NaT"),
]
)
tm.assert_series_equal(res, expected)
# Check that ser isn't mutated
tm.assert_series_equal(ser, ser_copy)
@pytest.mark.parametrize("klass", [np.array, list])
def test_na_to_datetime(nulls_fixture, klass):
if isinstance(nulls_fixture, Decimal):
with pytest.raises(TypeError, match="not convertible to datetime"):
pd.to_datetime(klass([nulls_fixture]))
else:
result = pd.to_datetime(klass([nulls_fixture]))
assert result[0] is pd.NaT
def test_empty_string_datetime_coerce__format():
# GH13044
td = Series(["03/24/2016", "03/25/2016", ""])
format = "%m/%d/%Y"
# coerce empty string to pd.NaT
result = pd.to_datetime(td, format=format, errors="coerce")
expected = Series(["2016-03-24", "2016-03-25", pd.NaT], dtype="datetime64[ns]")
tm.assert_series_equal(expected, result)
# raise an exception in case a format is given
with pytest.raises(ValueError, match="does not match format"):
result = pd.to_datetime(td, format=format, errors="raise")
    # don't raise an exception in case no format is given
result = pd.to_datetime(td, errors="raise")
tm.assert_series_equal(result, expected)
def test_empty_string_datetime_coerce__unit():
# GH13044
# coerce empty string to pd.NaT
result = pd.to_datetime([1, ""], unit="s", errors="coerce")
expected = DatetimeIndex(["1970-01-01 00:00:01", "NaT"], dtype="datetime64[ns]")
tm.assert_index_equal(expected, result)
# verify that no exception is raised even when errors='raise' is set
result = pd.to_datetime([1, ""], unit="s", errors="raise")
tm.assert_index_equal(expected, result)
|
the-stack_0_9384 | from typing import FrozenSet
from collections.abc import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
bool_type = msat_get_bool_type(menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(menv, b_vars[idx][0]),
msat_make_not(menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(menv, pred, it[0])
x_pred = msat_make_and(menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
r2s, x_r2s = decl_consts(menv, "r2s", int_type)
s2r, x_s2r = decl_consts(menv, "s2r", int_type)
delta, x_delta = decl_consts(menv, delta_name, real_type)
sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
for comp in [sender, receiver]:
for s, x_s in comp.symb2next.items():
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
init = msat_make_and(menv, receiver.init, sender.init)
trans = msat_make_and(menv, receiver.trans, sender.trans)
# invar delta >= 0
init = msat_make_and(menv, init,
msat_make_geq(menv, delta, zero))
trans = msat_make_and(menv, trans,
msat_make_geq(menv, x_delta, zero))
# delta > 0 -> (r2s' = r2s & s2r' = s2r)
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_r2s, r2s),
msat_make_equal(menv, x_s2r, s2r))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
# (G F !s.stutter) -> G (s.wait_ack -> F s.send)
lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
enc.make_F(sender.send)))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
class Module:
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
c_name = "{}_{}".format(self.name, v_name)
return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt, x_evt = self._symb("evt", bool_type)
msg_id, x_msg_id = self._symb("msg_id", int_type)
timeout, x_timeout = self._symb("timeout", real_type)
c, x_c = self._symb("c", real_type)
self.move = evt
self.stutter = msat_make_not(menv, evt)
self.x_move = x_evt
self.x_stutter = msat_make_not(menv, x_evt)
self.send = loc
self.wait_ack = msat_make_not(menv, loc)
self.x_send = x_loc
self.x_wait_ack = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
timeout: x_timeout, c: x_c}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
base_timeout = one
# send & c = 0 & msg_id = 0
self.init = msat_make_and(menv,
msat_make_and(menv, self.send,
msat_make_equal(menv, c,
zero)),
msat_make_equal(menv, msg_id, zero))
# invar: wait_ack -> c <= timeout
self.init = msat_make_and(
menv, self.init,
msat_make_impl(menv, self.wait_ack,
msat_make_leq(menv, c, timeout)))
self.trans = msat_make_impl(menv, self.x_wait_ack,
msat_make_leq(menv, x_c, x_timeout))
# delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
# c' = c + delta & out_c' = out_c
lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_msg_id, msg_id)),
msat_make_and(menv,
msat_make_equal(menv, x_timeout, timeout),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))))
rhs = msat_make_and(menv, rhs,
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, self.move,
msat_make_equal(menv, delta, zero))
# (send & send') ->
# (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_send))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id, msg_id),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (send & wait_ack') ->
# (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_wait_ack))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id,
msat_make_plus(menv, msg_id, one)),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (c' = 0 & out_c' = out_c &
# (wait_ack' <-> (in_c != msg_id & c > timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs_iff = msat_make_and(menv,
msat_make_not(menv,
msat_make_equal(menv, in_c,
msg_id)),
msat_make_geq(menv, c, timeout))
rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c,
out_c)),
rhs_iff)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & wait_ack') -> (timeout' > timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack,
self.x_wait_ack))
rhs = msat_make_gt(menv, x_timeout, timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs = msat_make_iff(menv, self.x_send,
msat_make_and(menv,
msat_make_equal(menv, in_c, msg_id),
msat_make_lt(menv, c, timeout)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & send') -> (timeout' = base_timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack, self.x_send))
rhs = msat_make_equal(menv, x_timeout, base_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
self.wait = loc
self.work = msat_make_not(menv, loc)
self.x_wait = x_loc
self.x_work = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc}
zero = msat_make_number(menv, "0")
# wait
self.init = self.wait
# delta > 0 -> loc' = loc & out_c' = out_c
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_impl(menv, lhs, rhs)
disc_t = msat_make_equal(menv, delta, zero)
# wait -> (wait' <-> in_c = out_c)
lhs = msat_make_and(menv, disc_t, self.wait)
rhs = msat_make_iff(menv, self.x_wait,
msat_make_equal(menv, in_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & wait') -> (out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_wait))
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & work') -> out_c' = in_c
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_work))
rhs = msat_make_equal(menv, x_out_c, in_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# work -> out_c' = out_c
lhs = msat_make_and(menv, disc_t, self.work)
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
delta = mgr.Symbol(delta_name, types.REAL)
r2s = mgr.Symbol("r2s", types.INT)
    s2r = mgr.Symbol("s2r", types.INT)
s_l = mgr.Symbol("s_l", types.BOOL)
s_evt = mgr.Symbol("s_evt", types.BOOL)
s_msg_id = mgr.Symbol("s_msg_id", types.INT)
s_timeout = mgr.Symbol("s_timeout", types.REAL)
s_c = mgr.Symbol("s_c", types.REAL)
r_l = mgr.Symbol("r_l", types.BOOL)
symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
r_l])
x_delta = symb_to_next(mgr, delta)
x_r2s = symb_to_next(mgr, r2s)
x_s2r = symb_to_next(mgr, s2r)
x_s_l = symb_to_next(mgr, s_l)
x_s_evt = symb_to_next(mgr, s_evt)
x_s_msg_id = symb_to_next(mgr, s_msg_id)
x_s_timeout = symb_to_next(mgr, s_timeout)
x_s_c = symb_to_next(mgr, s_c)
x_r_l = symb_to_next(mgr, r_l)
res = []
r0 = mgr.Real(0)
r1 = mgr.Real(1)
i0 = mgr.Int(0)
i1 = mgr.Int(1)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i1))
hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
return frozenset(res)
|
the-stack_0_9389 | from celery import shared_task
from django.conf import settings
from django.core.mail import send_mail
from django.urls import reverse
@shared_task()
def send_email_task(subject, message,
email_from, recipient_list):
send_mail(subject, message,
email_from, recipient_list, fail_silently=False)
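# Illustrative usage sketch (not part of the original module): the task above would
# normally be queued asynchronously with Celery's .delay()/.apply_async(), e.g. from a
# view or signal handler. The subject, body and recipient below are made-up placeholders.
#
#   send_email_task.delay(
#       "Welcome",
#       "Thanks for signing up.",
#       settings.EMAIL_HOST_USER,
#       ["[email protected]"],
#   )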
# @shared_task()
# def send_activation_code_async(email_to, code):
# path = reverse('account:activate', args=(code,))
#
# send_mail(
# 'Your activation code',
# f'http://127.0.0.1:8000{path}',
# '[email protected]',
# [email_to],
# fail_silently=False,
# )
@shared_task()
def send_activation_code_sms(email_to, code):
send_mail(
'Your activation code',
code,
        from_email=settings.EMAIL_HOST_USER,  # send_mail expects a single from address
        recipient_list=[email_to],  # and a list of recipient addresses
fail_silently=False,
)
|
the-stack_0_9390 | __all__ = [
"build_train_batch",
"build_valid_batch",
"build_infer_batch",
"train_dl",
"valid_dl",
"infer_dl",
]
from mmdet.core import BitmapMasks
from icevision.core import *
from icevision.imports import *
from icevision.models.utils import *
from icevision.models.mmdet.common.bbox.dataloaders import (
_img_tensor,
_img_meta,
_labels,
_bboxes,
)
def train_dl(dataset, batch_tfms=None, **dataloader_kwargs) -> DataLoader:
return transform_dl(
dataset=dataset,
build_batch=build_train_batch,
batch_tfms=batch_tfms,
**dataloader_kwargs
)
def valid_dl(dataset, batch_tfms=None, **dataloader_kwargs) -> DataLoader:
return transform_dl(
dataset=dataset,
build_batch=build_valid_batch,
batch_tfms=batch_tfms,
**dataloader_kwargs
)
def infer_dl(dataset, batch_tfms=None, **dataloader_kwargs) -> DataLoader:
"""A `DataLoader` with a custom `collate_fn` that batches items as required for inferring the model.
# Arguments
dataset: Possibly a `Dataset` object, but more generally, any `Sequence` that returns records.
batch_tfms: Transforms to be applied at the batch level.
**dataloader_kwargs: Keyword arguments that will be internally passed to a Pytorch `DataLoader`.
The parameter `collate_fn` is already defined internally and cannot be passed here.
# Returns
A Pytorch `DataLoader`.
"""
return transform_dl(
dataset=dataset,
build_batch=build_infer_batch,
batch_tfms=batch_tfms,
**dataloader_kwargs
)
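# Illustrative usage sketch (not part of the original module): building an inference
# DataLoader and pulling one batch. `infer_ds` is a hypothetical icevision Dataset
# created elsewhere; extra keyword arguments are forwarded to the PyTorch DataLoader.
#
#   loader = infer_dl(infer_ds, batch_size=4, shuffle=False)
#   data, records = next(iter(loader))
#   # data["img"][0] is a stacked image tensor, data["img_metas"][0] the mmdet image metas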
def build_valid_batch(
records: Sequence[RecordType], batch_tfms=None
) -> Tuple[dict, List[Dict[str, torch.Tensor]]]:
return build_train_batch(records=records, batch_tfms=batch_tfms)
def build_train_batch(
records: Sequence[RecordType], batch_tfms=None
) -> Tuple[dict, List[Dict[str, torch.Tensor]]]:
records = common_build_batch(records=records, batch_tfms=batch_tfms)
images, labels, bboxes, masks, img_metas = [], [], [], [], []
for record in records:
images.append(_img_tensor(record))
img_metas.append(_img_meta_mask(record))
labels.append(_labels(record))
bboxes.append(_bboxes(record))
masks.append(_masks(record))
data = {
"img": torch.stack(images),
"img_metas": img_metas,
"gt_labels": labels,
"gt_bboxes": bboxes,
"gt_masks": masks,
}
return data, records
def build_infer_batch(records, batch_tfms=None):
records = common_build_batch(records, batch_tfms=batch_tfms)
imgs, img_metas = [], []
for record in records:
imgs.append(_img_tensor(record))
img_metas.append(_img_meta_mask(record))
data = {
"img": [torch.stack(imgs)],
"img_metas": [img_metas],
}
return data, records
def _img_meta_mask(record):
img_meta = _img_meta(record)
img_meta["ori_shape"] = img_meta["pad_shape"]
return img_meta
def _masks(record):
if len(record["masks"]) == 0:
raise RuntimeError("Negative samples still needs to be implemented")
else:
mask = record["masks"].data
_, h, w = mask.shape
return BitmapMasks(mask, height=h, width=w)
|
the-stack_0_9393 |
__all__ = ['Serializer', 'SerializerError']
from .error import YAMLError
from .events import *
from .nodes import *
class SerializerError(YAMLError):
pass
class Serializer:
ANCHOR_TEMPLATE = 'id%03d'
def __init__(self, encoding=None,
explicit_start=None, explicit_end=None, version=None, tags=None):
self.use_encoding = encoding
self.use_explicit_start = explicit_start
self.use_explicit_end = explicit_end
self.use_version = version
self.use_tags = tags
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
self.closed = None
def open(self):
if self.closed is None:
self.emit(StreamStartEvent(encoding=self.use_encoding))
self.closed = False
elif self.closed:
raise SerializerError("serializer is closed")
else:
raise SerializerError("serializer is already opened")
def close(self):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif not self.closed:
self.emit(StreamEndEvent())
self.closed = True
#def __del__(self):
# self.close()
def serialize(self, node):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif self.closed:
raise SerializerError("serializer is closed")
self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
version=self.use_version, tags=self.use_tags))
self.anchor_node(node)
self.serialize_node(node, None, None)
self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
def anchor_node(self, node):
if node in self.anchors:
if self.anchors[node] is None:
self.anchors[node] = self.generate_anchor(node)
else:
self.anchors[node] = None
if isinstance(node, SequenceNode):
for item in node.value:
self.anchor_node(item)
elif isinstance(node, MappingNode):
for key, value in node.value:
self.anchor_node(key)
self.anchor_node(value)
def generate_anchor(self, node):
self.last_anchor_id += 1
return self.ANCHOR_TEMPLATE % self.last_anchor_id
def serialize_node(self, node, parent, index):
alias = self.anchors[node]
if node in self.serialized_nodes:
self.emit(AliasEvent(alias))
else:
self.serialized_nodes[node] = True
self.descend_resolver(parent, index)
if isinstance(node, ScalarNode):
detected_tag = self.resolve(ScalarNode, node.value, (True, False))
default_tag = self.resolve(ScalarNode, node.value, (False, True))
implicit = (node.tag == detected_tag), (node.tag == default_tag)
self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
style=node.style))
elif isinstance(node, SequenceNode):
implicit = (node.tag
== self.resolve(SequenceNode, node.value, True))
self.emit(SequenceStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
index = 0
for item in node.value:
self.serialize_node(item, node, index)
index += 1
self.emit(SequenceEndEvent())
elif isinstance(node, MappingNode):
implicit = (node.tag
== self.resolve(MappingNode, node.value, True))
self.emit(MappingStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
for key, value in node.value:
self.serialize_node(key, node, None)
self.serialize_node(value, node, key)
self.emit(MappingEndEvent())
self.ascend_resolver()
|
the-stack_0_9395 | import os
import unittest
from recipe_scrapers.geniuskitchen import GeniusKitchen
class TestGeniusKitchen(unittest.TestCase):
def setUp(self):
# tests are run from tests.py
with open(os.path.join(
os.getcwd(),
'recipe_scrapers',
'tests',
'test_data',
'geniuskitchen.testhtml'
)) as file_opened:
self.harvester_class = GeniusKitchen(file_opened, test=True)
def test_host(self):
self.assertEqual(
'geniuskitchen.com',
self.harvester_class.host()
)
def test_title(self):
self.assertEqual(
self.harvester_class.title(),
'Quiche Lorraine Cups'
)
def test_total_time(self):
self.assertEqual(
40,
self.harvester_class.total_time()
)
def test_ingredients(self):
self.assertCountEqual(
[
'12 cooked crepes (, see All Purpose Dinner Crepes Batter)',
'4 slices bacon, cooked crisp &,crumbled',
'1 cup swiss cheese, grated',
'2 tablespoons flour',
'1⁄4 teaspoon salt',
'2 eggs',
'1 cup milk'
],
self.harvester_class.ingredients()
)
def test_instructions(self):
return self.assertEqual(
'Lightly grease a 12 muffin pan or 12 custard cups.\nLine each with a crepe, fluting them.\nSprinkle bacon into the crepes.\nDivide the cheese between the crepes.\nMix together the flour, salt.\nMix the beaten eggs and milk, add to the flour.\nBlend well and pour into the crepes on top of the cheese.\nBake in 350F oven for 15-20 minutes or until firm.\nCool 5 minutes before removing from pan.',
self.harvester_class.instructions()
)
def test_ratings(self):
self.assertEqual(
5.0,
self.harvester_class.ratings()
)
|
the-stack_0_9396 | # Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from lucid.modelzoo.vision_base import Model, _layers_from_list_of_dicts
def _populate_inception_bottlenecks(scope):
"""Add Inception bottlenecks and their pre-Relu versions to the graph."""
graph = tf.get_default_graph()
for op in graph.get_operations():
if op.name.startswith(scope+'/') and 'Concat' in op.type:
name = op.name.split('/')[1]
pre_relus = []
for tower in op.inputs[1:]:
if tower.op.type == 'Relu':
tower = tower.op.inputs[0]
pre_relus.append(tower)
concat_name = scope + '/' + name + '_pre_relu'
_ = tf.concat(pre_relus, -1, name=concat_name)
class InceptionV1(Model):
"""InceptionV1 (or 'GoogLeNet')
This is a (re?)implementation of InceptionV1
https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf
The weights were trained at Google and released in an early TensorFlow
tutorial. It is possible the parameters are the original weights
(trained in TensorFlow's predecessor), but we haven't been able to
confirm this.
As far as we can tell, it is exactly the same as the model described in
the original paper, where as the slim and caffe implementations have
minor implementation differences (such as eliding the heads).
"""
model_path = 'gs://modelzoo/vision/other_models/InceptionV1.pb'
labels_path = 'gs://modelzoo/labels/ImageNet_alternate.txt'
synsets_path = 'gs://modelzoo/labels/ImageNet_alternate_synsets.txt'
dataset = 'ImageNet'
image_shape = [224, 224, 3]
image_value_range = (-117, 255-117)
input_name = 'input'
def post_import(self, scope):
_populate_inception_bottlenecks(scope)
InceptionV1.layers = _layers_from_list_of_dicts(InceptionV1(), [
{'tags': ['conv'], 'name': 'conv2d0', 'depth': 64},
{'tags': ['conv'], 'name': 'conv2d1', 'depth': 64},
{'tags': ['conv'], 'name': 'conv2d2', 'depth': 192},
{'tags': ['conv'], 'name': 'mixed3a', 'depth': 256},
{'tags': ['conv'], 'name': 'mixed3b', 'depth': 480},
{'tags': ['conv'], 'name': 'mixed4a', 'depth': 508},
{'tags': ['conv'], 'name': 'mixed4b', 'depth': 512},
{'tags': ['conv'], 'name': 'mixed4c', 'depth': 512},
{'tags': ['conv'], 'name': 'mixed4d', 'depth': 528},
{'tags': ['conv'], 'name': 'mixed4e', 'depth': 832},
{'tags': ['conv'], 'name': 'mixed5a', 'depth': 832},
{'tags': ['conv'], 'name': 'mixed5b', 'depth': 1024},
{'tags': ['conv'], 'name': 'head0_bottleneck', 'depth': 128},
{'tags': ['dense'], 'name': 'nn0', 'depth': 1024},
{'tags': ['dense'], 'name': 'softmax0', 'depth': 1008},
{'tags': ['conv'], 'name': 'head1_bottleneck', 'depth': 128},
{'tags': ['dense'], 'name': 'nn1', 'depth': 1024},
{'tags': ['dense'], 'name': 'softmax1', 'depth': 1008},
{'tags': ['dense'], 'name': 'softmax2', 'depth': 1008},
])
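# Illustrative usage sketch (not part of the original module): loading the frozen graph
# and visualizing one channel with lucid's optvis renderer. Assumes lucid.optvis is
# available; the layer/channel choice is arbitrary.
#
#   from lucid.optvis import render
#   model = InceptionV1()
#   model.load_graphdef()
#   _ = render.render_vis(model, "mixed4a_pre_relu:476")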
class InceptionV1_adv_finetuned(InceptionV1):
"""adversarially fine-tuned InceptionV1
This model is based on InceptionV1 and has been fine-tuned with
PGD-generated adversarial examples (https://arxiv.org/pdf/1706.06083.pdf).
The PGD-attack was L2-bounded with an epsilon of 255 (1.0 for normalized images).
After fine-tuning, this model achieves a robust top-5 accuracy of ~67%
for eps. 255 L2-bounded adversarial examples compared to ~4% before fine-tuning.
"""
model_path = 'gs://modelzoo/vision/other_models/InceptionV1_adv_finetuned.pb'
|
the-stack_0_9398 | from django.contrib.auth.models import User, Group
from django.conf import settings
from django.db import models
from django.db import connection
from django.db.models.signals import post_save
from pbs.prescription.models import Region, District
from smart_selects.db_fields import ChainedForeignKey
import logging
logger = logging.getLogger("log." + __name__)
class Profile(models.Model):
DEFAULT_GROUP = "Users"
user = models.OneToOneField(User)
region = models.ForeignKey(Region, blank=True, null=True, on_delete=models.PROTECT)
district = ChainedForeignKey(District,
chained_field="region", chained_model_field="region",
show_all=False, auto_choose=True, blank=True, null=True,
on_delete=models.PROTECT)
def is_fpc_user(self):
return self.user.email.lower().endswith(settings.FPC_EMAIL_EXT)
def user_post_save(sender, instance, created, **kwargs):
"""Create a user profile when a new user account is created"""
if (created and
Profile._meta.db_table in connection.introspection.table_names()):
p = Profile()
p.user = instance
p.save()
# add the default user group (fail_silently=True)
try:
group = Group.objects.get(name__iexact=p.DEFAULT_GROUP)
except Group.DoesNotExist:
logger.warning("Failed to assign group `%s' to user `%s', "
"group `%s' does not exist.", p.DEFAULT_GROUP,
p.user.username, p.DEFAULT_GROUP)
else:
p.user.groups.add(group)
post_save.connect(user_post_save, sender=User)
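# Illustrative sketch (not part of the original module): once the signal above is
# connected, creating a user transparently creates its Profile, e.g. in a shell or test:
#
#   user = User.objects.create_user("jdoe", "[email protected]", "password")
#   assert user.profile.region is None  # row created by user_post_save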
def prescription_modified(sender, instance, created, **kwargs):
if hasattr(instance, 'prescription'):
prescription = instance.prescription
if prescription is not None:
prescription.save() # update the modified and modifier fields
post_save.connect(prescription_modified)
|
the-stack_0_9401 |
# coding: utf-8
def write_info(amr):
#import fortranformat as ff
#nout = amr.nout
aexp = amr.aexp
h0 = amr.h0 * 1e-2
rhoc = 1.88e-29
boxlen = 1.0
f = open("info_" + str(nout).zfill(5) + ".txt", 'w')
for name, val in zip(["ncpu", "ndim", "levelmin", "levelmax", "ngridmax", "nstep_coarse"],
[amr.ncpu, amr.ndim, levelmin, amr.nlevelmax, amr.ngridmax, amr.nstep_coarse]):
f.write("{:<12s}={:11d} \n".format(name, val))
f.write("\n")
#lineformat = ff.FortranRecordWriter('(1E23.15)')
scale_d = amr.Om * rhoc * h0**2 / aexp**3
scale_t = aexp**2 / (h0*1e5/3.08e24)
scale_l = aexp* amr.boxlen * 3.08e24/(h0)
for name, val in zip(["boxlen", "time", "aexp", "H0", "omega_m", "omega_l", "omega_k", "omega_b",
"unit_l", "unit_d", "unit_t"],
[boxlen, amr.t, aexp, h0, amr.Om, amr.Ol, amr.Ok, amr.Ob, scale_l, scale_d, scale_t]):
f.write("{:<12s}={:.15E} \n".format(name,val))
f.write("\n")
f.write("ordering type=" + ah.ordering[0].decode("UTF-8"))
f.write("\n DOMAIN ind_min ind_max \n")
for i in range(amr.ncpu):
f.write("{:8d} {:.15E} {:.15E}\n".format(i+1, amr.bound_key[i],amr.bound_key[i+1]))
f.close()
"""
This can generate the 'header' part of an info file.
However, it is not trivial to read 128-bit floating-point (QUADHILBERT) numbers from raw binary in Python.
Instead, I used a Fortran program to read amr.00001 and output the Hilbert keys in the info format.
"""
wdir = "./"
from pyram import load
nouts = range(113, 120)
for nout in nouts:
ah = load.sim.AmrHeader()
snout = str(nout).zfill(5)
ah._read_amr_header(open(wdir + "output_"+snout+"/amr_"+snout+".out00001", 'rb'), skip_header=False)
levelmin = 8 # From other info file
write_info(ah)
|
the-stack_0_9403 | from scipy.stats import genpareto, norm
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
def flip(edge, np_random):
return 1 if np_random.uniform() < edge else -1
class KellyCoinflipEnv(gym.Env):
"""The Kelly coinflip game is a simple gambling introduced by Haghani & Dewey 2016's
'Rational Decision-Making Under Uncertainty: Observed Betting Patterns on a Biased
Coin' (https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2856963), to test human
decision-making in a setting like that of the stock market: positive expected value
but highly stochastic; they found many subjects performed badly, often going broke,
even though optimal play would reach the maximum with ~95% probability. In the
coinflip game, the player starts with $25.00 to gamble over 300 rounds; each round,
they can bet anywhere up to their net worth (in penny increments), and then a coin is
flipped; with P=0.6, the player wins twice what they bet, otherwise, they lose it.
$250 is the maximum players are allowed to have. At the end of the 300 rounds, they
keep whatever they have. The human subjects earned an average of $91; a simple use of
the Kelly criterion (https://en.wikipedia.org/wiki/Kelly_criterion), giving a
strategy of betting 20% until the cap is hit, would earn $240; a decision tree
analysis shows that optimal play earns $246 (https://www.gwern.net/Coin-flip).
The game short-circuits when either wealth = $0 (since one can never recover) or
wealth = cap (trivial optimal play: one simply bets nothing thereafter).
In this implementation, we default to the paper settings of $25, 60% odds, wealth cap
of $250, and 300 rounds. To specify the action space in advance, we multiply the
wealth cap (in dollars) by 100 (to allow for all penny bets); should one attempt to
bet more money than one has, it is rounded down to one's net worth. (Alternately, a
mistaken bet could end the episode immediately; it's not clear to me which version
would be better.) For a harder version which randomizes the 3 key parameters, see the
Generalized Kelly coinflip game."""
metadata = {"render.modes": ["human"]}
def __init__(self, initial_wealth=25.0, edge=0.6, max_wealth=250.0, max_rounds=300):
self.action_space = spaces.Discrete(int(max_wealth * 100)) # betting in penny
# increments
self.observation_space = spaces.Tuple(
(
spaces.Box(0, max_wealth, [1], dtype=np.float32), # (w,b)
spaces.Discrete(max_rounds + 1),
)
)
self.reward_range = (0, max_wealth)
self.edge = edge
self.wealth = initial_wealth
self.initial_wealth = initial_wealth
self.max_rounds = max_rounds
self.max_wealth = max_wealth
self.np_random = None
self.rounds = None
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
bet_in_dollars = min(
action / 100.0, self.wealth
) # action = desired bet in pennies
self.rounds -= 1
coinflip = flip(self.edge, self.np_random)
self.wealth = min(self.max_wealth, self.wealth + coinflip * bet_in_dollars)
done = self.wealth < 0.01 or self.wealth == self.max_wealth or not self.rounds
reward = self.wealth if done else 0.0
return self._get_obs(), reward, done, {}
def _get_obs(self):
return np.array([self.wealth]), self.rounds
def reset(self):
self.rounds = self.max_rounds
self.wealth = self.initial_wealth
return self._get_obs()
def render(self, mode="human"):
print("Current wealth: ", self.wealth, "; Rounds left: ", self.rounds)
class KellyCoinflipGeneralizedEnv(gym.Env):
"""The Generalized Kelly coinflip game is an extension by ArthurB & Gwern Branwen
which expands the Kelly coinflip game MDP into a POMDP, where the 3 key parameters
(edge, maximum wealth, and number of rounds) are unknown random variables drawn
    from 3 distributions: a Beta(7,3) for the coinflip edge 0-1, a N(300,25) for the total
number of rounds, and a Pareto(5,200) for the wealth cap. These distributions are
chosen to be conjugate & easily updatable, to allow for inference (other choices
like the geometric for number of rounds wouldn't make observations informative),
and to loosely reflect what a human might expect in the original Kelly coinflip
game given that the number of rounds wasn't strictly fixed and they weren't told
the wealth cap until they neared it. With these particular distributions, the
entire history of the game can be summarized into a few sufficient statistics of
rounds-elapsed/wins/losses/max-wealth-ever-reached, from which the Bayes-optimal
decision can (in theory) be made; to avoid all agents having to tediously track
those sufficient statistics manually in the same way, the observation space is
augmented from wealth/rounds-left (rounds-left is deleted because it is a hidden
variable) to current-wealth/rounds-elapsed/wins/losses/maximum-observed-wealth.
The simple Kelly coinflip game can easily be solved by calculating decision trees,
but the Generalized Kelly coinflip game may be intractable (although the analysis
for the edge case alone suggests that the Bayes-optimal value may be very close to
what one would calculate using a decision tree for any specific case), and
represents a good challenge for RL agents."""
metadata = {"render.modes": ["human"]}
def __init__(
self,
initial_wealth=25.0,
edge_prior_alpha=7,
edge_prior_beta=3,
max_wealth_alpha=5.0,
max_wealth_m=200.0,
max_rounds_mean=300.0,
max_rounds_sd=25.0,
reseed=True,
clip_distributions=False,
):
# clip_distributions=True asserts that state and action space are not modified at reset()
# store the hyper-parameters for passing back into __init__() during resets so
# the same hyper-parameters govern the next game's parameters, as the user
# expects:
# TODO: this is boilerplate, is there any more elegant way to do this?
self.initial_wealth = float(initial_wealth)
self.edge_prior_alpha = edge_prior_alpha
self.edge_prior_beta = edge_prior_beta
self.max_wealth_alpha = max_wealth_alpha
self.max_wealth_m = max_wealth_m
self.max_rounds_mean = max_rounds_mean
self.max_rounds_sd = max_rounds_sd
self.clip_distributions = clip_distributions
if reseed or not hasattr(self, "np_random"):
self.seed()
# draw this game's set of parameters:
edge = self.np_random.beta(edge_prior_alpha, edge_prior_beta)
if self.clip_distributions:
# (clip/resample some parameters to be able to fix obs/action space sizes/bounds)
max_wealth_bound = round(
genpareto.ppf(0.85, max_wealth_alpha, max_wealth_m)
)
max_wealth = max_wealth_bound + 1.0
while max_wealth > max_wealth_bound:
max_wealth = round(
genpareto.rvs(
max_wealth_alpha, max_wealth_m, random_state=self.np_random
)
)
max_rounds_bound = int(
round(norm.ppf(0.99, max_rounds_mean, max_rounds_sd))
)
max_rounds = max_rounds_bound + 1
while max_rounds > max_rounds_bound:
max_rounds = int(
round(self.np_random.normal(max_rounds_mean, max_rounds_sd))
)
else:
max_wealth = round(
genpareto.rvs(
max_wealth_alpha, max_wealth_m, random_state=self.np_random
)
)
max_wealth_bound = max_wealth
max_rounds = int(
round(self.np_random.normal(max_rounds_mean, max_rounds_sd))
)
max_rounds_bound = max_rounds
# add an additional global variable which is the sufficient statistic for the
# Pareto distribution on wealth cap; alpha doesn't update, but x_m does, and
# simply is the highest wealth count we've seen to date:
self.max_ever_wealth = float(self.initial_wealth)
# for the coinflip edge, it is total wins/losses:
self.wins = 0
self.losses = 0
# for the number of rounds, we need to remember how many rounds we've played:
self.rounds_elapsed = 0
# the rest proceeds as before:
self.action_space = spaces.Discrete(int(max_wealth_bound * 100))
self.observation_space = spaces.Tuple(
(
spaces.Box(
0, max_wealth_bound, shape=[1], dtype=np.float32
), # current wealth
spaces.Discrete(max_rounds_bound + 1), # rounds elapsed
spaces.Discrete(max_rounds_bound + 1), # wins
spaces.Discrete(max_rounds_bound + 1), # losses
spaces.Box(0, max_wealth_bound, [1], dtype=np.float32),
)
) # maximum observed wealth
self.reward_range = (0, max_wealth)
self.edge = edge
self.wealth = self.initial_wealth
self.max_rounds = max_rounds
self.rounds = self.max_rounds
self.max_wealth = max_wealth
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
bet_in_dollars = min(action / 100.0, self.wealth)
self.rounds -= 1
coinflip = flip(self.edge, self.np_random)
self.wealth = min(self.max_wealth, self.wealth + coinflip * bet_in_dollars)
self.rounds_elapsed += 1
        if coinflip > 0:  # flip() returns +1/-1, so test the sign explicitly (-1 is truthy)
self.max_ever_wealth = max(self.wealth, self.max_ever_wealth)
self.wins += 1
else:
self.losses += 1
done = self.wealth < 0.01 or self.wealth == self.max_wealth or not self.rounds
reward = self.wealth if done else 0.0
return self._get_obs(), reward, done, {}
def _get_obs(self):
return (
np.array([float(self.wealth)]),
self.rounds_elapsed,
self.wins,
self.losses,
np.array([float(self.max_ever_wealth)]),
)
def reset(self):
# re-init everything to draw new parameters etc, but preserve the RNG for
# reproducibility and pass in the same hyper-parameters as originally specified:
self.__init__(
initial_wealth=self.initial_wealth,
edge_prior_alpha=self.edge_prior_alpha,
edge_prior_beta=self.edge_prior_beta,
max_wealth_alpha=self.max_wealth_alpha,
max_wealth_m=self.max_wealth_m,
max_rounds_mean=self.max_rounds_mean,
max_rounds_sd=self.max_rounds_sd,
reseed=False,
clip_distributions=self.clip_distributions,
)
return self._get_obs()
def render(self, mode="human"):
print(
"Current wealth: ",
self.wealth,
"; Rounds left: ",
self.rounds,
"; True edge: ",
self.edge,
"; True max wealth: ",
self.max_wealth,
"; True stopping time: ",
self.max_rounds,
"; Rounds left: ",
self.max_rounds - self.rounds_elapsed,
)
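# Minimal rollout sketch (not part of the original environments): play one episode
# of the basic game with a fixed 20% Kelly-style bet each round.
if __name__ == "__main__":
    env = KellyCoinflipEnv()
    obs = env.reset()
    done = False
    while not done:
        wealth = float(obs[0][0])
        action = int(wealth * 0.2 * 100)  # bet 20% of current wealth, expressed in pennies
        obs, reward, done, _ = env.step(action)
    print("Final wealth:", reward)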
|
the-stack_0_9406 | #!/usr/bin/env python
import cv2
import datautils.structures.mp
import montage
from .... import log
from .. import utils
logger = log.get_logger(__name__)
#logger.addHandler(logging.StreamHandler())
#logger.setLevel(logging.DEBUG)
class NormSerf(datautils.structures.mp.TimedSerf):
def setup(self, config, grab_buffers, norm_buffers, bg_buffer):
logger.debug(
"NormSerf[%s] setup: %s, %s, %s, %s",
self, config, grab_buffers, norm_buffers, bg_buffer)
self.config = config
self.image_size = config['crop'][:2]
self.grab_buffers = grab_buffers
self.norm_buffers = norm_buffers
self.bg_buffer = bg_buffer
self.setup_buffers()
if 'log_serfs' in config:
utils.log_serf_to_directory(self, config['log_serfs'])
def set_config(self, config):
logger.debug("NormSerf[%s] set_config: %s", self, config)
self.config = config
def setup_buffers(self):
logger.debug("NormSerf[%s] setup_buffers", self)
h, w, s = self.config['crop']
self.grabs = [
montage.io.Image(utils.buffer_as_array(b, 'u2', (h, w, s)))
for b in self.grab_buffers]
self.norms = [
utils.buffer_as_array(b, 'f4', (h, w)) for b in self.norm_buffers]
self.bg = montage.io.Image(
utils.buffer_as_array(self.bg_buffer, 'f4', (h, w)))
def normalize_grab(self, buffer_index):
logger.debug("NormSerf[%s] normalize_grab: %s", self, buffer_index)
# tests on camera node show cv2 is faster (7 ms vs 12 ms)
cv2.multiply(
self.grabs[buffer_index], self.bg,
self.norms[buffer_index], dtype=cv2.CV_32F)
#self.norms[buffer_index][:, :] = self.grabs[buffer_index] * self.bg
self.send('norm', buffer_index)
class NormLord(datautils.structures.mp.Lord):
def __init__(self, config, buffers):
logger.debug(
"NormLord[%s] __init__: %s, %s", self, config, buffers)
datautils.structures.mp.Lord.__init__(self)
self.config = config
self.buffers = buffers
def start(self, wait=True):
logger.debug("NormLord[%s] start", self)
datautils.structures.mp.Lord.start(
self, NormSerf, (
self.config, self.buffers.grab_buffers,
self.buffers.norm_buffers, self.buffers.bg_buffer), wait=wait)
def set_config(self, config):
logger.debug("NormLord[%s] set_config: %s", self, config)
self.send('set_config', config)
def normalize_grab(self, index):
logger.debug("NormLord[%s] normalize_grab: %s", self, index)
self.buffers.lock_grab(index)
self.send('normalize_grab', index)
def norm(self, index):
logger.debug("NormLord[%s] norm: %s", self, index)
self.buffers.unlock_grab(index)
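# Rough usage sketch (not part of the original module). It assumes the surrounding
# camera package supplies a config dict plus a `buffers` object exposing
# grab_buffers, norm_buffers, bg_buffer and lock_grab/unlock_grab, as used above:
#
#     lord = NormLord(config, buffers)
#     lord.start(wait=True)        # spawns a NormSerf process
#     lord.normalize_grab(0)       # serf multiplies grab buffer 0 by the background image
#     lord.set_config(new_config)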
|
the-stack_0_9407 | #!/usr/bin/env python
from operator import itemgetter
import sys
current_year = 0
max_temp = 0
temp = 0
# input comes from STDIN
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# parse the input we got from mapper.py
year, temp = line.split('\t', 1)
    # convert year and temp (currently strings) to int
try:
year = int(year)
temp = int(temp)
except ValueError:
        # year/temp was not a number, so silently
# ignore/discard this line
continue
# this IF-switch only works because Hadoop sorts map output
    # by key (here: year) before it is passed to the reducer
if current_year == year:
max_temp = max(max_temp, temp)
else:
if current_year != 0:
# write result to STDOUT
print('%d\t%d' % (current_year, max_temp))
max_temp = temp
current_year = year
# do not forget to output the last year if needed!
if current_year == year:
print('%d\t%d' % (current_year, max_temp)) |
the-stack_0_9408 | # GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains some Mixin classes for the db objects.
A bunch of functions on the db objects are really more like
"utility functions": They could live outside the classes
and be called "by hand" passing the appropiate reference.
They usually only use the public API of the object and
rarely use database related stuff.
These functions now live here and get "mixed in" into the
real objects.
"""
import uuid
import re
from datetime import datetime
from pytz import UTC
from werkzeug.utils import cached_property
from mediagoblin.media_types import FileTypeNotSupported
from mediagoblin.tools import common, licenses
from mediagoblin.tools.pluginapi import hook_handle
from mediagoblin.tools.text import cleaned_markdown_conversion
from mediagoblin.tools.url import slugify
from mediagoblin.tools.translate import pass_to_ugettext as _
class CommentingMixin:
"""
Mixin that gives classes methods to get and add the comments on/to it
This assumes the model has a "comments" class which is a ForeignKey to the
Collection model. This will hold a Collection of comments which are
associated to this model. It also assumes the model has an "actor"
ForeignKey which points to the creator/publisher/etc. of the model.
NB: This is NOT the mixin for the Comment Model, this is for
other models which support commenting.
"""
def get_comment_link(self):
# Import here to avoid cyclic imports
from mediagoblin.db.models import Comment, GenericModelReference
gmr = GenericModelReference.query.filter_by(
obj_pk=self.id,
model_type=self.__tablename__
).first()
if gmr is None:
return None
link = Comment.query.filter_by(comment_id=gmr.id).first()
return link
def get_reply_to(self):
link = self.get_comment_link()
if link is None or link.target_id is None:
return None
return link.target()
def soft_delete(self, *args, **kwargs):
link = self.get_comment_link()
if link is not None:
link.delete()
super().soft_delete(*args, **kwargs)
class GeneratePublicIDMixin:
"""
    Mixin that ensures that the public_id field is populated.
The public_id is the ID that is used in the API, this must be globally
unique and dereferencable. This will be the URL for the API view of the
object. It's used in several places, not only is it used to give out via
the API but it's also vital information stored when a soft_deletion occurs
on the `Graveyard.public_id` field, this is needed to follow the spec which
says we have to be able to provide a shell of an object and return a 410
    (rather than a 404) for an object that has been deleted.
    This requires the urlgen off the request object (`request.urlgen`) to be
    provided, as the ID is a URL.
"""
def get_public_id(self, urlgen):
# Verify that the class this is on actually has a public_id field...
if "public_id" not in self.__table__.columns.keys():
raise Exception("Model has no public_id field")
# Great! the model has a public id, if it's None, let's create one!
if self.public_id is None:
# We need the internal ID for this so ensure we've been saved.
self.save(commit=False)
# Create the URL
self.public_id = urlgen(
"mediagoblin.api.object",
object_type=self.object_type,
id=str(uuid.uuid4()),
qualified=True
)
self.save()
return self.public_id
class UserMixin:
object_type = "person"
@property
def bio_html(self):
return cleaned_markdown_conversion(self.bio)
def url_for_self(self, urlgen, **kwargs):
"""Generate a URL for this User's home page."""
return urlgen('mediagoblin.user_pages.user_home',
user=self.username, **kwargs)
class GenerateSlugMixin:
"""
Mixin to add a generate_slug method to objects.
Depends on:
- self.slug
- self.title
- self.check_slug_used(new_slug)
"""
def generate_slug(self):
"""
Generate a unique slug for this object.
This one does not *force* slugs, but usually it will probably result
in a niceish one.
The end *result* of the algorithm will result in these resolutions for
these situations:
- If we have a slug, make sure it's clean and sanitized, and if it's
unique, we'll use that.
- If we have a title, slugify it, and if it's unique, we'll use that.
- If we can't get any sort of thing that looks like it'll be a useful
slug out of a title or an existing slug, bail, and don't set the
slug at all. Don't try to create something just because. Make
sure we have a reasonable basis for a slug first.
- If we have a reasonable basis for a slug (either based on existing
slug or slugified title) but it's not unique, first try appending
the entry's id, if that exists
- If that doesn't result in something unique, tack on some randomly
generated bits until it's unique. That'll be a little bit of junk,
but at least it has the basis of a nice slug.
"""
#Is already a slug assigned? Check if it is valid
if self.slug:
slug = slugify(self.slug)
# otherwise, try to use the title.
elif self.title:
# assign slug based on title
slug = slugify(self.title)
else:
# We don't have any information to set a slug
return
# We don't want any empty string slugs
if slug == "":
return
# Otherwise, let's see if this is unique.
if self.check_slug_used(slug):
# It looks like it's being used... lame.
# Can we just append the object's id to the end?
if self.id:
slug_with_id = "{}-{}".format(slug, self.id)
if not self.check_slug_used(slug_with_id):
self.slug = slug_with_id
return # success!
# okay, still no success;
# let's whack junk on there till it's unique.
slug += '-' + uuid.uuid4().hex[:4]
# keep going if necessary!
while self.check_slug_used(slug):
slug += uuid.uuid4().hex[:4]
# self.check_slug_used(slug) must be False now so we have a slug that
# we can use now.
self.slug = slug
class MediaEntryMixin(GenerateSlugMixin, GeneratePublicIDMixin):
def check_slug_used(self, slug):
# import this here due to a cyclic import issue
# (db.models -> db.mixin -> db.util -> db.models)
from mediagoblin.db.util import check_media_slug_used
return check_media_slug_used(self.actor, slug, self.id)
@property
def object_type(self):
""" Converts media_type to pump-like type - don't use internally """
return self.media_type.split(".")[-1]
@property
def description_html(self):
"""
Rendered version of the description, run through
Markdown and cleaned with our cleaning tool.
"""
return cleaned_markdown_conversion(self.description)
def get_display_media(self):
"""Find the best media for display.
We try checking self.media_manager.fetching_order if it exists to
pull down the order.
Returns:
(media_size, media_path)
or, if not found, None.
"""
fetch_order = self.media_manager.media_fetch_order
# No fetching order found? well, give up!
if not fetch_order:
return None
media_sizes = self.media_files.keys()
for media_size in fetch_order:
if media_size in media_sizes:
return media_size, self.media_files[media_size]
def get_all_media(self):
"""
        Returns all available qualities of a media (except original)
"""
fetch_order = self.media_manager.media_fetch_order
# No fetching order found? well, give up!
if not fetch_order:
return None
media_sizes = self.media_files.keys()
all_media_path = []
for media_size in fetch_order:
if media_size in media_sizes and media_size != 'original':
file_metadata = self.get_file_metadata(media_size)
size = file_metadata['medium_size']
if media_size != 'webm_video':
all_media_path.append((media_size[5:], size,
self.media_files[media_size]))
else:
all_media_path.append(('default', size,
self.media_files[media_size]))
return all_media_path
def main_mediafile(self):
pass
@property
def slug_or_id(self):
if self.slug:
return self.slug
else:
return 'id:%s' % self.id
def url_for_self(self, urlgen, **extra_args):
"""
Generate an appropriate url for ourselves
Use a slug if we have one, else use our 'id'.
"""
uploader = self.get_actor
return urlgen(
'mediagoblin.user_pages.media_home',
user=uploader.username,
media=self.slug_or_id,
**extra_args)
@property
def thumb_url(self):
"""Return the thumbnail URL (for usage in templates)
Will return either the real thumbnail or a default fallback icon."""
# TODO: implement generic fallback in case MEDIA_MANAGER does
# not specify one?
if 'thumb' in self.media_files:
thumb_url = self._app.public_store.file_url(
self.media_files['thumb'])
else:
# No thumbnail in media available. Get the media's
# MEDIA_MANAGER for the fallback icon and return static URL
# Raises FileTypeNotSupported in case no such manager is enabled
manager = self.media_manager
thumb_url = self._app.staticdirector(manager['default_thumb'])
return thumb_url
@property
def original_url(self):
""" Returns the URL for the original image
will return self.thumb_url if original url doesn't exist"""
if "original" not in self.media_files:
return self.thumb_url
return self._app.public_store.file_url(
self.media_files["original"]
)
@property
def icon_url(self):
'''Return the icon URL (for usage in templates) if it exists'''
try:
return self._app.staticdirector(
self.media_manager['type_icon'])
except AttributeError:
return None
@cached_property
def media_manager(self):
"""Returns the MEDIA_MANAGER of the media's media_type
Raises FileTypeNotSupported in case no such manager is enabled
"""
manager = hook_handle(('media_manager', self.media_type))
if manager:
return manager(self)
# Not found? Then raise an error
raise FileTypeNotSupported(
"MediaManager not in enabled types. Check media_type plugins are"
" enabled in config?")
def get_fail_exception(self):
"""
Get the exception that's appropriate for this error
"""
if self.fail_error:
try:
return common.import_component(self.fail_error)
except ImportError:
# TODO(breton): fail_error should give some hint about why it
# failed. fail_error is used as a path to import().
# Unfortunately, I didn't know about that and put general error
# message there. Maybe it's for the best, because for admin,
# we could show even some raw python things. Anyway, this
# should be properly resolved. Now we are in a freeze, that's
# why I simply catch ImportError.
return None
def get_license_data(self):
"""Return license dict for requested license"""
return licenses.get_license_by_url(self.license or "")
def exif_display_iter(self):
if not self.media_data:
return
exif_all = self.media_data.get("exif_all")
for key in exif_all:
label = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', key)
yield label.replace('EXIF', '').replace('Image', ''), exif_all[key]
def exif_display_data_short(self):
"""Display a very short practical version of exif info"""
if not self.media_data:
return
exif_all = self.media_data.get("exif_all")
exif_short = {}
if 'Image DateTimeOriginal' in exif_all:
# format date taken
takendate = datetime.strptime(
exif_all['Image DateTimeOriginal']['printable'],
'%Y:%m:%d %H:%M:%S').date()
taken = takendate.strftime('%B %d %Y')
exif_short.update({'Date Taken': taken})
aperture = None
if 'EXIF FNumber' in exif_all:
fnum = str(exif_all['EXIF FNumber']['printable']).split('/')
# calculate aperture
if len(fnum) == 2:
aperture = "f/%.1f" % (float(fnum[0])/float(fnum[1]))
elif fnum[0] != 'None':
aperture = "f/%s" % (fnum[0])
if aperture:
exif_short.update({'Aperture': aperture})
short_keys = [
('Camera', 'Image Model', None),
('Exposure', 'EXIF ExposureTime', lambda x: '%s sec' % x),
('ISO Speed', 'EXIF ISOSpeedRatings', None),
('Focal Length', 'EXIF FocalLength', lambda x: '%s mm' % x)]
for label, key, fmt_func in short_keys:
try:
val = fmt_func(exif_all[key]['printable']) if fmt_func \
else exif_all[key]['printable']
exif_short.update({label: val})
except KeyError:
pass
return exif_short
class TextCommentMixin(GeneratePublicIDMixin):
object_type = "comment"
@property
def content_html(self):
"""
the actual html-rendered version of the comment displayed.
Run through Markdown and the HTML cleaner.
"""
return cleaned_markdown_conversion(self.content)
def __unicode__(self):
return '<{klass} #{id} {actor} "{comment}">'.format(
klass=self.__class__.__name__,
id=self.id,
actor=self.get_actor,
comment=self.content)
def __repr__(self):
return '<{klass} #{id} {actor} "{comment}">'.format(
klass=self.__class__.__name__,
id=self.id,
actor=self.get_actor,
comment=self.content)
class CollectionMixin(GenerateSlugMixin, GeneratePublicIDMixin):
object_type = "collection"
def check_slug_used(self, slug):
# import this here due to a cyclic import issue
# (db.models -> db.mixin -> db.util -> db.models)
from mediagoblin.db.util import check_collection_slug_used
return check_collection_slug_used(self.actor, slug, self.id)
@property
def description_html(self):
"""
Rendered version of the description, run through
Markdown and cleaned with our cleaning tool.
"""
return cleaned_markdown_conversion(self.description)
@property
def slug_or_id(self):
return (self.slug or self.id)
def url_for_self(self, urlgen, **extra_args):
"""
Generate an appropriate url for ourselves
Use a slug if we have one, else use our 'id'.
"""
creator = self.get_actor
return urlgen(
'mediagoblin.user_pages.user_collection',
user=creator.username,
collection=self.slug_or_id,
**extra_args)
def add_to_collection(self, obj, content=None, commit=True):
""" Adds an object to the collection """
# It's here to prevent cyclic imports
from mediagoblin.db.models import CollectionItem
# Need the ID of this collection for this so check we've got one.
self.save(commit=False)
# Create the CollectionItem
item = CollectionItem()
item.collection = self.id
item.get_object = obj
if content is not None:
item.note = content
self.num_items = self.num_items + 1
# Save both!
self.save(commit=commit)
item.save(commit=commit)
return item
class CollectionItemMixin:
@property
def note_html(self):
"""
the actual html-rendered version of the note displayed.
Run through Markdown and the HTML cleaner.
"""
return cleaned_markdown_conversion(self.note)
class ActivityMixin(GeneratePublicIDMixin):
object_type = "activity"
VALID_VERBS = ["add", "author", "create", "delete", "dislike", "favorite",
"follow", "like", "post", "share", "unfavorite", "unfollow",
"unlike", "unshare", "update", "tag"]
def get_url(self, request):
return request.urlgen(
"mediagoblin.user_pages.activity_view",
username=self.get_actor.username,
id=self.id,
qualified=True
)
def generate_content(self):
""" Produces a HTML content for object """
        # some of these have simple and targetted versions. If self.target is set
# it will pick the targetted. If they DON'T have a targetted version
# the information in targetted won't be added to the content.
verb_to_content = {
"add": {
"simple" : _("{username} added {object}"),
"targetted": _("{username} added {object} to {target}"),
},
"author": {"simple": _("{username} authored {object}")},
"create": {"simple": _("{username} created {object}")},
"delete": {"simple": _("{username} deleted {object}")},
"dislike": {"simple": _("{username} disliked {object}")},
"favorite": {"simple": _("{username} favorited {object}")},
"follow": {"simple": _("{username} followed {object}")},
"like": {"simple": _("{username} liked {object}")},
"post": {
"simple": _("{username} posted {object}"),
"targetted": _("{username} posted {object} to {target}"),
},
"share": {"simple": _("{username} shared {object}")},
"unfavorite": {"simple": _("{username} unfavorited {object}")},
"unfollow": {"simple": _("{username} stopped following {object}")},
"unlike": {"simple": _("{username} unliked {object}")},
"unshare": {"simple": _("{username} unshared {object}")},
"update": {"simple": _("{username} updated {object}")},
"tag": {"simple": _("{username} tagged {object}")},
}
object_map = {
"image": _("an image"),
"comment": _("a comment"),
"collection": _("a collection"),
"video": _("a video"),
"audio": _("audio"),
"person": _("a person"),
}
obj = self.object()
target = None if self.target_id is None else self.target()
actor = self.get_actor
content = verb_to_content.get(self.verb, None)
if content is None or self.object is None:
return
# Decide what to fill the object with
if hasattr(obj, "title") and obj.title.strip(" "):
object_value = obj.title
elif obj.object_type in object_map:
object_value = object_map[obj.object_type]
else:
object_value = _("an object")
# Do we want to add a target (indirect object) to content?
if target is not None and "targetted" in content:
if hasattr(target, "title") and target.title.strip(" "):
target_value = target.title
elif target.object_type in object_map:
target_value = object_map[target.object_type]
else:
target_value = _("an object")
self.content = content["targetted"].format(
username=actor.username,
object=object_value,
target=target_value
)
else:
self.content = content["simple"].format(
username=actor.username,
object=object_value
)
return self.content
def serialize(self, request):
href = request.urlgen(
"mediagoblin.api.object",
object_type=self.object_type,
id=self.id,
qualified=True
)
published = UTC.localize(self.published)
updated = UTC.localize(self.updated)
obj = {
"id": href,
"actor": self.get_actor.serialize(request),
"verb": self.verb,
"published": published.isoformat(),
"updated": updated.isoformat(),
"content": self.content,
"url": self.get_url(request),
"object": self.object().serialize(request),
"objectType": self.object_type,
"links": {
"self": {
"href": href,
},
},
}
if self.generator:
obj["generator"] = self.get_generator.serialize(request)
if self.title:
obj["title"] = self.title
if self.target_id is not None:
obj["target"] = self.target().serialize(request)
return obj
def unseralize(self, data):
"""
        Takes the data given and sets it on this activity.
Several pieces of data are not written on because of security
reasons. For example changing the author or id of an activity.
"""
if "verb" in data:
self.verb = data["verb"]
if "title" in data:
self.title = data["title"]
if "content" in data:
self.content = data["content"]
|
the-stack_0_9409 | import requests
import re
import pytesseract
from PIL import Image
def getPage(baseUrl):
r = requests.get(baseUrl)
if r.status_code != 200:
print("Page does not seem to be online. Could you double check it?")
return r.text
def searchHackWords(content):
comp = re.compile('h[a4]ck[e3]d', re.IGNORECASE)
res = comp.findall(content)
if bool(res):
return res
return None
def checkTextDefacement(baseUrl):
content = getPage(baseUrl)
res = searchHackWords(content)
return res
def checkImgDefacement(baseUrl):
im = Image.open("/home/valle/Downloads/4.jpg")
text = pytesseract.image_to_string(im)
res = searchHackWords(text)
return res
def checkDefacement(baseUrl):
resTxt = checkTextDefacement(baseUrl)
resImg = checkImgDefacement(baseUrl)
if resTxt or resImg:
print("### Possibly hacked ###")
print("Matched terms:")
if resTxt:
for t in resTxt:
print("\t"+t)
if resImg:
for t in resImg:
print("\t"+t)
else:
print("This page seems to be clean")
|
the-stack_0_9410 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 15 14:00:36 2018
@author: Eric
"""
import glob
import random
import pandas as pd
import numpy as np
def more_work(n, user):
all_txt = glob.glob("*.txt")
all_pmc_files = set()
# Gets all of the files that we have done into a set
for txt in all_txt:
file_data = np.genfromtxt(txt, dtype = int)
for pmc_file in file_data:
all_pmc_files.add(pmc_file)
# Gets all of the PMC files
pmcs = glob.glob("..\\..\\annotation_test\\*.html")
# Determines which PMC files we haven't done, and creates a list of N of them
to_do = []
for file in pmcs:
loc = '..\\..\\annotation_test\\PMC'
file_num = int(file.replace(loc, "").replace(".html", "")) # trim the file to have just the number
if not(file_num in all_pmc_files):
to_do.append(file_num)
random.shuffle(to_do)
to_do = to_do[:n]
for i in range(len(to_do)):
print(to_do[i])
loc = ".//ordering_list_" + user + ".txt"
done = np.loadtxt(loc, dtype = int)
new_list = np.append(done, to_do)
    np.savetxt(loc, new_list, fmt='%d')  # keep integers so np.loadtxt(loc, dtype=int) can read it back
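# Example invocation (hypothetical arguments; assumes the relative
# ..\..\annotation_test directory of PMC*.html files and a pre-existing
# ordering_list_<user>.txt file next to this script):
#
#     more_work(10, "eric")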
|
the-stack_0_9411 | from collections import OrderedDict
class ParseError(ValueError):
pass
class WpaSupplicantConf:
"""This class parses a wpa_supplicant configuration file, allows
manipulation of the configured networks and then writing out of
the updated file.
WARNING: Although care has been taken to preserve ordering,
comments will be lost for any wpa_supplicant.conf which is
round-tripped through this class.
"""
def __init__(self, lines):
self._fields = OrderedDict()
self._networks = OrderedDict()
network = None
for line in lines:
line = line.strip()
if not line or line.startswith('#'):
continue
if line == "}":
if network is None:
raise ParseError("unxpected '}'")
ssid = network.pop('ssid', None)
if ssid is None:
raise ParseError('missing "ssid" for network')
self._networks[dequote(ssid)] = network
network = None
continue
parts = [x.strip() for x in line.split('=', 1)]
if len(parts) != 2:
raise ParseError("invalid line: %{!r}".format(line))
left, right = parts
if right == '{':
if left != 'network':
raise ParseError('unsupported section: "{}"'.format(left))
if network is not None:
raise ParseError("can't nest networks")
network = OrderedDict()
else:
if network is None:
self._fields[left] = right
else:
network[left] = right
def fields(self):
return self._fields
def networks(self):
return self._networks
def add_network(self, ssid, **attrs):
self._networks[ssid] = attrs
def remove_network(self, ssid):
self._networks.pop(ssid, None)
def write(self, f):
for name, value in self._fields.items():
f.write("{}={}\n".format(name, value))
for ssid, info in self._networks.items():
f.write("\nnetwork={\n")
f.write(' ssid="{}"\n'.format(ssid))
for name, value in info.items():
f.write(" {}={}\n".format(name, value))
f.write("}\n")
def dequote(v):
if len(v) < 2:
return v
if v.startswith('"') and v.endswith('"'):
return v[1:-1]
return v
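# Round-trip sketch (not part of the original module); the network names and keys
# below are invented for illustration. Note that write() only quotes the ssid, so
# string values such as psk must carry their own quotes.
if __name__ == "__main__":
    import sys
    sample = [
        'ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev',
        'update_config=1',
        'network={',
        '    ssid="homenet"',
        '    psk="correct horse battery staple"',
        '}',
    ]
    conf = WpaSupplicantConf(sample)
    conf.add_network("officenet", psk='"hunter2"', key_mgmt="WPA-PSK")
    conf.write(sys.stdout)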
|
the-stack_0_9412 | import logging
import socket
log = logging.getLogger(__name__)
POLICY = (
'<cross-domain-policy><allow-access-from domain="*" '
'to-ports="*" /></cross-domain-policy>\0'
)
POLICYREQUEST = "<policy-file-request/>"
def client_handle(sock, address):
log.info("%s:%s: Connection accepted." % address)
sock.settimeout(3)
try:
input_data = sock.recv(128)
if input_data.startswith(POLICYREQUEST):
sock.sendall(POLICY)
log.info("%s:%s: Policy sent. Closing connection." % address)
else:
log.info("%s:%s: Wrong payload. Closing connection." % address)
except socket.timeout:
log.info("%s:%s: Timeout" % address)
sock.close()
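# Minimal serving sketch (not in the original module): a blocking accept loop on the
# conventional Flash policy port 843 (binding to it typically needs root). The
# original code was presumably driven by an async server passing (sock, address)
# pairs to client_handle.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(("0.0.0.0", 843))
    listener.listen(5)
    while True:
        conn, addr = listener.accept()
        client_handle(conn, addr)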
|
the-stack_0_9413 | import os
import subprocess
import logging
log = logging.getLogger('grocer-utils')
log.setLevel(logging.INFO)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
def foodcritic(fc_bin, path, fc_strict=False):
"""
Execute foodcritic
:rtype : tuple
:param fc_bin: path to food critic binary
    :param path: dir path to execute FC on
:param fc_strict: bool. true if foodcritic should fail if any of the checks do not pass
:return: tpl. output, errors, returncode
"""
if fc_strict:
cmd = '{0} -f any {1}'.format(fc_bin, path)
else:
cmd = '{0} {1}'.format(fc_bin, path)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True)
output, errors = p.communicate()
return output, errors, p.returncode
def rubocop(rubocop_bin, path):
"""
Execute rubocop
:rtype : tuple
:param rubocop_bin: path to food critic binary
:param path: dir path to exectue rubocop on
:return: tpl. output, errors, returncode
"""
cmd = '{0} {1}'.format(rubocop_bin, path)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True)
output, errors = p.communicate()
return output, errors, p.returncode
def berks(berks_bin, path, action='update'):
"""
Execute various berks commands
:rtype : tuple
:param berks_bin: path to berks bin
:param path: path to change directory to before running berks commands (berks is a dir context aware tool)
:param action: berks action to run, e.g. berks install
:return: tpl. output, errors, returncode
"""
cmd = 'cd {0} && {1} {2}'.format(path, berks_bin, action)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True)
output, errors = p.communicate()
return output, errors, p.returncode
def ruby_syntax(ruby_bin, path):
"""
Check ruby syntax using ruby interpreter -c flag
:rtype : tuple
:param ruby_bin: path to ruby bin
:param path: file path to ruby code to check
:return: tpl. output, errors, returncode
"""
cmd = '{0} -c {1}'.format(ruby_bin, path)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True)
output, errors = p.communicate()
return output, errors, p.returncode
def chefspec(chefspec_bin, path):
raise NotImplementedError
def get_file_types(dir_path):
"""
Get the files in a directory based on type
:rtype : tuple
:param dir_path: str. path to directory to search
:return: 4-part tuple. ruby_files, json_files, md_files, other_type
"""
ruby_files = []
json_files = []
md_files = []
other_type = []
for root, dirs, files in os.walk(dir_path):
if "git" in root:
pass
else:
for _file in files:
if _file[-3:] == '.rb':
ruby_files.append(os.path.join(root,_file))
elif _file[-5:] == '.json':
json_files.append(os.path.join(root,_file))
elif _file[-3:] == '.md':
md_files.append(os.path.join(root,_file))
else:
other_type.append(_file)
return ruby_files, json_files, md_files, other_type
def rspec_test(rspec_bin, path):
"""
    Execute rspec tests
:param rspec_bin: path to rspec bin
:param path: dir path to recipe dir root
:return: tpl. output, errors, returncode
"""
path = os.path.join(path,'test/integration/default')
if not os.path.isdir(path):
return "No rspec tests found in {0}".format(path), None, 0
cmd = '{0} -c {1}/*'.format(rspec_bin, path)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True)
output, errors = p.communicate()
return output, errors, p.returncode
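# Usage sketch (binary names and the cookbook path are assumptions, not taken from
# the original module):
if __name__ == '__main__':
    cookbook = './my-cookbook'
    out, err, rc = foodcritic('foodcritic', cookbook, fc_strict=True)
    log.info('foodcritic rc=%s\n%s', rc, out)
    ruby_files, json_files, md_files, other = get_file_types(cookbook)
    for rb in ruby_files:
        out, err, rc = ruby_syntax('ruby', rb)
        if rc != 0:
            log.error('syntax error in %s: %s', rb, err)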
|
the-stack_0_9416 | # -*- coding: utf-8 -*-
"""
pygments.lexers.r
~~~~~~~~~~~~~~~~~
Lexers for the R/S languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, words, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['RConsoleLexer', 'SLexer', 'RdLexer']
line_re = re.compile('.*?\n')
class RConsoleLexer(Lexer):
"""
For R console transcripts or R CMD BATCH output files.
"""
name = 'RConsole'
aliases = ['rconsole', 'rout']
filenames = ['*.Rout']
def get_tokens_unprocessed(self, text):
slexer = SLexer(**self.options)
current_code_block = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>') or line.startswith('+'):
# Colorize the prompt as such,
# then put rest of line into current_code_block
insertions.append((len(current_code_block),
[(0, Generic.Prompt, line[:2])]))
current_code_block += line[2:]
else:
# We have reached a non-prompt line!
# If we have stored prompt lines, need to process them first.
if current_code_block:
# Weave together the prompts and highlight code.
for item in do_insertions(
insertions, slexer.get_tokens_unprocessed(current_code_block)):
yield item
# Reset vars for next code block.
current_code_block = ''
insertions = []
# Now process the actual line itself, this is output from R.
yield match.start(), Generic.Output, line
# If we happen to end on a code block with nothing after it, need to
# process the last code block. This is neither elegant nor DRY so
# should be changed.
if current_code_block:
for item in do_insertions(
insertions, slexer.get_tokens_unprocessed(current_code_block)):
yield item
class SLexer(RegexLexer):
"""
For S, S-plus, and R source code.
.. versionadded:: 0.10
"""
name = 'S'
aliases = ['splus', 's', 'r']
filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron']
mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r',
'text/x-R', 'text/x-r-history', 'text/x-r-profile']
builtins_base = (
'Arg', 'Conj', 'Cstack_info', 'Encoding', 'FALSE',
'Filter', 'Find', 'I', 'ISOdate', 'ISOdatetime', 'Im', 'Inf',
'La.svd', 'Map', 'Math.Date', 'Math.POSIXt', 'Math.data.frame',
'Math.difftime', 'Math.factor', 'Mod', 'NA_character_',
'NA_complex_', 'NA_real_', 'NCOL', 'NROW', 'NULLNA_integer_', 'NaN',
'Negate', 'NextMethod', 'Ops.Date', 'Ops.POSIXt', 'Ops.data.frame',
'Ops.difftime', 'Ops.factor', 'Ops.numeric_version', 'Ops.ordered',
'Position', 'R.Version', 'R.home', 'R.version', 'R.version.string',
'RNGkind', 'RNGversion', 'R_system_version', 'Re', 'Recall',
'Reduce', 'Summary.Date', 'Summary.POSIXct', 'Summary.POSIXlt',
'Summary.data.frame', 'Summary.difftime', 'Summary.factor',
'Summary.numeric_version', 'Summary.ordered', 'Sys.Date',
'Sys.chmod', 'Sys.getenv', 'Sys.getlocale', 'Sys.getpid',
'Sys.glob', 'Sys.info', 'Sys.localeconv', 'Sys.readlink',
'Sys.setFileTime', 'Sys.setenv', 'Sys.setlocale', 'Sys.sleep',
'Sys.time', 'Sys.timezone', 'Sys.umask', 'Sys.unsetenv',
'Sys.which', 'TRUE', 'UseMethod', 'Vectorize', 'abbreviate', 'abs',
'acos', 'acosh', 'addNA', 'addTaskCallback', 'agrep', 'alist',
'all', 'all.equal', 'all.equal.POSIXct', 'all.equal.character',
'all.equal.default', 'all.equal.factor', 'all.equal.formula',
'all.equal.language', 'all.equal.list', 'all.equal.numeric',
'all.equal.raw', 'all.names', 'all.vars', 'any', 'anyDuplicated',
'anyDuplicated.array', 'anyDuplicated.data.frame',
'anyDuplicated.default', 'anyDuplicated.matrix', 'aperm',
'aperm.default', 'aperm.table', 'append', 'apply', 'args',
'arrayInd', 'as.Date', 'as.Date.POSIXct', 'as.Date.POSIXlt',
'as.Date.character', 'as.Date.date', 'as.Date.dates',
'as.Date.default', 'as.Date.factor', 'as.Date.numeric',
'as.POSIXct', 'as.POSIXct.Date', 'as.POSIXct.POSIXlt',
'as.POSIXct.date', 'as.POSIXct.dates', 'as.POSIXct.default',
'as.POSIXct.numeric', 'as.POSIXlt', 'as.POSIXlt.Date',
'as.POSIXlt.POSIXct', 'as.POSIXlt.character', 'as.POSIXlt.date',
'as.POSIXlt.dates', 'as.POSIXlt.default', 'as.POSIXlt.factor',
'as.POSIXlt.numeric', 'as.array', 'as.array.default', 'as.call',
'as.character', 'as.character.Date', 'as.character.POSIXt',
'as.character.condition', 'as.character.default',
'as.character.error', 'as.character.factor', 'as.character.hexmode',
'as.character.numeric_version', 'as.character.octmode',
'as.character.srcref', 'as.complex', 'as.data.frame',
'as.data.frame.AsIs', 'as.data.frame.Date', 'as.data.frame.POSIXct',
'as.data.frame.POSIXlt', 'as.data.frame.array',
'as.data.frame.character', 'as.data.frame.complex',
'as.data.frame.data.frame', 'as.data.frame.default',
'as.data.frame.difftime', 'as.data.frame.factor',
'as.data.frame.integer', 'as.data.frame.list',
'as.data.frame.logical', 'as.data.frame.matrix',
'as.data.frame.model.matrix', 'as.data.frame.numeric',
'as.data.frame.numeric_version', 'as.data.frame.ordered',
'as.data.frame.raw', 'as.data.frame.table', 'as.data.frame.ts',
'as.data.frame.vector', 'as.difftime', 'as.double',
'as.double.POSIXlt', 'as.double.difftime', 'as.environment',
'as.expression', 'as.expression.default', 'as.factor',
'as.function', 'as.function.default', 'as.hexmode', 'as.integer',
'as.list', 'as.list.Date', 'as.list.POSIXct', 'as.list.data.frame',
'as.list.default', 'as.list.environment', 'as.list.factor',
'as.list.function', 'as.list.numeric_version', 'as.logical',
'as.logical.factor', 'as.matrix', 'as.matrix.POSIXlt',
'as.matrix.data.frame', 'as.matrix.default', 'as.matrix.noquote',
'as.name', 'as.null', 'as.null.default', 'as.numeric',
'as.numeric_version', 'as.octmode', 'as.ordered',
'as.package_version', 'as.pairlist', 'as.qr', 'as.raw', 'as.single',
'as.single.default', 'as.symbol', 'as.table', 'as.table.default',
'as.vector', 'as.vector.factor', 'asNamespace', 'asS3', 'asS4',
'asin', 'asinh', 'assign', 'atan', 'atan2', 'atanh',
'attachNamespace', 'attr', 'attr.all.equal', 'attributes',
'autoload', 'autoloader', 'backsolve', 'baseenv', 'basename',
'besselI', 'besselJ', 'besselK', 'besselY', 'beta',
'bindingIsActive', 'bindingIsLocked', 'bindtextdomain', 'bitwAnd',
'bitwNot', 'bitwOr', 'bitwShiftL', 'bitwShiftR', 'bitwXor', 'body',
'bquote', 'browser', 'browserCondition', 'browserSetDebug',
'browserText', 'builtins', 'by', 'by.data.frame', 'by.default',
'bzfile', 'c.Date', 'c.POSIXct', 'c.POSIXlt', 'c.noquote',
'c.numeric_version', 'call', 'callCC', 'capabilities', 'casefold',
'cat', 'category', 'cbind', 'cbind.data.frame', 'ceiling',
'char.expand', 'charToRaw', 'charmatch', 'chartr', 'check_tzones',
'chol', 'chol.default', 'chol2inv', 'choose', 'class',
'clearPushBack', 'close', 'close.connection', 'close.srcfile',
'close.srcfilealias', 'closeAllConnections', 'col', 'colMeans',
'colSums', 'colnames', 'commandArgs', 'comment', 'computeRestarts',
'conditionCall', 'conditionCall.condition', 'conditionMessage',
'conditionMessage.condition', 'conflicts', 'contributors', 'cos',
'cosh', 'crossprod', 'cummax', 'cummin', 'cumprod', 'cumsum', 'cut',
'cut.Date', 'cut.POSIXt', 'cut.default', 'dQuote', 'data.class',
'data.matrix', 'date', 'debug', 'debugonce',
'default.stringsAsFactors', 'delayedAssign', 'deparse', 'det',
'determinant', 'determinant.matrix', 'dget', 'diag', 'diff',
'diff.Date', 'diff.POSIXt', 'diff.default', 'difftime', 'digamma',
'dim', 'dim.data.frame', 'dimnames', 'dimnames.data.frame', 'dir',
'dir.create', 'dirname', 'do.call', 'dput', 'drop', 'droplevels',
'droplevels.data.frame', 'droplevels.factor', 'dump', 'duplicated',
'duplicated.POSIXlt', 'duplicated.array', 'duplicated.data.frame',
'duplicated.default', 'duplicated.matrix',
'duplicated.numeric_version', 'dyn.load', 'dyn.unload', 'eapply',
'eigen', 'else', 'emptyenv', 'enc2native', 'enc2utf8',
'encodeString', 'enquote', 'env.profile', 'environment',
'environmentIsLocked', 'environmentName', 'eval', 'eval.parent',
'evalq', 'exists', 'exp', 'expand.grid', 'expm1', 'expression',
'factor', 'factorial', 'fifo', 'file', 'file.access', 'file.append',
'file.choose', 'file.copy', 'file.create', 'file.exists',
'file.info', 'file.link', 'file.path', 'file.remove', 'file.rename',
'file.show', 'file.symlink', 'find.package', 'findInterval',
'findPackageEnv', 'findRestart', 'floor', 'flush',
'flush.connection', 'force', 'formals', 'format',
'format.AsIs', 'format.Date', 'format.POSIXct', 'format.POSIXlt',
'format.data.frame', 'format.default', 'format.difftime',
'format.factor', 'format.hexmode', 'format.info',
'format.libraryIQR', 'format.numeric_version', 'format.octmode',
'format.packageInfo', 'format.pval', 'format.summaryDefault',
'formatC', 'formatDL', 'forwardsolve', 'gamma', 'gc', 'gc.time',
'gcinfo', 'gctorture', 'gctorture2', 'get', 'getAllConnections',
'getCallingDLL', 'getCallingDLLe', 'getConnection',
'getDLLRegisteredRoutines', 'getDLLRegisteredRoutines.DLLInfo',
'getDLLRegisteredRoutines.character', 'getElement',
'getExportedValue', 'getHook', 'getLoadedDLLs', 'getNamespace',
'getNamespaceExports', 'getNamespaceImports', 'getNamespaceInfo',
'getNamespaceName', 'getNamespaceUsers', 'getNamespaceVersion',
'getNativeSymbolInfo', 'getOption', 'getRversion', 'getSrcLines',
'getTaskCallbackNames', 'geterrmessage', 'gettext', 'gettextf',
'getwd', 'gl', 'globalenv', 'gregexpr', 'grep', 'grepRaw', 'grepl',
'gsub', 'gzcon', 'gzfile', 'head', 'iconv', 'iconvlist',
'icuSetCollate', 'identical', 'identity', 'ifelse', 'importIntoEnv',
'in', 'inherits', 'intToBits', 'intToUtf8', 'interaction', 'interactive',
'intersect', 'inverse.rle', 'invisible', 'invokeRestart',
'invokeRestartInteractively', 'is.R', 'is.array', 'is.atomic',
'is.call', 'is.character', 'is.complex', 'is.data.frame',
'is.double', 'is.element', 'is.environment', 'is.expression',
'is.factor', 'is.finite', 'is.function', 'is.infinite',
'is.integer', 'is.language', 'is.list', 'is.loaded', 'is.logical',
'is.matrix', 'is.na', 'is.na.POSIXlt', 'is.na.data.frame',
'is.na.numeric_version', 'is.name', 'is.nan', 'is.null',
'is.numeric', 'is.numeric.Date', 'is.numeric.POSIXt',
'is.numeric.difftime', 'is.numeric_version', 'is.object',
'is.ordered', 'is.package_version', 'is.pairlist', 'is.primitive',
'is.qr', 'is.raw', 'is.recursive', 'is.single', 'is.symbol',
'is.table', 'is.unsorted', 'is.vector', 'isBaseNamespace',
'isIncomplete', 'isNamespace', 'isOpen', 'isRestart', 'isS4',
'isSeekable', 'isSymmetric', 'isSymmetric.matrix', 'isTRUE',
'isatty', 'isdebugged', 'jitter', 'julian', 'julian.Date',
'julian.POSIXt', 'kappa', 'kappa.default', 'kappa.lm', 'kappa.qr',
'kronecker', 'l10n_info', 'labels', 'labels.default', 'lapply',
'lazyLoad', 'lazyLoadDBexec', 'lazyLoadDBfetch', 'lbeta', 'lchoose',
'length', 'length.POSIXlt', 'letters', 'levels', 'levels.default',
'lfactorial', 'lgamma', 'library.dynam', 'library.dynam.unload',
'licence', 'license', 'list.dirs', 'list.files', 'list2env', 'load',
'loadNamespace', 'loadedNamespaces', 'loadingNamespaceInfo',
'local', 'lockBinding', 'lockEnvironment', 'log', 'log10', 'log1p',
'log2', 'logb', 'lower.tri', 'ls', 'make.names', 'make.unique',
'makeActiveBinding', 'mapply', 'margin.table', 'mat.or.vec',
'match', 'match.arg', 'match.call', 'match.fun', 'max', 'max.col',
'mean', 'mean.Date', 'mean.POSIXct', 'mean.POSIXlt', 'mean.default',
'mean.difftime', 'mem.limits', 'memCompress', 'memDecompress',
'memory.profile', 'merge', 'merge.data.frame', 'merge.default',
'message', 'mget', 'min', 'missing', 'mode', 'month.abb',
'month.name', 'months', 'months.Date', 'months.POSIXt',
'months.abb', 'months.nameletters', 'names', 'names.POSIXlt',
'namespaceExport', 'namespaceImport', 'namespaceImportClasses',
'namespaceImportFrom', 'namespaceImportMethods', 'nargs', 'nchar',
'ncol', 'new.env', 'ngettext', 'nlevels', 'noquote', 'norm',
'normalizePath', 'nrow', 'numeric_version', 'nzchar', 'objects',
'oldClass', 'on.exit', 'open', 'open.connection', 'open.srcfile',
'open.srcfilealias', 'open.srcfilecopy', 'options', 'order',
'ordered', 'outer', 'packBits', 'packageEvent',
'packageHasNamespace', 'packageStartupMessage', 'package_version',
'pairlist', 'parent.env', 'parent.frame', 'parse',
'parseNamespaceFile', 'paste', 'paste0', 'path.expand',
'path.package', 'pipe', 'pmatch', 'pmax', 'pmax.int', 'pmin',
'pmin.int', 'polyroot', 'pos.to.env', 'pretty', 'pretty.default',
'prettyNum', 'print', 'print.AsIs', 'print.DLLInfo',
'print.DLLInfoList', 'print.DLLRegisteredRoutines', 'print.Date',
'print.NativeRoutineList', 'print.POSIXct', 'print.POSIXlt',
'print.by', 'print.condition', 'print.connection',
'print.data.frame', 'print.default', 'print.difftime',
'print.factor', 'print.function', 'print.hexmode',
'print.libraryIQR', 'print.listof', 'print.noquote',
'print.numeric_version', 'print.octmode', 'print.packageInfo',
'print.proc_time', 'print.restart', 'print.rle',
'print.simple.list', 'print.srcfile', 'print.srcref',
'print.summary.table', 'print.summaryDefault', 'print.table',
'print.warnings', 'prmatrix', 'proc.time', 'prod', 'prop.table',
'provideDimnames', 'psigamma', 'pushBack', 'pushBackLength', 'q',
'qr', 'qr.Q', 'qr.R', 'qr.X', 'qr.coef', 'qr.default', 'qr.fitted',
'qr.qty', 'qr.qy', 'qr.resid', 'qr.solve', 'quarters',
'quarters.Date', 'quarters.POSIXt', 'quit', 'quote', 'range',
'range.default', 'rank', 'rapply', 'raw', 'rawConnection',
'rawConnectionValue', 'rawShift', 'rawToBits', 'rawToChar', 'rbind',
'rbind.data.frame', 'rcond', 'read.dcf', 'readBin', 'readChar',
'readLines', 'readRDS', 'readRenviron', 'readline', 'reg.finalizer',
'regexec', 'regexpr', 'registerS3method', 'registerS3methods',
'regmatches', 'remove', 'removeTaskCallback', 'rep', 'rep.Date',
'rep.POSIXct', 'rep.POSIXlt', 'rep.factor', 'rep.int',
'rep.numeric_version', 'rep_len', 'replace', 'replicate',
'requireNamespace', 'restartDescription', 'restartFormals',
'retracemem', 'rev', 'rev.default', 'rle', 'rm', 'round',
'round.Date', 'round.POSIXt', 'row', 'row.names',
'row.names.data.frame', 'row.names.default', 'rowMeans', 'rowSums',
'rownames', 'rowsum', 'rowsum.data.frame', 'rowsum.default',
'sQuote', 'sample', 'sample.int', 'sapply', 'save', 'save.image',
'saveRDS', 'scale', 'scale.default', 'scan', 'search',
'searchpaths', 'seek', 'seek.connection', 'seq', 'seq.Date',
'seq.POSIXt', 'seq.default', 'seq.int', 'seq_along', 'seq_len',
'sequence', 'serialize', 'set.seed', 'setHook', 'setNamespaceInfo',
'setSessionTimeLimit', 'setTimeLimit', 'setdiff', 'setequal',
'setwd', 'shQuote', 'showConnections', 'sign', 'signalCondition',
'signif', 'simpleCondition', 'simpleError', 'simpleMessage',
'simpleWarning', 'simplify2array', 'sin', 'single',
'sinh', 'sink', 'sink.number', 'slice.index', 'socketConnection',
'socketSelect', 'solve', 'solve.default', 'solve.qr', 'sort',
'sort.POSIXlt', 'sort.default', 'sort.int', 'sort.list', 'split',
'split.Date', 'split.POSIXct', 'split.data.frame', 'split.default',
'sprintf', 'sqrt', 'srcfile', 'srcfilealias', 'srcfilecopy',
'srcref', 'standardGeneric', 'stderr', 'stdin', 'stdout', 'stop',
'stopifnot', 'storage.mode', 'strftime', 'strptime', 'strsplit',
'strtoi', 'strtrim', 'structure', 'strwrap', 'sub', 'subset',
'subset.data.frame', 'subset.default', 'subset.matrix',
'substitute', 'substr', 'substring', 'sum', 'summary',
'summary.Date', 'summary.POSIXct', 'summary.POSIXlt',
'summary.connection', 'summary.data.frame', 'summary.default',
'summary.factor', 'summary.matrix', 'summary.proc_time',
'summary.srcfile', 'summary.srcref', 'summary.table',
'suppressMessages', 'suppressPackageStartupMessages',
'suppressWarnings', 'svd', 'sweep', 'sys.call', 'sys.calls',
'sys.frame', 'sys.frames', 'sys.function', 'sys.load.image',
'sys.nframe', 'sys.on.exit', 'sys.parent', 'sys.parents',
'sys.save.image', 'sys.source', 'sys.status', 'system',
'system.file', 'system.time', 'system2', 't', 't.data.frame',
't.default', 'table', 'tabulate', 'tail', 'tan', 'tanh', 'tapply',
'taskCallbackManager', 'tcrossprod', 'tempdir', 'tempfile',
'testPlatformEquivalence', 'textConnection', 'textConnectionValue',
'toString', 'toString.default', 'tolower', 'topenv', 'toupper',
'trace', 'traceback', 'tracemem', 'tracingState', 'transform',
'transform.data.frame', 'transform.default', 'trigamma', 'trunc',
'trunc.Date', 'trunc.POSIXt', 'truncate', 'truncate.connection',
'try', 'tryCatch', 'typeof', 'unclass', 'undebug', 'union',
'unique', 'unique.POSIXlt', 'unique.array', 'unique.data.frame',
'unique.default', 'unique.matrix', 'unique.numeric_version',
'units', 'units.difftime', 'unix.time', 'unlink', 'unlist',
'unloadNamespace', 'unlockBinding', 'unname', 'unserialize',
'unsplit', 'untrace', 'untracemem', 'unz', 'upper.tri', 'url',
'utf8ToInt', 'vapply', 'version', 'warning', 'warnings', 'weekdays',
'weekdays.Date', 'weekdays.POSIXt', 'which', 'which.max',
'which.min', 'with', 'with.default', 'withCallingHandlers',
'withRestarts', 'withVisible', 'within', 'within.data.frame',
'within.list', 'write', 'write.dcf', 'writeBin', 'writeChar',
'writeLines', 'xor', 'xor.hexmode', 'xor.octmode',
'xpdrows.data.frame', 'xtfrm', 'xtfrm.AsIs', 'xtfrm.Date',
'xtfrm.POSIXct', 'xtfrm.POSIXlt', 'xtfrm.Surv', 'xtfrm.default',
'xtfrm.difftime', 'xtfrm.factor', 'xtfrm.numeric_version', 'xzfile',
'zapsmall'
)
tokens = {
'comments': [
(r'#.*$', Comment.Single),
],
'valid_name': [
(r'[a-zA-Z][\w.]*', Text),
# can begin with ., but not if that is followed by a digit
(r'\.[a-zA-Z_][\w.]*', Text),
],
'punctuation': [
(r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation),
],
'keywords': [
(words(builtins_base, suffix=r'(?![\w. =])'),
Keyword.Pseudo),
(r'(if|else|for|while|repeat|in|next|break|return|switch|function)'
r'(?![\w.])',
Keyword.Reserved),
(r'(array|category|character|complex|double|function|integer|list|'
r'logical|matrix|numeric|vector|data.frame|c)'
r'(?![\w.])',
Keyword.Type),
(r'(library|require|attach|detach|source)'
r'(?![\w.])',
Keyword.Namespace)
],
'operators': [
(r'<<?-|->>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator),
(r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator)
],
'builtin_symbols': [
(r'(NULL|NA(_(integer|real|complex|character)_)?|'
r'letters|LETTERS|Inf|TRUE|FALSE|NaN|pi|\.\.(\.|[0-9]+))'
r'(?![\w.])',
Keyword.Constant),
(r'(T|F)\b', Name.Builtin.Pseudo),
],
'numbers': [
# hex number
(r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex),
# decimal number
(r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[Li]?',
Number),
],
'statements': [
include('comments'),
# whitespaces
(r'\s+', Text),
(r'`.*?`', String.Backtick),
(r'\'', String, 'string_squote'),
(r'\"', String, 'string_dquote'),
include('builtin_symbols'),
include('numbers'),
include('keywords'),
include('punctuation'),
include('operators'),
include('valid_name'),
],
'root': [
include('statements'),
# blocks:
(r'\{|\}', Punctuation),
# (r'\{', Punctuation, 'block'),
(r'.', Text),
],
# 'block': [
# include('statements'),
# ('\{', Punctuation, '#push'),
# ('\}', Punctuation, '#pop')
# ],
'string_squote': [
(r'([^\'\\]|\\.)*\'', String, '#pop'),
],
'string_dquote': [
(r'([^"\\]|\\.)*"', String, '#pop'),
],
}
def analyse_text(text):
if re.search(r'[a-z0-9_\])\s]<-(?!-)', text):
return 0.11
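# Quick self-check sketch (not part of Pygments itself): the SLexer can be exercised
# through the normal Pygments API, e.g.
#
#     from pygments import highlight
#     from pygments.formatters import NullFormatter
#     code = 'x <- c(1, 2, NA)\nmean(x, na.rm = TRUE)\n'
#     print(highlight(code, SLexer(), NullFormatter()))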
class RdLexer(RegexLexer):
"""
Pygments Lexer for R documentation (Rd) files
This is a very minimal implementation, highlighting little more
than the macros. A description of Rd syntax is found in `Writing R
Extensions <http://cran.r-project.org/doc/manuals/R-exts.html>`_
and `Parsing Rd files <developer.r-project.org/parseRd.pdf>`_.
.. versionadded:: 1.6
"""
name = 'Rd'
aliases = ['rd']
filenames = ['*.Rd']
mimetypes = ['text/x-r-doc']
# To account for verbatim / LaTeX-like / and R-like areas
# would require parsing.
tokens = {
'root': [
# catch escaped brackets and percent sign
(r'\\[\\{}%]', String.Escape),
# comments
(r'%.*$', Comment),
# special macros with no arguments
(r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant),
# macros
(r'\\[a-zA-Z]+\b', Keyword),
# special preprocessor macros
(r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc),
# non-escaped brackets
(r'[{}]', Name.Builtin),
# everything else
(r'[^\\%\n{}]+', Text),
(r'.', Text),
]
}
|
the-stack_0_9417 | import numpy as np
from sklearn import datasets
from scipy.stats import f
EPSILON = 10e-10 # only to prevent division by zero
def mean_vector_similarity(X, Y):
x_mean = np.mean(X, axis=0)
y_mean = np.mean(Y, axis=0)
sim = float((x_mean.dot(y_mean)) / (np.linalg.norm(x_mean)
* np.linalg.norm(y_mean) + EPSILON))
return sim
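# Quick demonstration sketch (not in the original file): the two halves of the iris
# feature matrix have fairly similar mean vectors, so the similarity is close to 1.
if __name__ == "__main__":
    iris = datasets.load_iris()
    X, Y = iris.data[:75], iris.data[75:]
    print("mean vector similarity: %.4f" % mean_vector_similarity(X, Y))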
|
the-stack_0_9418 | """
Utilities for ESPEI
Classes and functions defined here should have some reuse potential.
"""
import itertools
import re
import os
from collections import namedtuple
import bibtexparser
import numpy as np
import sympy
import dask
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import convert_to_unicode
from distributed import Client
from pycalphad import variables as v
from six import string_types
from sympy import Symbol
from tinydb import TinyDB, where
from tinydb.storages import MemoryStorage
def unpack_piecewise(x):
if isinstance(x, sympy.Piecewise):
return float(x.args[0].expr)
else:
return float(x)
class PickleableTinyDB(TinyDB):
"""A pickleable version of TinyDB that uses MemoryStorage as a default."""
def __getstate__(self):
# first remove the query cache. The cache speed is not important to us.
for table_name in self.tables():
self.table(table_name)._query_cache = {}
pickle_dict = {}
for key, value in self.__dict__.items():
if key == '_table':
pickle_dict[key] = value.all()
else:
pickle_dict[key] = value
return pickle_dict
def __setstate__(self, state):
self.__init__(storage=MemoryStorage)
self.insert_multiple(state['_table'])
class ImmediateClient(Client):
"""
A subclass of distributed.Client that automatically unwraps the Futures
returned by map.
"""
def map(self, f, *iterators, **kwargs):
_client = super(ImmediateClient, self)
result = _client.gather(_client.map(f, *[list(it) for it in iterators], **kwargs))
return result
def sigfigs(x, n):
"""Round x to n significant digits"""
if x != 0:
return np.around(x, -(np.floor(np.log10(np.abs(x)))).astype(np.int) + (n - 1))
else:
return x
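# Worked example (not from the original source): with n = 3 significant digits,
# sigfigs(0.012345, 3) gives 0.0123 and sigfigs(98765, 3) gives roughly 98800.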
def optimal_parameters(trace_array, lnprob_array, kth=0):
"""
Return the optimal parameters in the trace based on the highest likelihood.
If kth is specified, return the kth set of *unique* optimal parameters.
Parameters
----------
trace_array : ndarray
Array of shape (iterations, number of chains, number of parameters)
lnprob_array : ndarray
Array of shape (number of chains, iterations)
kth : int
Zero-indexed optimum. 0 (the default) is the most optimal solution. 1 is
the second most optimal, etc.. Only *unique* solutions will be returned.
Returns
-------
Array of optimal parameters
Notes
-----
It is ok if the calculation did not finish and the arrays are padded with
zeros. The number of chains and iterations in the trace and lnprob arrays
must match.
"""
    # Swap first two indices of trace_array for compatibility with the new version of emcee.
trace_array = np.swapaxes(trace_array, 0, 1)
    # indices of chains + iterations that have non-zero parameters (that step has run)
nz = np.nonzero(np.all(trace_array != 0, axis=-1))
# chain + iteration index with the highest likelihood
unique_params = np.zeros(trace_array.shape[-1])
unique_params_found = -1
# loop through all possible nonzero iterations
for i in range(nz[-1][-1]):
        # find the next set of parameters
candidate_index = np.argpartition(-lnprob_array[nz], i)[i]
candidate_params = trace_array[nz][candidate_index]
# if the parameters are unique, make them the new unique parameters
if np.any(candidate_params != unique_params):
unique_params = candidate_params
unique_params_found += 1
# if we have found the kth set of unique parameters, stop
if unique_params_found == kth:
return unique_params
return np.zeros(trace_array.shape[-1])
def database_symbols_to_fit(dbf, symbol_regex="^V[V]?([0-9]+)$"):
"""
Return names of the symbols to fit that match the regular expression
Parameters
----------
dbf : Database
pycalphad Database
symbol_regex : str
Regular expression of the fitting symbols. Defaults to V or VV followed by one or more numbers.
Returns
-------
    list of str
        Sorted list of the symbol names in the Database that match the regular expression.
"""
pattern = re.compile(symbol_regex)
return sorted([x for x in sorted(dbf.symbols.keys()) if pattern.match(x)])
def flexible_open_string(obj):
"""
    Return the string of an object that is either file-like, a file path, or a raw string.
Parameters
----------
obj : string-like or file-like
Either a multiline string, a path, or a file-like object
Returns
-------
str
"""
if isinstance(obj, string_types):
# the obj is a string
if '\n' in obj:
# if the string has linebreaks, then we assume it's a raw string. Return it.
return obj
else:
# assume it is a path
with open(obj) as fp:
read_string = fp.read()
return read_string
elif hasattr(obj, 'read'):
# assume it is file-like
read_string = obj.read()
return read_string
else:
raise ValueError('Unable to determine how to extract the string of the passed object ({}) of type {}. Expected a raw string, file-like, or path-like.'.format(obj, type(obj)))
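# Quick illustration (hypothetical inputs): a raw multiline string is returned
# as-is, while a path or an open file handle is read from disk.
#
#   flexible_open_string("line one\nline two")   # -> "line one\nline two"
#   flexible_open_string("data/input.bib")       # -> contents of that file
#   with open("data/input.bib") as fp:
#       flexible_open_string(fp)                 # -> same contents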
bibliography_database = PickleableTinyDB(storage=MemoryStorage)
def add_bibtex_to_bib_database(bibtex, bib_db=None):
"""
Add entries from a BibTeX file to the bibliography database
Parameters
----------
bibtex : str
Either a multiline string, a path, or a file-like object of a BibTeX file
bib_db: PickleableTinyDB
Database to put the BibTeX entries. Defaults to a module-level default database
Returns
-------
The modified bibliographic database
"""
if not bib_db:
bib_db = bibliography_database
bibtex_string = flexible_open_string(bibtex)
parser = BibTexParser()
parser.customization = convert_to_unicode
parsed_bibtex = bibtexparser.loads(bibtex_string, parser=parser)
bib_db.insert_multiple(parsed_bibtex.entries)
return bib_db
def bib_marker_map(bib_keys, markers=None):
"""
Return a dict with reference keys and marker dicts
Parameters
----------
    bib_keys : list
        List of reference keys (e.g. BibTeX citation keys) to assign markers to.
markers : list
List of 2-tuples of ('fillstyle', 'marker') e.g. [('top', 'o'), ('full', 's')].
Defaults to cycling through the filled markers, the different fill styles.
Returns
-------
dict
Dictionary with bib_keys as keys, dict values of formatted strings and marker dicts
Examples
--------
>>> mm = bib_marker_map(['otis2016', 'bocklund2018'])
>>> mm == {'bocklund2018': {'formatted': 'bocklund2018', 'markers': {'fillstyle': 'none', 'marker': 'o'}}, 'otis2016': {'formatted': 'otis2016', 'markers': {'fillstyle': 'none', 'marker': 'v'}}}
True
"""
# TODO: support custom formatting from looking up keys in a bib_db
if not markers:
filled_markers = ['o', 'v', 's', 'd', 'P', 'X', '^', '<', '>']
fill_styles = ['none', 'full', 'top', 'right', 'bottom', 'left']
markers = itertools.product(fill_styles, filled_markers)
b_m_map = dict()
for ref, marker_tuple in zip(sorted(bib_keys), markers):
fill, mark = marker_tuple
b_m_map[ref] = {
'formatted': ref, # just use the key for formatting
'markers': {
'fillstyle': fill,
'marker': mark
}
}
return b_m_map
def parameter_term(expression, symbol):
"""
Determine the term, e.g. T*log(T) that belongs to the symbol in expression
Parameters
----------
    expression : sympy object
        Parameter expression to search for the symbol in.
    symbol : sympy.Symbol
        Symbol whose multiplicative term should be returned.
Returns
-------
"""
if expression == symbol:
# the parameter is the symbol, so the multiplicative term is 1.
term = 1
else:
if isinstance(expression, sympy.Piecewise):
expression = expression.args[0][0]
if isinstance(expression, sympy.Symbol):
# this is not mathematically correct, but we just need to be able to split it into args
expression = sympy.Add(expression, 1)
if not isinstance(expression, sympy.Add):
raise ValueError('Parameter {} is a {} not a sympy.Add or a Piecewise Add'.format(expression, type(expression)))
expression_terms = expression.args
term = None
for term_coeff in expression_terms:
coeff, root = term_coeff.as_coeff_mul(symbol)
if root == (symbol,):
term = coeff
break
if term is None:
raise ValueError('No multiplicative terms found for Symbol {} in parameter {}'.format(symbol, expression))
return term
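# Small illustration (the symbols below are made up for the example): for an
# expression 100 + T*VV0001, the multiplicative term belonging to VV0001 is T.
#
#   >>> VV0001, T = sympy.symbols('VV0001 T')
#   >>> parameter_term(100 + T*VV0001, VV0001)
#   T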
def formatted_constituent_array(constituent_array):
"""
Given a constituent array of Species, return the classic CALPHAD-style interaction.
Parameters
----------
constituent_array : list
List of sublattices, which are lists of Species in that sublattice
Returns
-------
str
String of the constituent array formatted in the classic CALPHAD style
Examples
--------
>>> from pycalphad import variables as v
>>> const_array = [[v.Species('CU'), v.Species('MG')], [v.Species('MG')]]
>>> formatted_constituent_array(const_array)
'CU,MG:MG'
"""
return ':'.join([','.join([sp.name for sp in subl]) for subl in constituent_array])
def formatted_parameter(dbf, symbol, unique=True):
"""
Get the deconstructed pretty parts of the parameter/term a symbol belongs to in a Database.
Parameters
----------
dbf : pycalphad.Database
symbol : string or sympy.Symbol
Symbol in the Database to get the parameter for.
unique : bool
If True, will raise if more than one parameter containing the symbol is found.
Returns
-------
FormattedParameter
A named tuple with the following attributes:
``phase_name``, ``interaction``, ``symbol``, ``term``, ``parameter_type``
or ``term_symbol`` (which is just the Symbol * temperature term)
"""
FormattedParameter = namedtuple('FormattedParameter', ['phase_name', 'interaction', 'symbol', 'term', 'parameter_type', 'term_symbol'])
if not isinstance(symbol, Symbol):
symbol = Symbol(symbol)
search_res = dbf._parameters.search(
where('parameter').test(lambda x: symbol in x.free_symbols))
if len(search_res) == 0:
raise ValueError('Symbol {} not found in any parameters.'.format(symbol))
elif (len(search_res) > 1) and unique:
raise ValueError('Parameters found containing Symbol {} are not unique. Found {}.'.format(symbol, search_res))
formatted_parameters = []
for result in search_res:
const_array = formatted_constituent_array(result['constituent_array'])
        # format the parameter type to G or L0, L1, ...
parameter_type = '{}{}'.format(result['parameter_type'], result['parameter_order'])
# override non-interacting to G if there's no interaction
has_interaction = ',' in const_array
if not has_interaction:
if (result['parameter_type'] == 'G') or (result['parameter_type'] == 'L'):
parameter_type = 'G'
term = parameter_term(result['parameter'], symbol)
formatted_param = FormattedParameter(result['phase_name'],
const_array,
symbol,
term,
parameter_type,
term*symbol
)
formatted_parameters.append(formatted_param)
if unique:
return formatted_parameters[0]
else:
return formatted_parameters
def build_sitefractions(phase_name, sublattice_configurations, sublattice_occupancies):
"""Convert nested lists of sublattice configurations and occupancies to a list
of dictionaries. The dictionaries map SiteFraction symbols to occupancy
values. Note that zero occupancy site fractions will need to be added
separately since the total degrees of freedom aren't known in this function.
Parameters
----------
phase_name : str
Name of the phase
sublattice_configurations : [[str]]
sublattice configuration
sublattice_occupancies : [[float]]
occupancy of each sublattice
Returns
-------
    [dict]
        A list of dictionaries, one per sublattice configuration, mapping
        v.SiteFraction objects to their occupancy values.
"""
result = []
for config, occ in zip(sublattice_configurations, sublattice_occupancies):
sitefracs = {}
config = [[c] if not isinstance(c, (list, tuple)) else c for c in config]
occ = [[o] if not isinstance(o, (list, tuple)) else o for o in occ]
if len(config) != len(occ):
raise ValueError('Sublattice configuration length differs from occupancies')
for sublattice_idx in range(len(config)):
if isinstance(config[sublattice_idx], (list, tuple)) != isinstance(occ[sublattice_idx], (list, tuple)):
raise ValueError('Sublattice configuration type differs from occupancies')
if not isinstance(config[sublattice_idx], (list, tuple)):
# This sublattice is fully occupied by one component
sitefracs[v.SiteFraction(phase_name, sublattice_idx, config[sublattice_idx])] = occ[sublattice_idx]
else:
# This sublattice is occupied by multiple elements
if len(config[sublattice_idx]) != len(occ[sublattice_idx]):
raise ValueError('Length mismatch in sublattice configuration')
for comp, val in zip(config[sublattice_idx], occ[sublattice_idx]):
sitefracs[v.SiteFraction(phase_name, sublattice_idx, comp)] = val
result.append(sitefracs)
return result
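# Illustrative call (the configuration below is made up): a pure first
# sublattice and a mixed second sublattice.
#
#   build_sitefractions('FCC_A1', [['CU', ['CU', 'MG']]], [[1.0, [0.5, 0.5]]])
#   # -> [{SiteFraction(FCC_A1, 0, CU): 1.0,
#   #      SiteFraction(FCC_A1, 1, CU): 0.5,
#   #      SiteFraction(FCC_A1, 1, MG): 0.5}]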
def popget(d, key, default=None):
"""
    Pop the key from the dict, returning the default if the key is not found.
Parameters
----------
d : dict
Dictionary to get key from.
key : object
Key to get from the dictionary.
default : object
Default to return if key is not found in dictionary.
Returns
-------
object
Examples
---------
>>> d = {'ABC': 5.0}
>>> popget(d, 'ZPF', 1.0) == 1.0
True
>>> popget(d, 'ABC', 1.0) == 5.0
True
"""
try:
return d.pop(key)
except KeyError:
return default
def get_dask_config_paths():
"""
Return a list of configuration file paths for dask.
The last path in the list has the highest precedence.
Returns
-------
list
Examples
--------
>>> config_files = get_dask_config_paths()
>>> len(config_files) > 1
True
"""
candidates = dask.config.paths
file_paths = []
for path in candidates:
if os.path.exists(path):
if os.path.isdir(path):
file_paths.extend(sorted([
os.path.join(path, p)
for p in os.listdir(path)
if os.path.splitext(p)[1].lower() in ('.json', '.yaml', '.yml')
]))
else:
file_paths.append(path)
return file_paths
|
the-stack_0_9419 | #!/usr/bin/env python3
"""Common library for reading from and writing to files.
This module provides functions for reading in formatted data from system files
and writing it back out. Examples include reading a string list or integer
matrix from a file.
"""
import csv
from typing import Iterable, Iterator, List, Optional
def ints_from_file(file_name: str, sep: str = ' ') -> Iterator[List[int]]:
"""Reads a list of integer rows from a file.
Args:
file_name: A relative path to the file to read from.
sep: A separator token that appears between integers within a row.
Yields:
Each integer row, in sequence, from ``file_name``, where each row is on
a separate line and integers within a row are separated by ``sep``.
"""
with open(file_name) as input_file:
for line in input_file:
yield [int(token) for token in line.rstrip().split(sep)]
def strings_from_file(
file_name: str,
sep: str = ',',
quote: Optional[str] = '"',
) -> Iterable[str]:
"""Reads a sequence of formatted strings from a file.
Args:
file_name: A relative path to the file to read from.
sep: A separator token that appears between input strings in the file.
quote: If present, designates a custom quotation mark character to be
stripped from the start and end of each input string. If ``None``,
each input string will be interpreted verbatim.
Yields:
Each input string, in sequence, from ``file_name``, where strings are
separated by ``sep`` and quoted with ``quote`` characters.
"""
with open(file_name, newline='') as input_file:
if quote is None or quote == '':
quote_style = csv.QUOTE_NONE
else:
quote_style = csv.QUOTE_ALL
reader = csv.reader(
input_file, delimiter=sep, quotechar=quote, quoting=quote_style)
for row in reader:
for token in row:
yield token
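# Example usage (illustrative; the file names below are placeholders). Assuming
# 'grid.txt' holds space-separated integer rows and 'names.txt' holds quoted,
# comma-separated strings:
#
#   for row in ints_from_file('grid.txt'):
#       print(sum(row))
#
#   for name in strings_from_file('names.txt'):
#       print(name)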
|
the-stack_0_9421 | import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import time
import json
import numpy as np
import cv2
import random
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from lib.options import BaseOptions
from lib.mesh_util import *
from lib.sample_util import *
from lib.train_util import *
from lib.data import *
from lib.model import *
from lib.geometry import index
# get options
opt = BaseOptions().parse()
def train(opt):
# set cuda
cuda = torch.device('cuda:%d' % opt.gpu_id)
# train_dataset = TrainDataset(opt, phase='train')
# test_dataset = TrainDataset(opt, phase='test')
train_dataset = MRIDataset(opt, phase='train')
test_dataset = MRIDataset(opt, phase='test')
projection_mode = train_dataset.projection_mode
# create data loader
train_data_loader = DataLoader(train_dataset,
batch_size=opt.batch_size, shuffle=not opt.serial_batches,
num_workers=opt.num_threads, pin_memory=opt.pin_memory)
print('train data size: ', len(train_data_loader))
# NOTE: batch size should be 1 and use all the points for evaluation
test_data_loader = DataLoader(test_dataset,
batch_size=1, shuffle=False,
num_workers=opt.num_threads, pin_memory=opt.pin_memory)
print('test data size: ', len(test_data_loader))
# create net
netG = HGPIFuNet(opt, projection_mode).to(device=cuda)
optimizerG = torch.optim.RMSprop(netG.parameters(), lr=opt.learning_rate, momentum=0, weight_decay=0)
lr = opt.learning_rate
print('Using Network: ', netG.name)
def set_train():
netG.train()
def set_eval():
netG.eval()
# load checkpoints
if opt.load_netG_checkpoint_path is not None:
print('loading for net G ...', opt.load_netG_checkpoint_path)
netG.load_state_dict(torch.load(opt.load_netG_checkpoint_path, map_location=cuda))
if opt.continue_train:
if opt.resume_epoch < 0:
model_path = '%s/%s/netG_latest' % (opt.checkpoints_path, opt.name)
else:
model_path = '%s/%s/netG_epoch_%d' % (opt.checkpoints_path, opt.name, opt.resume_epoch)
print('Resuming from ', model_path)
netG.load_state_dict(torch.load(model_path, map_location=cuda))
os.makedirs(opt.checkpoints_path, exist_ok=True)
os.makedirs(opt.results_path, exist_ok=True)
os.makedirs('%s/%s' % (opt.checkpoints_path, opt.name), exist_ok=True)
os.makedirs('%s/%s' % (opt.results_path, opt.name), exist_ok=True)
opt_log = os.path.join(opt.results_path, opt.name, 'opt.txt')
with open(opt_log, 'w') as outfile:
outfile.write(json.dumps(vars(opt), indent=2))
# training
start_epoch = 0 if not opt.continue_train else max(opt.resume_epoch,0)
for epoch in range(start_epoch, opt.num_epoch):
epoch_start_time = time.time()
if not opt.eval_only:
set_train()
iter_data_time = time.time()
for train_idx, train_data in enumerate(train_data_loader):
iter_start_time = time.time()
# retrieve the data
image_tensor = train_data['img'].to(device=cuda)
calib_tensor = train_data['calib'].to(device=cuda)
sample_tensor = train_data['samples'].to(device=cuda)
image_tensor, calib_tensor = reshape_multiview_tensors(image_tensor, calib_tensor)
if opt.num_views > 1:
sample_tensor = reshape_sample_tensor(sample_tensor, opt.num_views)
label_tensor = train_data['labels'].to(device=cuda)
# network input and output
res, error = netG.forward(image_tensor, sample_tensor, calib_tensor, labels=label_tensor)
optimizerG.zero_grad()
error.backward()
optimizerG.step()
iter_net_time = time.time()
eta = ((iter_net_time - epoch_start_time) / (train_idx + 1)) * len(train_data_loader) - (
iter_net_time - epoch_start_time)
if train_idx % opt.freq_plot == 0:
print(
'Name: {0} | Epoch: {1} | {2}/{3} | Err: {4:.06f} | LR: {5:.06f} | Sigma: {6:.02f} | dataT: {7:.05f} | netT: {8:.05f} | ETA: {9:02d}:{10:02d}'.format(
opt.name, epoch, train_idx, len(train_data_loader), error.item(), lr, opt.sigma,
iter_start_time - iter_data_time,
iter_net_time - iter_start_time, int(eta // 60),
int(eta - 60 * (eta // 60))))
if train_idx % opt.freq_save == 0 and train_idx != 0:
torch.save(netG.state_dict(), '%s/%s/netG_latest' % (opt.checkpoints_path, opt.name))
torch.save(netG.state_dict(), '%s/%s/netG_epoch_%d' % (opt.checkpoints_path, opt.name, epoch))
if train_idx % opt.freq_save_ply == 0:
save_path = '%s/%s/pred.ply' % (opt.results_path, opt.name)
r = res[0].cpu()
points = sample_tensor[0].transpose(0, 1).cpu()
save_samples_truncted_prob(save_path, points.detach().numpy(), r.detach().numpy())
iter_data_time = time.time()
# update learning rate
lr = adjust_learning_rate(optimizerG, epoch, lr, opt.schedule, opt.gamma)
#### test
with torch.no_grad():
set_eval()
if not opt.no_num_eval:
test_losses = {}
print('calc error (test) ...')
test_errors = calc_error(opt, netG, cuda, test_dataset, 100)
print('eval test MSE: {0:06f} IOU: {1:06f} prec: {2:06f} recall: {3:06f}'.format(*test_errors))
MSE, IOU, prec, recall = test_errors
test_losses['MSE(test)'] = MSE
test_losses['IOU(test)'] = IOU
test_losses['prec(test)'] = prec
test_losses['recall(test)'] = recall
print('calc error (train) ...')
train_dataset.is_train = False
train_errors = calc_error(opt, netG, cuda, train_dataset, 100)
train_dataset.is_train = True
print('eval train MSE: {0:06f} IOU: {1:06f} prec: {2:06f} recall: {3:06f}'.format(*train_errors))
MSE, IOU, prec, recall = train_errors
test_losses['MSE(train)'] = MSE
test_losses['IOU(train)'] = IOU
test_losses['prec(train)'] = prec
test_losses['recall(train)'] = recall
# if not opt.no_gen_mesh:
# print('generate mesh (test) ...')
# for gen_idx in tqdm(range(opt.num_gen_mesh_test)):
# test_data = random.choice(test_dataset)
# save_path = '%s/%s/test_eval_epoch%d_%s.obj' % (
# opt.results_path, opt.name, epoch, test_data['name'])
# gen_mesh(opt, netG, cuda, test_data, save_path)
#
# print('generate mesh (train) ...')
# train_dataset.is_train = False
# for gen_idx in tqdm(range(opt.num_gen_mesh_test)):
# train_data = random.choice(train_dataset)
# save_path = '%s/%s/train_eval_epoch%d_%s.obj' % (
# opt.results_path, opt.name, epoch, train_data['name'])
# gen_mesh(opt, netG, cuda, train_data, save_path)
# train_dataset.is_train = True
if not opt.no_gen_mri:
print('generate mri (test) ...')
for gen_idx in tqdm(range(opt.num_gen_mesh_test)):
test_data = random.choice(test_dataset)
save_path = '%s/%s/test_eval_epoch%d_%s.obj' % (
opt.results_path, opt.name, epoch, test_data['name'])
gen_mri(opt, netG, cuda, test_data, save_path)
print('generate mri (train) ...')
train_dataset.is_train = False
for gen_idx in tqdm(range(opt.num_gen_mesh_test)):
train_data = random.choice(train_dataset)
save_path = '%s/%s/train_eval_epoch%d_%s.obj' % (
opt.results_path, opt.name, epoch, train_data['name'])
gen_mri(opt, netG, cuda, train_data, save_path)
train_dataset.is_train = True
if opt.eval_only:
break
if __name__ == '__main__':
train(opt) |
the-stack_0_9423 | import rolls
class Atributes:
def __init__(self):
self.values = {
"strength": 10,
"dexterity": 10,
"constitution": 10,
"intelligence": 10,
"wisdom": 10,
"charisma": 10
}
self.modifiers = {
"strength": 0,
"dexterity": 0,
"constitution": 0,
"intelligence": 0,
"wisdom": 0,
"charisma": 0
}
self.maxValue = {
"strength": 20,
"dexterity": 20,
"constitution": 20,
"intelligence": 20,
"wisdom": 20,
"charisma": 20
}
def rollAtributes(self,min_val=1,max_val=6, maxAtributeValue=20, numberOfDice=4):
self.values = {
"strength": min(rolls.rollAtribute(min_val,max_val,numberOfDice),maxAtributeValue),
"dexterity": min(rolls.rollAtribute(min_val,max_val,numberOfDice),maxAtributeValue),
"constitution": min(rolls.rollAtribute(min_val,max_val,numberOfDice),maxAtributeValue),
"intelligence": min(rolls.rollAtribute(min_val,max_val,numberOfDice),maxAtributeValue),
"wisdom": min(rolls.rollAtribute(min_val,max_val,numberOfDice),maxAtributeValue),
"charisma": min(rolls.rollAtribute(min_val,max_val,numberOfDice),maxAtributeValue)
}
self.setModifiers()
def setAtribute(self,atribute,value):
try:
self.values[atribute] = value
self.setModifiers()
except:
print("bad selection")
def setAtributes(self,atributes):
if isinstance(atributes,dict):
# If you pass in atributes as a dictionary
for key in self.values.keys():
self.values[key] = atributes[key]
else:
# If you pass in atributes as a list
for k,key in enumerate(self.values.keys()):
self.values[key] = atributes[k]
self.setModifiers()
def setModifiers(self):
keys = self.modifiers.keys()
for key in keys:
self.modifiers[key] = (self.values[key] - 10)//2
def addClassMods(self, classMods):
for key in self.values.keys():
self.maxValue[key] = 20+classMods[key]
self.values[key] = min(self.maxValue[key], self.values[key]+classMods[key])
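# Example usage (a sketch; assumes this module is run directly and that the
# local `rolls` module imported above is available):
if __name__ == "__main__":
    stats = Atributes()
    stats.rollAtributes()                # 4d6-style rolls, capped at 20 each
    stats.setAtribute("strength", 18)    # override a single score
    print(stats.values)
    print(stats.modifiers)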
|
the-stack_0_9426 | # -*- coding: utf-8 -*-
#/usr/bin/python2
'''
By kyubyong park. [email protected].
https://www.github.com/kyubyong/kss
'''
from __future__ import print_function, division
import numpy as np
import librosa
import os, copy
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from scipy import signal
from hyperparams import Hyperparams as hp
import tensorflow as tf
def get_spectrograms(fpath):
'''Parse the wave file in `fpath` and
Returns normalized melspectrogram and linear spectrogram.
Args:
fpath: A string. The full path of a sound file.
Returns:
mel: A 2d array of shape (T, n_mels) and dtype of float32.
mag: A 2d array of shape (T, 1+n_fft/2) and dtype of float32.
'''
# Loading sound file
y, sr = librosa.load(fpath, sr=hp.sr)
# Trimming
y, _ = librosa.effects.trim(y, top_db=40)
# Preemphasis
y = np.append(y[0], y[1:] - hp.preemphasis * y[:-1])
# stft
linear = librosa.stft(y=y,
n_fft=hp.n_fft,
hop_length=hp.hop_length,
win_length=hp.win_length)
# magnitude spectrogram
mag = np.abs(linear) # (1+n_fft//2, T)
# mel spectrogram
mel_basis = librosa.filters.mel(hp.sr, hp.n_fft, hp.n_mels) # (n_mels, 1+n_fft//2)
mel = np.dot(mel_basis, mag) # (n_mels, t)
# to decibel
mel = 20 * np.log10(np.maximum(1e-5, mel))
mag = 20 * np.log10(np.maximum(1e-5, mag))
# normalize
mel = np.clip((mel - hp.ref_db + hp.max_db) / hp.max_db, 1e-8, 1)
mag = np.clip((mag - hp.ref_db + hp.max_db) / hp.max_db, 1e-8, 1)
# Transpose
mel = mel.T.astype(np.float32) # (T, n_mels)
mag = mag.T.astype(np.float32) # (T, 1+n_fft//2)
return mel, mag
def spectrogram2wav(mag):
'''# Generate wave file from linear magnitude spectrogram
Args:
mag: A numpy array of (T, 1+n_fft//2)
Returns:
wav: A 1-D numpy array.
'''
# transpose
mag = mag.T
# de-noramlize
mag = (np.clip(mag, 0, 1) * hp.max_db) - hp.max_db + hp.ref_db
# to amplitude
mag = np.power(10.0, mag * 0.05)
# wav reconstruction
wav = griffin_lim(mag**hp.power)
# de-preemphasis
wav = signal.lfilter([1], [1, -hp.preemphasis], wav)
# trim
wav = trim(wav)
return wav.astype(np.float32)
def griffin_lim(spectrogram):
    '''Applies the Griffin-Lim algorithm.'''
X_best = copy.deepcopy(spectrogram)
for i in range(hp.n_iter):
X_t = invert_spectrogram(X_best)
est = librosa.stft(X_t, hp.n_fft, hp.hop_length, win_length=hp.win_length)
phase = est / np.maximum(1e-8, np.abs(est))
X_best = spectrogram * phase
X_t = invert_spectrogram(X_best)
y = np.real(X_t)
return y
def invert_spectrogram(spectrogram):
'''Applies inverse fft.
Args:
spectrogram: [1+n_fft//2, t]
'''
return librosa.istft(spectrogram, hp.hop_length, win_length=hp.win_length, window="hann")
def plot_alignment(alignment, gs, dir=hp.logdir):
"""Plots the alignment.
Args:
alignment: A numpy array with shape of (encoder_steps, decoder_steps)
gs: (int) global step.
dir: Output path.
"""
if not os.path.exists(dir): os.mkdir(dir)
fig, ax = plt.subplots()
im = ax.imshow(alignment)
fig.colorbar(im)
plt.title('{} Steps'.format(gs))
plt.savefig('{}/alignment_{}.png'.format(dir, gs), format='png')
def guided_attention(g=0.2):
    '''Guided attention. Refer to page 3 of the paper.'''
W = np.zeros((hp.max_N, hp.max_T), dtype=np.float32)
for n_pos in range(W.shape[0]):
for t_pos in range(W.shape[1]):
W[n_pos, t_pos] = 1 - np.exp(-(t_pos / float(hp.max_T) - n_pos / float(hp.max_N)) ** 2 / (2 * g * g))
return W
def learning_rate_decay(init_lr, global_step, warmup_steps = 4000.0):
'''Noam scheme from tensor2tensor'''
step = tf.to_float(global_step + 1)
return init_lr * warmup_steps**0.5 * tf.minimum(step * warmup_steps**-1.5, step**-0.5)
def load_spectrograms(fpath):
'''Read the wave file in `fpath`
    and extract its spectrograms'''
fname = os.path.basename(fpath)
mel, mag = get_spectrograms(fpath)
t = mel.shape[0]
# Marginal padding for reduction shape sync.
num_paddings = hp.r - (t % hp.r) if t % hp.r != 0 else 0
mel = np.pad(mel, [[0, num_paddings], [0, 0]], mode="constant")
mag = np.pad(mag, [[0, num_paddings], [0, 0]], mode="constant")
# Reduction
mel = mel[::hp.r, :]
return fname, mel, mag
# This is adapted from
# https://github.com/keithito/tacotron/blob/master/util/audio.py#L55-62
def trim(wav, top_db=40, min_silence_sec=0.8):
frame_length = int(hp.sr * min_silence_sec)
hop_length = int(frame_length / 4)
endpoint = librosa.effects.split(wav, frame_length=frame_length,
hop_length=hop_length,
top_db=top_db)[0, 1]
return wav[:endpoint]
def load_j2hcj():
'''
    Returns:
      A dictionary mapping each Hangul Jamo character (0x01100-0x011FF) to the
      corresponding Hangul Compatibility Jamo (0x03130-0x0318F) character.
'''
jamo = u'''␀␃ !,.?ᄀᄁᄂᄃᄄᄅᄆᄇᄈᄉᄊᄋᄌᄍᄎᄏᄐᄑ하ᅢᅣᅤᅥᅦᅧᅨᅩᅪᅫᅬᅭᅮᅯᅰᅱᅲᅳᅴᅵᆨᆩᆪᆫᆬᆭᆮᆯᆰᆱᆲᆴᆶᆷᆸᆹᆺᆻᆼᆽᆾᆿᇀᇁᇂ'''
hcj = u'''␀␃ !,.?ㄱㄲㄴㄷㄸㄹㅁㅂㅃㅅㅆㅇㅈㅉㅊㅋㅌㅍㅎㅏㅐㅑㅒㅓㅔㅕㅖㅗㅘㅙㅚㅛㅜㅝㅞㅟㅠㅡㅢㅣㄱㄲㄳㄴㄵㄶㄷㄹㄺㄻㄼㄾㅀㅁㅂㅄㅅㅆㅇㅈㅊㅋㅌㅍㅎ'''
assert len(jamo) == len(hcj)
j2hcj = {j: h for j, h in zip(jamo, hcj)}
return j2hcj
def load_j2sj():
'''
    Returns:
      A dictionary mapping each Hangul Jamo character (0x01100-0x011FF) to its
      decomposition, with double consonants split into two single consonants.
'''
jamo = u'''␀␃ !,.?ᄀᄁᄂᄃᄄᄅᄆᄇᄈᄉᄊᄋᄌᄍᄎᄏᄐᄑ하ᅢᅣᅤᅥᅦᅧᅨᅩᅪᅫᅬᅭᅮᅯᅰᅱᅲᅳᅴᅵᆨᆩᆪᆫᆬᆭᆮᆯᆰᆱᆲᆴᆶᆷᆸᆹᆺᆻᆼᆽᆾᆿᇀᇁᇂ'''
sj = u'''␀|␃| |!|,|.|?|ᄀ|ᄀᄀ|ᄂ|ᄃ|ᄃᄃ|ᄅ|ᄆ|ᄇ|ᄇᄇ|ᄉ|ᄉᄉ|ᄋ|ᄌ|ᄌᄌ|ᄎ|ᄏ|ᄐ|ᄑ|ᄒ|ᅡ|ᅢ|ᅣ|ᅤ|ᅥ|ᅦ|ᅧ|ᅨ|ᅩ|ᅪ|ᅫ|ᅬ|ᅭ|ᅮ|ᅯ|ᅰ|ᅱ|ᅲ|ᅳ|ᅴ|ᅵ|ᆨ|ᆨᆨ|ᆨᆺ|ᆫ|ᆫᆽ|ᆫᇂ|ᆮ|ᆯ|ᆯᆨ|ᆯᆷ|ᆯᆸ|ᆯᇀ|ᆯᇂ|ᆷ|ᆸ|ᆸᆺ|ᆺ|ᆺᆺ|ᆼ|ᆽ|ᆾ|ᆿ|ᇀ|ᇁ|ᇂ'''
assert len(jamo)==len(sj.split("|"))
j2sj = {j: s for j, s in zip(jamo, sj.split("|"))}
return j2sj
def load_j2shcj():
'''
    Returns:
      A dictionary mapping each Hangul Jamo character (0x01100-0x011FF) to the
      corresponding Hangul Compatibility Jamo (0x03130-0x0318F) character.
      Double consonants are further decomposed into single consonants.
'''
jamo = u'''␀␃ !,.?ᄀᄁᄂᄃᄄᄅᄆᄇᄈᄉᄊᄋᄌᄍᄎᄏᄐᄑ하ᅢᅣᅤᅥᅦᅧᅨᅩᅪᅫᅬᅭᅮᅯᅰᅱᅲᅳᅴᅵᆨᆩᆪᆫᆬᆭᆮᆯᆰᆱᆲᆴᆶᆷᆸᆹᆺᆻᆼᆽᆾᆿᇀᇁᇂ'''
shcj = u'''␀|␃| |!|,|.|?|ㄱ|ㄱㄱ|ㄴ|ㄷ|ㄷㄷ|ㄹ|ㅁ|ㅂ|ㅂㅂ|ㅅ|ㅅㅅ|ㅇ|ㅈ|ㅈㅈ|ㅊ|ㅋ|ㅌ|ㅍ|ㅎ|ㅏ|ㅐ|ㅑ|ㅒ|ㅓ|ㅔ|ㅕ|ㅖ|ㅗ|ㅘ|ㅙ|ㅚ|ㅛ|ㅜ|ㅝ|ㅞ|ㅟ|ㅠ|ㅡ|ㅢ|ㅣ|ㄱ|ㄱㄱ|ㄱㅅ|ㄴ|ㄴㅈ|ㄴㅎ|ㄷ|ㄹ|ㄹㄱ|ㄹㅁ|ㄹㅂ|ㄹㅌ|ㄹㅎ|ㅁ|ㅂ|ㅂㅅ|ㅅ|ㅅㅅ|ㅇ|ㅈ|ㅊ|ㅋ|ㅌ|ㅍ|ㅎ'''
assert len(jamo)==len(shcj.split("|"))
j2shcj = {j: s for j, s in zip(jamo, shcj.split("|"))}
return j2shcj
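# Illustrative round trip (a sketch, not part of the original file); assumes a
# 'sample.wav' file exists and the hyperparameters in hp are configured:
#
#   fname, mel, mag = load_spectrograms('sample.wav')
#   wav = spectrogram2wav(mag)                        # Griffin-Lim reconstruction
#   librosa.output.write_wav('recon.wav', wav, hp.sr)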
|
the-stack_0_9427 | #
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, [email protected].
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
Tools for assembly module.
"""
__all__ = [
'get_index_of_csr_data',
'fill_csr_matrix'
]
# try to import the fortran routines
use_fortran = False
try:
import amfe.f90_assembly
use_fortran = True
except ImportError:
print('Python was not able to load the fast fortran assembly routines.')
def get_index_of_csr_data(i, j, indptr, indices):
"""
Get the value index of the i,j-element of a matrix in CSR format.
Parameters
----------
i : int
row index which is asked to get the CSR-index for
j : int
column index which is asked to get the CSR-index for
indptr : ndarray
index-ptr-Array of the CSR-Matrix.
indices : ndarray
indices array of CSR-matrix (represents the nonzero column indices)
Returns
-------
k : int
index of the value array of the CSR-matrix, in which value [i,j] is stored.
Notes
-----
    This routine works only if the tuple i,j is actually a stored (preallocated) entry of the matrix. Otherwise the
    value k=0 will be returned and an error message will be printed.
"""
# indices for row i are stored in indices[indptr[k]:indptr[k+1]]; thus the indptr marks the start and end of the
# part of the indices and val vector where all entries of a row are stored
# set k to the start of data of row k
k = indptr[i]
# search for appearance of j in the nonzero column indices which are stored in indices[k] till
# indices[k+indptr[i+1]]
while j != indices[k]:
# while column j not found search for j in next entry
k += 1
# Check if next search would be in next (wrong) row
if k > indptr[i + 1]:
print('ERROR! The index in the csr matrix is not preallocated!')
k = 0
break
return k
def fill_csr_matrix(indptr, indices, vals, K, k_indices):
"""
Fill the values of K into the vals-array of a sparse CSR Matrix given the k_indices array. The values of K are
added to the current values (typically for assembly processes)
Parameters
----------
indptr : ndarray
indptr-array of a preallocated CSR-Matrix
indices : ndarray
indices-array of a preallocated CSR-Matrix
vals : ndarray
        vals-array of a preallocated CSR-Matrix
K : ndarray
'small' square array whose values will be distributed into the
CSR-Matrix, Shape is (n,n)
k_indices : ndarray
mapping array of the global indices for the 'small' K array.
The (i,j) entry of K has the global indices (k_indices[i], k_indices[j])
Shape is (n,)
Returns
-------
None
"""
ndof_l = K.shape[0]
for i in range(ndof_l):
for j in range(ndof_l):
l = get_index_of_csr_data(k_indices[i], k_indices[j], indptr, indices)
vals[l] += K[i, j]
return
if use_fortran:
###########################################################################
# Fortran routine that will override the functions above for massive speedup.
###########################################################################
get_index_of_csr_data = amfe.f90_assembly.get_index_of_csr_data
fill_csr_matrix = amfe.f90_assembly.fill_csr_matrix
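# Minimal usage sketch (illustrative only, not part of the original module): a
# preallocated scipy CSR matrix provides the indptr/indices/data arrays, and a
# small element matrix K is scattered into it at the given global dof indices.
#
#   import numpy as np
#   from scipy.sparse import csr_matrix
#
#   K_global = csr_matrix(np.ones((4, 4)))        # preallocated sparsity pattern
#   K_global.data[:] = 0.0
#   K_el = np.array([[2.0, -1.0], [-1.0, 2.0]])   # local element matrix
#   dofs = np.array([1, 3])                       # global dof indices of K_el
#   fill_csr_matrix(K_global.indptr, K_global.indices, K_global.data, K_el, dofs)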
|
the-stack_0_9429 | # Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions
from highlanderclient.tests.functional.cli.v1 import base_v1
class StandardItemsAvailabilityCLITests(base_v1.HighlanderClientTestBase):
def test_std_workflows_availability(self):
wfs = self.highlander_admin("workflow-list")
self.assertTableStruct(
wfs,
["Name", "Tags", "Input", "Created at", "Updated at"]
)
self.assertIn("std.create_instance",
[workflow["Name"] for workflow in wfs])
wfs = self.highlander_alt_user("workflow-list")
self.assertTableStruct(
wfs,
["Name", "Tags", "Input", "Created at", "Updated at"]
)
self.assertIn("std.create_instance",
[workflow["Name"] for workflow in wfs])
def test_std_actions_availability(self):
acts = self.highlander_admin("action-list")
self.assertTableStruct(
acts,
["Name", "Is system", "Input", "Description",
"Tags", "Created at", "Updated at"]
)
self.assertIn("glance.images_list",
[action["Name"] for action in acts])
acts = self.highlander_alt_user("action-list")
self.assertTableStruct(
acts,
["Name", "Is system", "Input", "Description",
"Tags", "Created at", "Updated at"]
)
self.assertIn("glance.images_list",
[action["Name"] for action in acts])
class WorkbookIsolationCLITests(base_v1.HighlanderClientTestBase):
def test_workbook_name_uniqueness(self):
self.workbook_create(self.wb_def)
self.assertRaises(
exceptions.CommandFailed,
self.highlander_admin,
"workbook-create",
params="{0}".format(self.wb_def)
)
self.workbook_create(self.wb_def, admin=False)
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"workbook-create",
params="{0}".format(self.wb_def)
)
def test_wb_isolation(self):
wb = self.workbook_create(self.wb_def)
wb_name = self.get_value_of_field(wb, "Name")
wbs = self.highlander_admin("workbook-list")
self.assertIn(wb_name, [w["Name"] for w in wbs])
alt_wbs = self.highlander_alt_user("workbook-list")
self.assertNotIn(wb_name, [w["Name"] for w in alt_wbs])
def test_get_wb_from_another_tenant(self):
wb = self.workbook_create(self.wb_def)
name = self.get_value_of_field(wb, "Name")
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"workbook-get",
params=name
)
def test_delete_wb_from_another_tenant(self):
wb = self.workbook_create(self.wb_def)
name = self.get_value_of_field(wb, "Name")
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"workbook-delete",
params=name
)
class WorkflowIsolationCLITests(base_v1.HighlanderClientTestBase):
def test_workflow_name_uniqueness(self):
self.workflow_create(self.wf_def)
self.assertRaises(
exceptions.CommandFailed,
self.highlander_admin,
"workflow-create",
params="{0}".format(self.wf_def)
)
self.workflow_create(self.wf_def, admin=False)
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"workflow-create",
params="{0}".format(self.wf_def)
)
def test_wf_isolation(self):
wf = self.workflow_create(self.wf_def)
wfs = self.highlander_admin("workflow-list")
self.assertIn(wf[0]["Name"], [w["Name"] for w in wfs])
alt_wfs = self.highlander_alt_user("workflow-list")
self.assertNotIn(wf[0]["Name"], [w["Name"] for w in alt_wfs])
def test_get_wf_from_another_tenant(self):
wf = self.workflow_create(self.wf_def)
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"workflow-get",
params=wf[0]["Name"]
)
def test_delete_wf_from_another_tenant(self):
wf = self.workflow_create(self.wf_def)
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"workflow-delete",
params=wf[0]["Name"]
)
class ActionIsolationCLITests(base_v1.HighlanderClientTestBase):
def test_actions_name_uniqueness(self):
self.action_create(self.act_def)
self.assertRaises(
exceptions.CommandFailed,
self.highlander_admin,
"action-create",
params="{0}".format(self.act_def)
)
self.action_create(self.act_def, admin=False)
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"action-create",
params="{0}".format(self.act_def)
)
def test_action_isolation(self):
act = self.action_create(self.act_def)
acts = self.highlander_admin("action-list")
self.assertIn(act[0]["Name"], [a["Name"] for a in acts])
alt_acts = self.highlander_alt_user("action-list")
self.assertNotIn(act[0]["Name"], [a["Name"] for a in alt_acts])
def test_get_action_from_another_tenant(self):
act = self.action_create(self.act_def)
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"action-get",
params=act[0]["Name"]
)
def test_delete_action_from_another_tenant(self):
act = self.action_create(self.act_def)
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"action-delete",
params=act[0]["Name"]
)
class CronTriggerIsolationCLITests(base_v1.HighlanderClientTestBase):
def test_cron_trigger_name_uniqueness(self):
wf = self.workflow_create(self.wf_def)
self.cron_trigger_create(
"trigger", wf[0]["Name"], "{}", "5 * * * *")
self.assertRaises(
exceptions.CommandFailed,
self.cron_trigger_create,
"trigger",
"5 * * * *",
wf[0]["Name"],
"{}"
)
wf = self.workflow_create(self.wf_def, admin=False)
self.cron_trigger_create("trigger", wf[0]["Name"], "{}", "5 * * * *",
None, None, admin=False)
self.assertRaises(
exceptions.CommandFailed,
self.cron_trigger_create,
"trigger", wf[0]["Name"], "{}", "5 * * * *",
None, None, admin=False
)
def test_cron_trigger_isolation(self):
wf = self.workflow_create(self.wf_def)
self.cron_trigger_create(
"trigger", wf[0]["Name"], "{}", "5 * * * *")
alt_trs = self.highlander_alt_user("cron-trigger-list")
self.assertNotIn("trigger", [t["Name"] for t in alt_trs])
class ExecutionIsolationCLITests(base_v1.HighlanderClientTestBase):
def test_execution_isolation(self):
wf = self.workflow_create(self.wf_def)
ex = self.execution_create(wf[0]["Name"])
exec_id = self.get_value_of_field(ex, "ID")
execs = self.highlander_admin("execution-list")
self.assertIn(exec_id, [e["ID"] for e in execs])
alt_execs = self.highlander_alt_user("execution-list")
self.assertNotIn(exec_id, [e["ID"] for e in alt_execs])
def test_get_execution_from_another_tenant(self):
wf = self.workflow_create(self.wf_def)
ex = self.execution_create(wf[0]["Name"])
exec_id = self.get_value_of_field(ex, "ID")
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"execution-get",
params=exec_id
)
class EnvironmentIsolationCLITests(base_v1.HighlanderClientTestBase):
def setUp(self):
super(EnvironmentIsolationCLITests, self).setUp()
self.env_file = "env.yaml"
self.create_file("{0}".format(self.env_file),
"name: env\n"
"description: Test env\n"
"variables:\n"
" var: value")
def test_environment_name_uniqueness(self):
self.environment_create(self.env_file)
self.assertRaises(
exceptions.CommandFailed,
self.highlander_admin,
"environment-create",
params=self.env_file
)
self.environment_create(self.env_file, admin=False)
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"environment-create",
params=self.env_file
)
def test_environment_isolation(self):
env = self.environment_create(self.env_file)
env_name = self.get_value_of_field(env, "Name")
envs = self.highlander_admin("environment-list")
self.assertIn(env_name, [en["Name"] for en in envs])
alt_envs = self.highlander_alt_user("environment-list")
self.assertNotIn(env_name, [en["Name"] for en in alt_envs])
def test_get_env_from_another_tenant(self):
env = self.environment_create(self.env_file)
env_name = self.get_value_of_field(env, "Name")
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"environment-get",
params=env_name
)
def test_delete_env_from_another_tenant(self):
env = self.environment_create(self.env_file)
env_name = self.get_value_of_field(env, "Name")
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"environment-delete",
params=env_name
)
class ActionExecutionIsolationCLITests(base_v1.HighlanderClientTestBase):
def test_action_execution_isolation(self):
wf = self.workflow_create(self.wf_def)
wf_exec = self.execution_create(wf[0]["Name"])
direct_ex_id = self.get_value_of_field(wf_exec, 'ID')
self.wait_execution_success(direct_ex_id)
act_execs = self.highlander_admin("action-execution-list")
self.assertIn(wf[0]["Name"],
[act["Workflow name"] for act in act_execs])
alt_act_execs = self.highlander_alt_user("action-execution-list")
self.assertNotIn(wf[0]["Name"],
[act["Workflow name"] for act in alt_act_execs])
def test_get_action_execution_from_another_tenant(self):
wf = self.workflow_create(self.wf_def)
ex = self.execution_create(wf[0]["Name"])
exec_id = self.get_value_of_field(ex, "ID")
self.assertRaises(
exceptions.CommandFailed,
self.highlander_alt_user,
"action-execution-get",
params=exec_id
)
|
the-stack_0_9430 | import sys
import ASAPPpy.feature_extraction as fe
import ASAPPpy.chatbot as cht
from importlib import reload
word2vec_model = None
fasttext_model = None
ptlkb64_model = None
glove300_model = None
numberbatch_model = None
if __name__ == "__main__":
models_loaded = 0
while True:
if models_loaded == 0:
word2vec_model, fasttext_model, ptlkb64_model, glove300_model, numberbatch_model = fe.load_embeddings_models()
models_loaded = 1
cht.chatbot(word2vec_model=word2vec_model, fasttext_model=fasttext_model, ptlkb64_model=ptlkb64_model, glove300_model=glove300_model, numberbatch_model=numberbatch_model)
print("Press enter to re-run the script, CTRL-C to exit")
sys.stdin.readline()
reload(cht)
|
the-stack_0_9433 | """ Module to run the example files and report their success/failure results
Add a function to the ExampleTest class corresponding to an example script to
be tested.
This is done until a better strategy for parallel testing is implemented
"""
from pytest import mark
from .example_test_case import ExampleTestCase, get_example_script
from pysph.base.nnps import get_number_of_threads
@mark.skipif(get_number_of_threads() == 1, reason= "N_threads=1; OpenMP does not seem available.")
class TestOpenMPExamples(ExampleTestCase):
@mark.slow
def test_3Ddam_break_example(self):
dt = 2e-5; tf = 13*dt
serial_kwargs = dict(
timestep=dt, tf=tf, pfreq=100, test=None
)
extra_parallel_kwargs = dict(openmp=None)
# Note that we set nprocs=1 here since we do not want
# to run this with mpirun.
self.run_example(
get_example_script('sphysics/dambreak_sphysics.py'),
nprocs=1, atol=1e-14,
serial_kwargs=serial_kwargs,
extra_parallel_kwargs=extra_parallel_kwargs
)
@mark.slow
def test_elliptical_drop_example(self):
tf = 0.0076*0.25
serial_kwargs = dict(kernel='CubicSpline', tf=tf)
extra_parallel_kwargs = dict(openmp=None)
# Note that we set nprocs=1 here since we do not want
# to run this with mpirun.
self.run_example(
'elliptical_drop.py', nprocs=1, atol=1e-14,
serial_kwargs=serial_kwargs,
extra_parallel_kwargs=extra_parallel_kwargs
)
def test_ldcavity_example(self):
dt=1e-4; tf=200*dt
serial_kwargs = dict(timestep=dt, tf=tf, pfreq=500)
extra_parallel_kwargs = dict(openmp=None)
# Note that we set nprocs=1 here since we do not want
# to run this with mpirun.
self.run_example(
'cavity.py', nprocs=1, atol=1e-14,
serial_kwargs=serial_kwargs,
extra_parallel_kwargs=extra_parallel_kwargs
)
|
the-stack_0_9434 | # coding: utf-8
from __future__ import unicode_literals
from ..utils import month_by_name
from .common import InfoExtractor
class FranceInterIE(InfoExtractor):
_VALID_URL = r"https?://(?:www\.)?franceinter\.fr/emissions/(?P<id>[^?#]+)"
_TEST = {
"url": "https://www.franceinter.fr/emissions/affaires-sensibles/affaires-sensibles-07-septembre-2016",
"md5": "9e54d7bdb6fdc02a841007f8a975c094",
"info_dict": {
"id": "affaires-sensibles/affaires-sensibles-07-septembre-2016",
"ext": "mp3",
"title": "Affaire Cahuzac : le contentieux du compte en Suisse",
"description": "md5:401969c5d318c061f86bda1fa359292b",
"thumbnail": r"re:^https?://.*\.jpg",
"upload_date": "20160907",
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
r'(?s)<div[^>]+class=["\']page-diffusion["\'][^>]*>.*?<button[^>]+data-url=(["\'])(?P<url>(?:(?!\1).)+)\1',
webpage,
"video url",
group="url",
)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._html_search_meta(["og:image", "twitter:image"], webpage)
upload_date_str = self._search_regex(
r'class=["\']\s*cover-emission-period\s*["\'][^>]*>[^<]+\s+(\d{1,2}\s+[^\s]+\s+\d{4})<',
webpage,
"upload date",
fatal=False,
)
if upload_date_str:
upload_date_list = upload_date_str.split()
upload_date_list.reverse()
upload_date_list[1] = "%02d" % (
month_by_name(upload_date_list[1], lang="fr") or 0
)
upload_date_list[2] = "%02d" % int(upload_date_list[2])
upload_date = "".join(upload_date_list)
else:
upload_date = None
return {
"id": video_id,
"title": title,
"description": description,
"thumbnail": thumbnail,
"upload_date": upload_date,
"formats": [
{
"url": video_url,
"vcodec": "none",
}
],
}
|
the-stack_0_9435 | """ Tensorflow implementation of the face detection / alignment algorithm found at
https://github.com/kpzhang93/MTCNN_face_detection_alignment
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import string_types, iteritems
import numpy as np
import tensorflow as tf
#from math import floor
import cv2
import os
def layer(op):
'''Decorator for composable network layers.'''
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
class Network(object):
def __init__(self, inputs, trainable=True):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
self.setup()
def setup(self):
'''Construct the network. '''
raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
'''Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
data_dict = np.load(data_path, encoding='latin1').item() #pylint: disable=no-member
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in iteritems(data_dict[op_name]):
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
'''Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
'''
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, string_types):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
'''Returns the current network output.'''
return self.terminals[-1]
def get_unique_name(self, prefix):
'''Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
'''
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
'''Creates a new TensorFlow variable.'''
return tf.get_variable(name, shape, trainable=self.trainable)
def validate_padding(self, padding):
'''Verifies that the padding is one of the supported ones.'''
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
inp,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding='SAME',
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = int(inp.get_shape()[-1])
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
# This is the common-case. Convolve the input without any further complications.
output = convolve(inp, kernel)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def prelu(self, inp, name):
with tf.variable_scope(name):
i = int(inp.get_shape()[-1])
alpha = self.make_var('alpha', shape=(i,))
output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))
return output
@layer
def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding='SAME'):
self.validate_padding(padding)
return tf.nn.max_pool(inp,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def fc(self, inp, num_out, name, relu=True):
with tf.variable_scope(name):
input_shape = inp.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= int(d)
feed_in = tf.reshape(inp, [-1, dim])
else:
feed_in, dim = (inp, input_shape[-1].value)
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=name)
return fc
"""
Multi dimensional softmax,
refer to https://github.com/tensorflow/tensorflow/issues/210
compute softmax along the dimension of target
the native softmax only supports batch_size x dimension
"""
@layer
def softmax(self, target, axis, name=None):
max_axis = tf.reduce_max(target, axis, keep_dims=True)
target_exp = tf.exp(target-max_axis)
normalize = tf.reduce_sum(target_exp, axis, keep_dims=True)
softmax = tf.div(target_exp, normalize, name)
return softmax
class PNet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='PReLU1')
.max_pool(2, 2, 2, 2, name='pool1')
.conv(3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='PReLU2')
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='PReLU3')
.conv(1, 1, 2, 1, 1, relu=False, name='conv4-1')
.softmax(3,name='prob1'))
(self.feed('PReLU3') #pylint: disable=no-value-for-parameter
.conv(1, 1, 4, 1, 1, relu=False, name='conv4-2'))
class RNet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.fc(128, relu=False, name='conv4')
.prelu(name='prelu4')
.fc(2, relu=False, name='conv5-1')
.softmax(1,name='prob1'))
(self.feed('prelu4') #pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv5-2'))
class ONet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.max_pool(2, 2, 2, 2, name='pool3')
.conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4')
.prelu(name='prelu4')
.fc(256, relu=False, name='conv5')
.prelu(name='prelu5')
.fc(2, relu=False, name='conv6-1')
.softmax(1, name='prob1'))
(self.feed('prelu5') #pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv6-2'))
(self.feed('prelu5') #pylint: disable=no-value-for-parameter
.fc(10, relu=False, name='conv6-3'))
def create_mtcnn(sess, model_path):
if not model_path:
model_path,_ = os.path.split(os.path.realpath(__file__))
with tf.variable_scope('pnet'):
data = tf.placeholder(tf.float32, (None,None,None,3), 'input')
pnet = PNet({'data':data})
pnet.load(os.path.join(model_path, 'det1.npy'), sess)
with tf.variable_scope('rnet'):
data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
rnet = RNet({'data':data})
rnet.load(os.path.join(model_path, 'det2.npy'), sess)
with tf.variable_scope('onet'):
data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
onet = ONet({'data':data})
onet.load(os.path.join(model_path, 'det3.npy'), sess)
pnet_fun = lambda img : sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0':img})
rnet_fun = lambda img : sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0':img})
onet_fun = lambda img : sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0':img})
return pnet_fun, rnet_fun, onet_fun
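# Illustrative end-to-end usage (a sketch, not part of the original file; the
# image path and thresholds below are placeholders):
#
#   with tf.Graph().as_default():
#       sess = tf.Session()
#       pnet, rnet, onet = create_mtcnn(sess, None)
#   img = cv2.cvtColor(cv2.imread('face.jpg'), cv2.COLOR_BGR2RGB)
#   bounding_boxes, points = detect_face(img, 20, pnet, rnet, onet,
#                                        [0.6, 0.7, 0.7], 0.709)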
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
# im: input image
# minsize: minimum of faces' size
# pnet, rnet, onet: caffemodel
# threshold: threshold=[th1 th2 th3], th1-3 are three steps's threshold
# fastresize: resize img from last scale (using in high-resolution images) if fastresize==true
factor_count=0
total_boxes=np.empty((0,9))
points=np.empty(0)
h=img.shape[0]
w=img.shape[1]
minl=np.amin([h, w])
m=12.0/minsize
minl=minl*m
    # create scale pyramid
scales=[]
while minl>=12:
scales += [m*np.power(factor, factor_count)]
minl = minl*factor
factor_count += 1
# first stage
for j in range(len(scales)):
scale=scales[j]
hs=int(np.ceil(h*scale))
ws=int(np.ceil(w*scale))
im_data = imresample(img, (hs, ws))
im_data = (im_data-127.5)*0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0,2,1,3))
out = pnet(img_y)
out0 = np.transpose(out[0], (0,2,1,3))
out1 = np.transpose(out[1], (0,2,1,3))
boxes, _ = generateBoundingBox(out1[0,:,:,1].copy(), out0[0,:,:,:].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size>0 and pick.size>0:
boxes = boxes[pick,:]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox>0:
pick = nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[pick,:]
regw = total_boxes[:,2]-total_boxes[:,0]
regh = total_boxes[:,3]-total_boxes[:,1]
qq1 = total_boxes[:,0]+total_boxes[:,5]*regw
qq2 = total_boxes[:,1]+total_boxes[:,6]*regh
qq3 = total_boxes[:,2]+total_boxes[:,7]*regw
qq4 = total_boxes[:,3]+total_boxes[:,8]*regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:,4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:,0:4] = np.fix(total_boxes[:,0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
numbox = total_boxes.shape[0]
if numbox>0:
# second stage
tempimg = np.zeros((24,24,3,numbox))
for k in range(0,numbox):
tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
if (tmp.shape[0]>0 and tmp.shape[1]>0) or (tmp.shape[0]==0 and tmp.shape[1]==0):
tempimg[:,:,:,k] = imresample(tmp, (24, 24))
else:
return np.empty(0)  # np.empty() with no shape argument would raise a TypeError
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3,1,0,2))
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1,:]
ipass = np.where(score>threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
mv = out0[:,ipass[0]]
if total_boxes.shape[0]>0:
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick,:]
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:,pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
if numbox>0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48,48,3,numbox))
for k in range(0,numbox):
tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
if (tmp.shape[0]>0 and tmp.shape[1]>0) or (tmp.shape[0]==0 and tmp.shape[1]==0):
tempimg[:,:,:,k] = imresample(tmp, (48, 48))
else:
return np.empty(0)
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3,1,0,2))
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1,:]
points = out1
ipass = np.where(score>threshold[2])
points = points[:,ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
mv = out0[:,ipass[0]]
w = total_boxes[:,2]-total_boxes[:,0]+1
h = total_boxes[:,3]-total_boxes[:,1]+1
points[0:5,:] = np.tile(w,(5, 1))*points[0:5,:] + np.tile(total_boxes[:,0],(5, 1))-1
points[5:10,:] = np.tile(h,(5, 1))*points[5:10,:] + np.tile(total_boxes[:,1],(5, 1))-1
if total_boxes.shape[0]>0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick,:]
points = points[:,pick]
return total_boxes, points
def bulk_detect_face(images, detection_window_size_ratio, pnet, rnet, onet, threshold, factor):
# images: list of input images
# detection_window_size_ratio: minimum face size expressed as a fraction of the smaller image dimension
# pnet, rnet, onet: the three cascaded networks
# threshold: [th1, th2, th3], one detection threshold per stage (each in [0, 1])
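# Hedged usage sketch (threshold and factor values are illustrative):
#   results = bulk_detect_face(images, 0.1, pnet, rnet, onet, [0.6, 0.7, 0.7], 0.709)
#   # results[i] is None when no face passes the thresholds for images[i],
#   # otherwise a (total_boxes, points) tuple as in detect_face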
all_scales = [None] * len(images)
images_with_boxes = [None] * len(images)
for i in range(len(images)):
images_with_boxes[i] = {'total_boxes': np.empty((0, 9))}
# create scale pyramid
for index, img in enumerate(images):
all_scales[index] = []
h = img.shape[0]
w = img.shape[1]
minsize = int(detection_window_size_ratio * np.minimum(w, h))
factor_count = 0
minl = np.amin([h, w])
if minsize <= 12:
minsize = 12
m = 12.0 / minsize
minl = minl * m
while minl >= 12:
all_scales[index].append(m * np.power(factor, factor_count))
minl = minl * factor
factor_count += 1
# # # # # # # # # # # # #
# first stage - fast proposal network (pnet) to obtain face candidates
# # # # # # # # # # # # #
images_obj_per_resolution = {}
# TODO: use some type of rounding to number module 8 to increase probability that pyramid images will have the same resolution across input images
for index, scales in enumerate(all_scales):
h = images[index].shape[0]
w = images[index].shape[1]
for scale in scales:
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
if (ws, hs) not in images_obj_per_resolution:
images_obj_per_resolution[(ws, hs)] = []
im_data = imresample(images[index], (hs, ws))
im_data = (im_data - 127.5) * 0.0078125
img_y = np.transpose(im_data, (1, 0, 2)) # caffe uses different dimensions ordering
images_obj_per_resolution[(ws, hs)].append({'scale': scale, 'image': img_y, 'index': index})
for resolution in images_obj_per_resolution:
images_per_resolution = [i['image'] for i in images_obj_per_resolution[resolution]]
outs = pnet(images_per_resolution)
for index in range(len(outs[0])):
scale = images_obj_per_resolution[resolution][index]['scale']
image_index = images_obj_per_resolution[resolution][index]['index']
out0 = np.transpose(outs[0][index], (1, 0, 2))
out1 = np.transpose(outs[1][index], (1, 0, 2))
boxes, _ = generateBoundingBox(out1[:, :, 1].copy(), out0[:, :, :].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
images_with_boxes[image_index]['total_boxes'] = np.append(images_with_boxes[image_index]['total_boxes'],
boxes,
axis=0)
for index, image_obj in enumerate(images_with_boxes):
numbox = image_obj['total_boxes'].shape[0]
if numbox > 0:
h = images[index].shape[0]
w = images[index].shape[1]
pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Union')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
regw = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0]
regh = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1]
qq1 = image_obj['total_boxes'][:, 0] + image_obj['total_boxes'][:, 5] * regw
qq2 = image_obj['total_boxes'][:, 1] + image_obj['total_boxes'][:, 6] * regh
qq3 = image_obj['total_boxes'][:, 2] + image_obj['total_boxes'][:, 7] * regw
qq4 = image_obj['total_boxes'][:, 3] + image_obj['total_boxes'][:, 8] * regh
image_obj['total_boxes'] = np.transpose(np.vstack([qq1, qq2, qq3, qq4, image_obj['total_boxes'][:, 4]]))
image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
image_obj['total_boxes'][:, 0:4] = np.fix(image_obj['total_boxes'][:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
numbox = image_obj['total_boxes'].shape[0]
tempimg = np.zeros((24, 24, 3, numbox))
if numbox > 0:
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if (tmp.shape[0] > 0 and tmp.shape[1] > 0) or (tmp.shape[0] == 0 and tmp.shape[1] == 0):
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
return np.empty(0)  # np.empty() with no shape argument would raise a TypeError
tempimg = (tempimg - 127.5) * 0.0078125
image_obj['rnet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
# # # # # # # # # # # # #
# second stage - refinement of face candidates with rnet
# # # # # # # # # # # # #
bulk_rnet_input = np.empty((0, 24, 24, 3))
for index, image_obj in enumerate(images_with_boxes):
if 'rnet_input' in image_obj:
bulk_rnet_input = np.append(bulk_rnet_input, image_obj['rnet_input'], axis=0)
out = rnet(bulk_rnet_input)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
i = 0
for index, image_obj in enumerate(images_with_boxes):
if 'rnet_input' not in image_obj:
continue
rnet_input_count = image_obj['rnet_input'].shape[0]
score_per_image = score[i:i + rnet_input_count]
out0_per_image = out0[:, i:i + rnet_input_count]
ipass = np.where(score_per_image > threshold[1])
image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
np.expand_dims(score_per_image[ipass].copy(), 1)])
mv = out0_per_image[:, ipass[0]]
if image_obj['total_boxes'].shape[0] > 0:
h = images[index].shape[0]
w = images[index].shape[1]
pick = nms(image_obj['total_boxes'], 0.7, 'Union')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv[:, pick]))
image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
numbox = image_obj['total_boxes'].shape[0]
if numbox > 0:
tempimg = np.zeros((48, 48, 3, numbox))
image_obj['total_boxes'] = np.fix(image_obj['total_boxes']).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if (tmp.shape[0] > 0 and tmp.shape[1] > 0) or (tmp.shape[0] == 0 and tmp.shape[1] == 0):
tempimg[:, :, :, k] = imresample(tmp, (48, 48))
else:
return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
image_obj['onet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
i += rnet_input_count
# # # # # # # # # # # # #
# third stage - further refinement and facial landmarks positions with onet
# # # # # # # # # # # # #
bulk_onet_input = np.empty((0, 48, 48, 3))
for index, image_obj in enumerate(images_with_boxes):
if 'onet_input' in image_obj:
bulk_onet_input = np.append(bulk_onet_input, image_obj['onet_input'], axis=0)
out = onet(bulk_onet_input)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
i = 0
ret = []
for index, image_obj in enumerate(images_with_boxes):
if 'onet_input' not in image_obj:
ret.append(None)
continue
onet_input_count = image_obj['onet_input'].shape[0]
out0_per_image = out0[:, i:i + onet_input_count]
score_per_image = score[i:i + onet_input_count]
points_per_image = points[:, i:i + onet_input_count]
ipass = np.where(score_per_image > threshold[2])
points_per_image = points_per_image[:, ipass[0]]
image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
np.expand_dims(score_per_image[ipass].copy(), 1)])
mv = out0_per_image[:, ipass[0]]
w = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0] + 1
h = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1] + 1
points_per_image[0:5, :] = np.tile(w, (5, 1)) * points_per_image[0:5, :] + np.tile(
image_obj['total_boxes'][:, 0], (5, 1)) - 1
points_per_image[5:10, :] = np.tile(h, (5, 1)) * points_per_image[5:10, :] + np.tile(
image_obj['total_boxes'][:, 1], (5, 1)) - 1
if image_obj['total_boxes'].shape[0] > 0:
image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv))
pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Min')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
points_per_image = points_per_image[:, pick]
ret.append((image_obj['total_boxes'], points_per_image))
else:
ret.append(None)
i += onet_input_count
return ret
# function [boundingbox] = bbreg(boundingbox,reg)
def bbreg(boundingbox,reg):
# calibrate bounding boxes
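# Each offset in `reg` is a fraction of the box width/height, so the corrected corner is
# corner + offset * size. Hypothetical example: a box (0, 0, 9, 9) (w = h = 10) with a
# reg row of (0.1, 0.1, -0.1, -0.1) is tightened to (1, 1, 8, 8).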
if reg.shape[1]==1:
reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:,2]-boundingbox[:,0]+1
h = boundingbox[:,3]-boundingbox[:,1]+1
b1 = boundingbox[:,0]+reg[:,0]*w
b2 = boundingbox[:,1]+reg[:,1]*h
b3 = boundingbox[:,2]+reg[:,2]*w
b4 = boundingbox[:,3]+reg[:,3]*h
boundingbox[:,0:4] = np.transpose(np.vstack([b1, b2, b3, b4 ]))
return boundingbox
def generateBoundingBox(imap, reg, scale, t):
# use heatmap to generate bounding boxes
stride=2
cellsize=12
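# Each heatmap cell corresponds to a 12x12 (cellsize) window sampled with stride 2 in the
# resampled image; dividing by `scale` maps that window back to original-image coordinates.
# Worked example: a cell at index 5 with scale 0.5 maps to roughly pixels 22 through 44.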
imap = np.transpose(imap)
dx1 = np.transpose(reg[:,:,0])
dy1 = np.transpose(reg[:,:,1])
dx2 = np.transpose(reg[:,:,2])
dy2 = np.transpose(reg[:,:,3])
y, x = np.where(imap >= t)
if y.shape[0]==1:
dx1 = np.flipud(dx1)
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
score = imap[(y,x)]
reg = np.transpose(np.vstack([ dx1[(y,x)], dy1[(y,x)], dx2[(y,x)], dy2[(y,x)] ]))
if reg.size==0:
reg = np.empty((0,3))
bb = np.transpose(np.vstack([y,x]))
q1 = np.fix((stride*bb+1)/scale)
q2 = np.fix((stride*bb+cellsize-1+1)/scale)
boundingbox = np.hstack([q1, q2, np.expand_dims(score,1), reg])
return boundingbox, reg
# function pick = nms(boxes,threshold,type)
def nms(boxes, threshold, method):
if boxes.size==0:
return np.empty((0,3))
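# Greedy non-maximum suppression: boxes are visited in descending score order and any
# remaining box whose overlap with the kept box exceeds `threshold` is discarded.
# With method 'Union' the overlap is intersection-over-union; with 'Min' it is the
# intersection divided by the smaller of the two areas (more aggressive suppression).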
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
s = boxes[:,4]
area = (x2-x1+1) * (y2-y1+1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size>0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, xx2-xx1+1)
h = np.maximum(0.0, yy2-yy1+1)
inter = w * h
if method == 'Min':  # compare by value; 'is' checks object identity and is unreliable for strings
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o<=threshold)]
pick = pick[0:counter]
return pick
# function [dy edy dx edx y ey x ex tmpw tmph] = pad(total_boxes,w,h)
def pad(total_boxes, w, h):
# compute the padding coordinates (pad the bounding boxes to square)
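# (dy, edy, dx, edx) index into the zero-padded temporary crop, while (y, ey, x, ex) index
# into the source image; boxes extending past the image border are clipped on the image
# side and the clipped amount is absorbed by the padding offsets (1-based indexing,
# following the original MATLAB implementation).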
tmpw = (total_boxes[:,2]-total_boxes[:,0]+1).astype(np.int32)
tmph = (total_boxes[:,3]-total_boxes[:,1]+1).astype(np.int32)
numbox = total_boxes.shape[0]
dx = np.ones((numbox), dtype=np.int32)
dy = np.ones((numbox), dtype=np.int32)
edx = tmpw.copy().astype(np.int32)
edy = tmph.copy().astype(np.int32)
x = total_boxes[:,0].copy().astype(np.int32)
y = total_boxes[:,1].copy().astype(np.int32)
ex = total_boxes[:,2].copy().astype(np.int32)
ey = total_boxes[:,3].copy().astype(np.int32)
tmp = np.where(ex>w)
edx.flat[tmp] = np.expand_dims(-ex[tmp]+w+tmpw[tmp],1)
ex[tmp] = w
tmp = np.where(ey>h)
edy.flat[tmp] = np.expand_dims(-ey[tmp]+h+tmph[tmp],1)
ey[tmp] = h
tmp = np.where(x<1)
dx.flat[tmp] = np.expand_dims(2-x[tmp],1)
x[tmp] = 1
tmp = np.where(y<1)
dy.flat[tmp] = np.expand_dims(2-y[tmp],1)
y[tmp] = 1
return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
# function [bboxA] = rerec(bboxA)
def rerec(bboxA):
# convert bboxA to square
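# The square keeps the original centre and uses the longer side l = max(w, h);
# e.g. a 10x20 box becomes 20x20 about the same centre (its x-range grows from [0, 10]
# to [-5, 15]).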
h = bboxA[:,3]-bboxA[:,1]
w = bboxA[:,2]-bboxA[:,0]
l = np.maximum(w, h)
bboxA[:,0] = bboxA[:,0]+w*0.5-l*0.5
bboxA[:,1] = bboxA[:,1]+h*0.5-l*0.5
bboxA[:,2:4] = bboxA[:,0:2] + np.transpose(np.tile(l,(2,1)))
return bboxA
def imresample(img, sz):
im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) #@UndefinedVariable
return im_data
# This method is kept for debugging purpose
# h=img.shape[0]
# w=img.shape[1]
# hs, ws = sz
# dx = float(w) / ws
# dy = float(h) / hs
# im_data = np.zeros((hs,ws,3))
# for a1 in range(0,hs):
# for a2 in range(0,ws):
# for a3 in range(0,3):
# im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3]
# return im_data
|
the-stack_0_9436 | #!/usr/bin/python
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""Runs csmith, a C fuzzer, and looks for bugs.
CSMITH_PATH should be set to something like /usr/local/include/csmith
"""
import os
import sys
import shutil
import random
from distutils.spawn import find_executable
from subprocess import check_call, Popen, PIPE, CalledProcessError
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(script_dir))))
from tools import shared
# can add flags like --no-threads --ion-offthread-compile=off
engine = eval('shared.' + sys.argv[1]) if len(sys.argv) > 1 else shared.JS_ENGINES[0]
print('testing js engine', engine)
TEST_BINARYEN = 1
CSMITH = os.environ.get('CSMITH', find_executable('csmith'))
assert CSMITH, 'Could not find CSmith on your PATH. Please set the environment variable CSMITH.'
CSMITH_PATH = os.environ.get('CSMITH_PATH', '/usr/include/csmith')
assert os.path.exists(CSMITH_PATH), 'Please set the environment variable CSMITH_PATH.'
CSMITH_CFLAGS = ['-I', CSMITH_PATH]
filename = os.path.join(os.getcwd(), 'temp_fuzzcode' + str(os.getpid()) + '_')
shared.DEFAULT_TIMEOUT = 5
tried = 0
notes = {'invalid': 0, 'embug': 0}
fails = 0
while 1:
if random.random() < 0.666:
opts = '-O' + str(random.randint(0, 3))
else:
if random.random() < 0.5:
opts = '-Os'
else:
opts = '-Oz'
print('opt level:', opts)
llvm_opts = []
if random.random() < 0.5:
llvm_opts = ['--llvm-opts', str(random.randint(0, 3))]
print('Tried %d, notes: %s' % (tried, notes))
print('1) Generate source')
extra_args = []
if random.random() < 0.5:
extra_args += ['--no-math64']
extra_args += ['--no-bitfields'] # due to pnacl bug 4027, "LLVM ERROR: can't convert calls with illegal types"
# if random.random() < 0.5: extra_args += ['--float'] # XXX hits undefined behavior on float=>int conversions (too big to fit)
if random.random() < 0.5:
extra_args += ['--max-funcs', str(random.randint(10, 30))]
suffix = '.c'
COMP = shared.CLANG_CC
fullname = filename + suffix
check_call([CSMITH, '--no-volatiles', '--no-packed-struct'] + extra_args,
# ['--max-block-depth', '2', '--max-block-size', '2', '--max-expr-complexity', '2', '--max-funcs', '2'],
stdout=open(fullname, 'w'))
print('1) Generate source... %.2f K' % (len(open(fullname).read()) / 1024.))
tried += 1
print('2) Compile natively')
shared.try_delete(filename)
try:
shared.run_process([COMP, '-m32', opts, fullname, '-o', filename + '1'] + CSMITH_CFLAGS + ['-w']) # + shared.get_cflags()
except CalledProcessError:
print('Failed to compile natively using clang')
notes['invalid'] += 1
continue
shared.run_process([COMP, '-m32', opts, '-emit-llvm', '-c', fullname, '-o', filename + '.bc'] + CSMITH_CFLAGS + shared.get_cflags() + ['-w'])
shared.run_process([shared.path_from_root('tools', 'nativize_llvm.py'), filename + '.bc'], stderr=PIPE)
shutil.move(filename + '.bc.run', filename + '2')
shared.run_process([COMP, fullname, '-o', filename + '3'] + CSMITH_CFLAGS + ['-w'])
print('3) Run natively')
try:
correct1 = shared.timeout_run(Popen([filename + '1'], stdout=PIPE, stderr=PIPE), 3)
if 'Segmentation fault' in correct1 or len(correct1) < 10:
raise Exception('segfault')
correct2 = shared.timeout_run(Popen([filename + '2'], stdout=PIPE, stderr=PIPE), 3)
if 'Segmentation fault' in correct2 or len(correct2) < 10:
raise Exception('segfault')
correct3 = shared.timeout_run(Popen([filename + '3'], stdout=PIPE, stderr=PIPE), 3)
if 'Segmentation fault' in correct3 or len(correct3) < 10:
raise Exception('segfault')
if correct1 != correct3:
raise Exception('clang opts change result')
except Exception as e:
print('Failed or infinite looping in native, skipping', e)
notes['invalid'] += 1
continue
fail_output_name = 'newfail_%d_%d%s' % (os.getpid(), fails, suffix)
print('4) Compile JS-ly and compare')
def try_js(args=[]):
shared.try_delete(filename + '.js')
js_args = [shared.EMCC, fullname, '-o', filename + '.js'] + [opts] + llvm_opts + CSMITH_CFLAGS + args + ['-w']
if TEST_BINARYEN:
if random.random() < 0.5:
js_args += ['-g']
if random.random() < 0.5:
# pick random passes
BINARYEN_EXTRA_PASSES = [
"code-pushing",
"duplicate-function-elimination",
"dce",
"remove-unused-brs",
"remove-unused-names",
"local-cse",
"optimize-instructions",
"post-emscripten",
"precompute",
"simplify-locals",
"simplify-locals-nostructure",
"vacuum",
"coalesce-locals",
"reorder-locals",
"merge-blocks",
"remove-unused-module-elements",
"memory-packing",
]
passes = []
while 1:
passes.append(random.choice(BINARYEN_EXTRA_PASSES))
if random.random() < 0.1:
break
js_args += ['-s', 'BINARYEN_EXTRA_PASSES="' + ','.join(passes) + '"']
if random.random() < 0.5:
js_args += ['-s', 'ALLOW_MEMORY_GROWTH=1']
if random.random() < 0.5 and 'ALLOW_MEMORY_GROWTH=1' not in js_args and 'BINARYEN=1' not in js_args:
js_args += ['-s', 'MAIN_MODULE=1']
if random.random() < 0.25:
js_args += ['-s', 'INLINING_LIMIT=1'] # inline nothing, for more call interaction
if random.random() < 0.5:
js_args += ["--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2"]
if random.random() < 0.5:
js_args += ['-s', 'ASSERTIONS=1']
print('(compile)', ' '.join(js_args))
short_args = [shared.EMCC, fail_output_name] + js_args[5:]
escaped_short_args = map(lambda x: ("'" + x + "'") if '"' in x else x, short_args)
open(fullname, 'a').write('\n// ' + ' '.join(escaped_short_args) + '\n\n')
try:
shared.run_process(js_args)
assert os.path.exists(filename + '.js')
return js_args
except Exception:
return False
def execute_js(engine):
print('(run in %s)' % engine)
try:
js = shared.timeout_run(Popen(shared.NODE_JS + [filename + '.js'], stdout=PIPE, stderr=PIPE), 15 * 60)
except Exception:
print('failed to run in primary')
return False
js = js.split('\n')[0] + '\n' # remove any extra printed stuff (node workarounds)
return correct1 == js or correct2 == js
def fail():
global fails
print("EMSCRIPTEN BUG")
notes['embug'] += 1
fails += 1
shutil.copyfile(fullname, fail_output_name)
js_args = try_js()
if not js_args:
fail()
continue
if not execute_js(engine):
fail()
continue
|
the-stack_0_9440 | import pandas as pd
import csv as csv
import glob
#Script that loops through sample CSV data and writes EDA results to .txt file
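# For reference: the script collects ./sample_data/*.csv and, for each file, appends the
# pandas DataFrame.info() summary (columns, dtypes, non-null counts) to eda_info.txt.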
path = 'sample_data'
files = glob.glob(path + "/*.csv")
def eda():
try:
print("Writing sample data exploratory analysis to file 'eda_info.txt'...")
with open('eda_info.txt', 'w') as f:
for filename in files:
f.write(filename + "\n")
data = pd.read_csv(filename)
data.info(verbose = True, buf=f)
print("File successfully written!")
except Exception:
print("File not found.")
finally:
# The file is closed automatically by the 'with' block; an explicit f.close() here
# would raise a NameError whenever open() failed before f was bound.
print("File closed.")
eda() |
the-stack_0_9447 | # -*- python -*-
#
# pyqmc.utils.gafqmc_cost module
#
# Wirawan Purwanto
# Created: 20130923
#
#
"""
pyqmc.utils.gafqmc_cost
Cost estimator and analyzer for GAFQMC code.
"""
import numpy
from pyqmc.utils import cost
from pyqmc.utils import linalg_cost
class gafqmc_sparse1_cost_estimator(cost.qmc_cost_estimator):
"""Cost estimator specifically for GAFQMC calculation.
Names of precomputed objects:
* Vijkl = four-indexed two-body operator
* Vss = product of two trial wfn orbitals (same spin) with Vijkl
* Vxs = product of two trial wfn orbitals (opposite spins) with Vijkl
* Ls = product of one trial wfn orbital (for all spins) with L
one-body operator.
"""
# (SPARSE) PRECOMPUTED MATRICES
# Default sparse matrix density (ballpark estimate)
# These values MAY NOT BE CORRECT for your particular calculation!
# These are ok for GTO-basis calculations without frozen core:
dens_Vss = 0.5
dens_Vxs = 0.5
dens_Ls = 0.7
tpref_gf1_ovlp = 9.74193418e-09
tpref_gf1_ovlpinv = 2.25385979e-09
tpref_FB = 1.83499926e-08
tpref_Elocal = 1.09604036e-08
# Turns out, empirically Vss and Vxs densities are the same
def __init__(self):
self.linalg = linalg_cost.linalg_cost_ops()
def compute_mem_cost(self, Print=False):
"""
Estimate a calculation's MEMORY cost based on the given input sizes.
For original sparse method due to Wissam.
Required input:
- nbasis
- nflds
- nwlkmax
- nptot, nup, ndn
- npsitdet
Output (stored as attributes):
- mem_wlk = memory for the walkers
- mem_Vss = memory for the same-spin two-body products (Vuu, Vdd)
- mem_Vxs = memory for the opposite-spin two-body products (Vud)
- mem_Ls = memory for the one-body (Ls) products
"""
nwlkmax_proc = self.get_nwlkmax_proc
(M, Nptot, Nu, Nd, F, D) = self.params_wlkr(0)
(dpc, dp, it) = self.params_sys(0)
self.wlk_size = Nptot * M * dpc
self.mem_wlk = self.wlk_size * nwlkmax_proc
self.mem_Lvec = self.get_hsop_dim * F * dp # so far it is double precision
# number of elements in the sparse objects, per determinant
self.count_Vuu_det = self.dens_Vss * M**2 * Nu**2
self.count_Vdd_det = self.dens_Vss * M**2 * Nd**2
self.count_Vud_det = self.dens_Vxs * M**2 * Nu*Nd
self.count_Ls_det = self.dens_Ls * F * M * (Nu+Nd)
# number of elements in the sparse objects, ALL determinants
self.count_Vuu = D * self.count_Vuu_det
self.count_Vdd = D * self.count_Vdd_det
self.count_Vud = D * self.count_Vud_det
self.count_Ls = self.dens_Ls * D * F * M * (Nu+Nd)
# Sparse object are currently stored as records, so here are their sizes:
self.size_Vuu1 = dp + 4 * it
self.size_Vdd1 = dp + 4 * it
self.size_Vud1 = dp + 4 * it
self.size_Ls1 = dp + 2 * it
# memory required by the sparse objects, ALL determinants
self.mem_Vss = (self.count_Vuu + self.count_Vdd) * self.size_Vuu1
self.mem_Vxs = (self.count_Vud) * self.size_Vud1
self.mem_Ls = (self.count_Ls) * self.size_Ls1
if Print:
self.printout_mem()
def printout_mem(self, out=None):
"""
Prints out a report for memory estimate.
"""
# Tentative way to compute naive multithreading task sharing
def task_div(self, D, th):
"""`Task divide-and-share':
The division of D iterations into th threads ---
to approximately account for imperfect task balance in OpenMP way.
"""
inv_th = 1.0 / th
return numpy.ceil(D * inv_th)
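# Worked example: D = 10 determinants shared by th = 4 threads gives ceil(10 / 4) = 3
# iterations charged per thread, i.e. the cost model follows the most-loaded thread.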
def compute_step_cost(self, Print=False):
# Placeholder: will be replaced by fancier stuff later
# for more "symbolic" feel, or function that can give more actual
# estimate of the operation cost.
# For now these are merely rough operation counts multiplied by empirical time prefactors.
LA = self.linalg
mxm, mxv, vdot, mmtrace, tmmtrace = LA.mxm, LA.mxv, LA.vdot, LA.mmtrace, LA.tmmtrace
(M, Nptot, Nu, Nd, F, D) = self.params_wlkr(0)
try:
th = self.num_threads
except:
th = 1
d_fac = self.task_div(D, th)
#self.cost_pre_Q = d_fac * F * mxm(M,N,N)
#self.cost_Theta = d_fac * mxm(M,N,N) # -- not considered for now
self.ops_gf1_ovlp = d_fac * (mxm(Nu,Nu,M) + mxm(Nd,Nd,M)) # matmul of Psi^hc * Phi
self.ops_gf1_ovlpinv = d_fac * (mxm(Nu,Nu,Nu) + mxm(Nd,Nd,Nd)) # the inverse of ovlp matrix
self.ops_FB = d_fac * self.dens_Ls * F * (mmtrace(M,Nu) + mmtrace(M,Nd)) # the trace part
self.ops_Elocal = d_fac * self.dens_Vss * (2*tmmtrace(M,M,Nu,Nu) + 2*tmmtrace(M,M,Nd,Nd) + tmmtrace(M,M,Nu,Nd)) # the trace part
self.cost_gf1_ovlp = self.tpref_gf1_ovlp * self.ops_gf1_ovlp
self.cost_gf1_ovlpinv = self.tpref_gf1_ovlpinv * self.ops_gf1_ovlpinv
self.cost_FB = self.tpref_FB * self.ops_FB
self.cost_Elocal = self.tpref_Elocal * self.ops_Elocal
if Print:
self.printout_compute()
|
the-stack_0_9449 | from typing import Any, Dict, List
from sciwing.data.seq_label import SeqLabel
from sciwing.data.line import Line
from sciwing.data.token import Token
from sciwing.data.datasets_manager import DatasetsManager
from sciwing.metrics.BaseMetric import BaseMetric
import subprocess
import wasabi
from collections import defaultdict, Counter
import pathlib
import os
import numpy as np
import uuid
class SummarizationMetrics(BaseMetric):
"""
Computes ROUGE-1, ROUGE-2 and ROUGE-L for every namespace.
For each batch it also writes the input line, the ground-truth summary tokens and the
predicted summary tokens to a text file, so the scores used to select the best model
can be inspected alongside the generated summaries.
"""
def __init__(
self,
datasets_manager: DatasetsManager,
predicted_tags_namespace_prefix="predicted_tags",
words_namespace: str = "tokens",
):
super(SummarizationMetrics, self).__init__(datasets_manager=datasets_manager)
self.datasets_manager = datasets_manager
self.label_namespaces = datasets_manager.label_namespaces
self.words_namespace = words_namespace
self.namespace_to_vocab = self.datasets_manager.namespace_to_vocab
self.predicted_tags_namespace_prefix = predicted_tags_namespace_prefix
self.msg_printer = wasabi.Printer()
self.rouge_1_counter: Dict[str, List[float]] = defaultdict(list)
self.rouge_2_counter: Dict[str, List[float]] = defaultdict(list)
self.rouge_l_counter: Dict[str, List[float]] = defaultdict(list)
def calc_metric(
self, lines: List[Line], labels: List[Line], model_forward_dict: Dict[str, Any]
) -> None:
# line_tokens: List[List[Token]] = [line.tokens["tokens"] for line in lines]
# true_label_text = [label.text for label in labels]
cwd = os.path.dirname(os.path.realpath(__file__))
for namespace in [self.words_namespace]:
predicted_tags = model_forward_dict.get(
f"{self.predicted_tags_namespace_prefix}_{namespace}"
)
true_summary_tokens: List[List[Token]] = [
summary.tokens[namespace] for summary in labels
]
true_summary_token_strs: List[List[str]] = [
[token.text for token in tokens] for tokens in true_summary_tokens
]
namespace_filename = f"{cwd}/{str(uuid.uuid4())}_{namespace}_pred.txt"
namespace_filename = pathlib.Path(namespace_filename)
predicted_summary_token_strs = []
with open(namespace_filename, "w") as fp:
for line, true_summary_token_strs_, predicted_tags_ in zip(
lines, true_summary_token_strs, predicted_tags
):
predicted_summary_token_strs_ = []
for predicted_tag in predicted_tags_:
predicted_tag = self.namespace_to_vocab[
namespace
].get_token_from_idx(predicted_tag)
predicted_summary_token_strs_.append(predicted_tag)
predicted_summary_token_strs.append(predicted_summary_token_strs_)
fp.write(line.text)
fp.write("Ground Truth")
fp.write(
" ".join([f'"{token}"' for token in true_summary_token_strs_])
)
fp.write("Predicted")
fp.write(
" ".join(
[f'"{token}"' for token in predicted_summary_token_strs_]
)
)
fp.write("\n")
for true_summary_token_strs_, predicted_summary_token_strs_ in zip(
true_summary_token_strs, predicted_summary_token_strs
):
rouge_1 = self._rouge_n(
predicted_summary_token_strs_, true_summary_token_strs_, 1
)
rouge_2 = self._rouge_n(
predicted_summary_token_strs_, true_summary_token_strs_, 2
)
rouge_l = self._rouge_l(
predicted_summary_token_strs_, true_summary_token_strs_
)
rouge_1 = np.round(rouge_1, decimals=3)
rouge_2 = np.round(rouge_2, decimals=3)
rouge_l = np.round(rouge_l, decimals=3)
# update the counter
self.rouge_1_counter[namespace].append(rouge_1)
self.rouge_2_counter[namespace].append(rouge_2)
self.rouge_l_counter[namespace].append(rouge_l)
def get_metric(self) -> Dict[str, Any]:
metrics = {}
for namespace in [self.words_namespace]:
rouge_1s = self.rouge_1_counter[namespace]
rouge_2s = self.rouge_2_counter[namespace]
rouge_ls = self.rouge_l_counter[namespace]
rouge_1 = sum(rouge_1s) / len(rouge_1s)
rouge_2 = sum(rouge_2s) / len(rouge_2s)
rouge_l = sum(rouge_ls) / len(rouge_ls)
rouge_1 = np.round(rouge_1, decimals=3)
rouge_2 = np.round(rouge_2, decimals=3)
rouge_l = np.round(rouge_l, decimals=3)
metrics[namespace] = {
"rouge_1": rouge_1,
"rouge_2": rouge_2,
"rouge_l": rouge_l,
}
return metrics
def report_metrics(self, report_type: str = "wasabi") -> Any:
reports = {}
if report_type == "wasabi":
for namespace in [self.words_namespace]:
metric = self.get_metric()[namespace]
rouge_1 = metric["rouge_1"]
rouge_2 = metric["rouge_2"]
rouge_l = metric["rouge_l"]
# build table
header_row = ["Metric", "Value"]
rows = [
("Rouge_1", rouge_1),
("Rouge_2", rouge_2),
("Rouge_l", rouge_l),
]
table = wasabi.table(rows, header=header_row, divider=True)
reports[namespace] = table
return reports
def reset(self):
self.rouge_1_counter = defaultdict(list)
self.rouge_2_counter = defaultdict(list)
self.rouge_l_counter = defaultdict(list)
def _calc_f1(self, matches, count_for_recall, count_for_precision, alpha):
def safe_div(x1, x2):
return 0 if x2 == 0 else x1 / x2
recall = safe_div(matches, count_for_recall)
precision = safe_div(matches, count_for_precision)
denom = (1.0 - alpha) * precision + alpha * recall
return safe_div(precision * recall, denom)
def _lcs(self, a, b):
longer = a
base = b
if len(longer) < len(base):
longer, base = base, longer
if len(base) == 0:
return 0
row = [0] * len(base)
for c_a in longer:
left = 0
upper_left = 0
for i, c_b in enumerate(base):
up = row[i]
if c_a == c_b:
value = upper_left + 1
else:
value = max(left, up)
row[i] = value
left = value
upper_left = up
return left
def _len_ngram(self, words, n):
return max(len(words) - n + 1, 0)
def _ngram_iter(self, words, n):
for i in range(self._len_ngram(words, n)):
n_gram = words[i : i + n]
yield tuple(n_gram)
def _count_ngrams(self, words, n):
c = Counter(self._ngram_iter(words, n))
return c
def _count_overlap(self, summary_ngrams, reference_ngrams):
result = 0
for k, v in summary_ngrams.items():
result += min(v, reference_ngrams[k])
return result
def _rouge_n(self, pred_summary, true_summary, n, alpha=0.5):
"""
Calculate ROUGE-N score.
Parameters
----------
pred_summary: list of str
tokens of the generated summary
true_summary: list of str
tokens of the reference summary
n: int
n-gram size; n=1 computes ROUGE-1, n=2 computes ROUGE-2
alpha: float (0~1)
alpha -> 0: recall is more important
alpha -> 1: precision is more important
F = 1/(alpha * (1/P) + (1 - alpha) * (1/R))
Returns
-------
f1: float
f1 score
"""
pred_ngrams = self._count_ngrams(pred_summary, n)
r_ngrams = self._count_ngrams(true_summary, n)
matches = self._count_overlap(pred_ngrams, r_ngrams)
count_for_recall = self._len_ngram(true_summary, n)
count_for_prec = self._len_ngram(pred_summary, n)
f1 = self._calc_f1(matches, count_for_recall, count_for_prec, alpha)
return f1
def _rouge_l(self, pred_summary, true_summary, alpha=0.5):
"""
Calculate ROUGE-L score.
Parameters
----------
pred_summary: list of str
tokens of the generated summary
true_summary: list of str
tokens of the reference summary
alpha: float (0~1)
alpha -> 0: recall is more important
alpha -> 1: precision is more important
F = 1/(alpha * (1/P) + (1 - alpha) * (1/R))
Returns
-------
f1: float
f1 score
"""
matches = self._lcs(true_summary, pred_summary)
count_for_recall = len(true_summary)
count_for_prec = len(pred_summary)
f1 = self._calc_f1(matches, count_for_recall, count_for_prec, alpha)
return f1
|
the-stack_0_9452 | import logging
from logging import getLogger
from typing import Sequence, Optional
import base58
from indy_crypto import IndyCryptoError
from crypto.bls.bls_crypto import GroupParams, BlsGroupParamsLoader, BlsCryptoVerifier, BlsCryptoSigner
from indy_crypto.bls import BlsEntity, Generator, VerKey, SignKey, Bls, \
Signature, MultiSignature, ProofOfPossession
logging.getLogger("indy_crypto").setLevel(logging.WARNING)
logger = getLogger()
class BlsGroupParamsLoaderIndyCrypto(BlsGroupParamsLoader):
def load_group_params(self) -> GroupParams:
group_name = 'generator'
g = "3LHpUjiyFC2q2hD7MnwwNmVXiuaFbQx2XkAFJWzswCjgN1utjsCeLzHsKk1nJvFEaS4fcrUmVAkdhtPCYbrVyATZcmzwJReTcJqwqBCPTmTQ9uWPwz6rEncKb2pYYYFcdHa8N17HzVyTqKfgPi4X9pMetfT3A5xCHq54R2pDNYWVLDX"
return GroupParams(group_name, g)
class IndyCryptoBlsUtils:
SEED_LEN = 32
@staticmethod
def bls_to_str(v: BlsEntity) -> str:
try:
return base58.b58encode(v.as_bytes()).decode("utf-8")
except ValueError:
logger.warning('BLS: BLS Entity can not be encoded as base58')
@staticmethod
def bls_from_str(v: str, cls) -> Optional[BlsEntity]:
try:
bts = base58.b58decode(v)
except ValueError:
logger.warning('BLS: value {} can not be decoded to base58'.format(v))
return None
try:
return cls.from_bytes(bts)
except IndyCryptoError as e:
logger.warning('BLS: Indy Crypto error: {}'.format(e))
return None
@staticmethod
def bls_pk_from_str(v: str) -> Optional[VerKey]:
return IndyCryptoBlsUtils.bls_from_str(v, VerKey)
@staticmethod
def prepare_seed(seed):
seed_bytes = None
if isinstance(seed, str):
seed_bytes = seed.encode()
if isinstance(seed, (bytes, bytearray)):
seed_bytes = seed
# TODO: FIXME: indy-crypto supports 32-byte seeds only
if seed_bytes:
if len(seed_bytes) < IndyCryptoBlsUtils.SEED_LEN:
seed_bytes += b'0' * (IndyCryptoBlsUtils.SEED_LEN - len(seed_bytes))
assert (len(seed_bytes) >= IndyCryptoBlsUtils.SEED_LEN)
return seed_bytes
class BlsCryptoVerifierIndyCrypto(BlsCryptoVerifier):
def __init__(self, params: GroupParams):
self._generator = \
IndyCryptoBlsUtils.bls_from_str(params.g, Generator) # type: Generator
def verify_sig(self, signature: str, message: bytes, bls_pk: Optional[VerKey]) -> bool:
bls_signature = IndyCryptoBlsUtils.bls_from_str(signature, Signature)
if bls_signature is None:
return False
if bls_pk is None:
return False
return Bls.verify(bls_signature,
message,
bls_pk,
self._generator)
def verify_multi_sig(self, signature: str, message: bytes, pks: Sequence[Optional[VerKey]]) -> bool:
# TODO: is it expected that we return False if one of the keys is None?
if None in pks:
return False
multi_signature = \
IndyCryptoBlsUtils.bls_from_str(signature, MultiSignature) # type: MultiSignature
if multi_signature is None:
return False
return Bls.verify_multi_sig(multi_sig=multi_signature,
message=message,
ver_keys=pks,
gen=self._generator)
def create_multi_sig(self, signatures: Sequence[str]) -> str:
sigs = [IndyCryptoBlsUtils.bls_from_str(s, Signature) for s in signatures]
bts = MultiSignature.new(sigs)
return IndyCryptoBlsUtils.bls_to_str(bts)
def verify_key_proof_of_possession(self, key_proof: Optional[ProofOfPossession], bls_pk: Optional[VerKey]) -> bool:
if None in [key_proof, bls_pk]:
return False
return Bls.verify_pop(key_proof,
bls_pk,
self._generator)
class BlsCryptoSignerIndyCrypto(BlsCryptoSigner):
def __init__(self, sk: SignKey, pk: VerKey, params: GroupParams):
self._sk = sk # type: SignKey
self.pk = pk # type: VerKey
self._generator = \
IndyCryptoBlsUtils.bls_from_str(params.g, Generator) # type: Generator
@staticmethod
def generate_keys(params: GroupParams, seed=None) -> (SignKey, VerKey, ProofOfPossession):
seed = IndyCryptoBlsUtils.prepare_seed(seed)
gen = IndyCryptoBlsUtils.bls_from_str(params.g, Generator)
sk = SignKey.new(seed)
vk = VerKey.new(gen, sk)
key_proof = ProofOfPossession.new(ver_key=vk, sign_key=sk)
return sk, vk, key_proof
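# Hedged usage sketch (the seed value is illustrative):
#   params = BlsGroupParamsLoaderIndyCrypto().load_group_params()
#   sk, pk, pop = BlsCryptoSignerIndyCrypto.generate_keys(params, seed="0" * 32)
#   signer = BlsCryptoSignerIndyCrypto(sk, pk, params)
#   signature = signer.sign(b"message")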
@staticmethod
def generate_key_proof(sk: SignKey, pk: VerKey) -> ProofOfPossession:
return ProofOfPossession.new(ver_key=pk, sign_key=sk)
def sign(self, message: bytes) -> str:
sign = Bls.sign(message, self._sk)
return IndyCryptoBlsUtils.bls_to_str(sign)
|
the-stack_0_9454 | #!/usr/bin/env python
"""Tests client actions related to administrating the client."""
import os
import psutil
import requests
from grr import config
from grr.client import comms
from grr.client.client_actions import admin
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import stats
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.test_lib import client_test_lib
from grr.test_lib import test_lib
class ConfigActionTest(client_test_lib.EmptyActionTest):
"""Tests the client actions UpdateConfiguration and GetConfiguration."""
def setUp(self):
super(ConfigActionTest, self).setUp()
# These tests change the config so we preserve state.
self.config_stubber = test_lib.PreserveConfig()
self.config_stubber.Start()
def tearDown(self):
super(ConfigActionTest, self).tearDown()
self.config_stubber.Stop()
def testUpdateConfiguration(self):
"""Test that we can update the config."""
# A unique name on the filesystem for the writeback.
self.config_file = os.path.join(self.temp_dir, "ConfigActionTest.yaml")
# In a real client, the writeback location should be set to something real,
# but for this test we make it the same as the config file.
config.CONFIG.SetWriteBack(self.config_file)
# Make sure the file is gone
self.assertRaises(IOError, open, self.config_file)
location = ["http://www.example1.com/", "http://www.example2.com/"]
request = rdf_protodict.Dict()
request["Client.server_urls"] = location
request["Client.foreman_check_frequency"] = 3600
result = self.RunAction(admin.UpdateConfiguration, request)
self.assertEqual(result, [])
self.assertEqual(config.CONFIG["Client.foreman_check_frequency"], 3600)
# Test the config file got written.
data = open(self.config_file, "rb").read()
self.assertTrue("server_urls: {0}".format(",".join(location)) in data)
self.urls = []
# Now test that our location was actually updated.
def FakeUrlOpen(url=None, data=None, **_):
self.urls.append(url)
response = requests.Response()
response.status_code = 200
response._content = data
return response
with utils.Stubber(requests, "request", FakeUrlOpen):
client_context = comms.GRRHTTPClient(worker=MockClientWorker())
client_context.MakeRequest("")
# Since the request is successful we only connect to one location.
self.assertTrue(location[0] in self.urls[0])
def testUpdateConfigBlacklist(self):
"""Tests that disallowed fields are not getting updated."""
with test_lib.ConfigOverrider({
"Client.server_urls": ["http://something.com/"],
"Client.server_serial_number": 1
}):
location = ["http://www.example.com"]
request = rdf_protodict.Dict()
request["Client.server_urls"] = location
request["Client.server_serial_number"] = 10
self.RunAction(admin.UpdateConfiguration, request)
# Location can be set.
self.assertEqual(config.CONFIG["Client.server_urls"], location)
# But the server serial number can not be updated.
self.assertEqual(config.CONFIG["Client.server_serial_number"], 1)
def testGetConfig(self):
"""Check GetConfig client action works."""
# Use UpdateConfig to generate a config.
location = ["http://example.com/"]
request = rdf_protodict.Dict()
request["Client.server_urls"] = location
request["Client.foreman_check_frequency"] = 3600
self.RunAction(admin.UpdateConfiguration, request)
# Check that our GetConfig actually gets the real data.
self.RunAction(admin.GetConfiguration)
self.assertEqual(config.CONFIG["Client.foreman_check_frequency"], 3600)
self.assertEqual(config.CONFIG["Client.server_urls"], location)
class MockStatsCollector(object):
"""Mock stats collector for GetClientStatsActionTest."""
# First value in every tuple is a timestamp (as if it was returned by
# time.time()).
cpu_samples = [
(rdfvalue.RDFDatetime().FromSecondsFromEpoch(100), 0.1, 0.1, 10.0),
(rdfvalue.RDFDatetime().FromSecondsFromEpoch(110), 0.1, 0.2, 15.0),
(rdfvalue.RDFDatetime().FromSecondsFromEpoch(120), 0.1, 0.3, 20.0)
] # pyformat: disable
io_samples = [(rdfvalue.RDFDatetime().FromSecondsFromEpoch(100), 100, 100),
(rdfvalue.RDFDatetime().FromSecondsFromEpoch(110), 200, 200),
(rdfvalue.RDFDatetime().FromSecondsFromEpoch(120), 300, 300)]
class MockClientWorker(object):
"""Mock client worker for GetClientStatsActionTest."""
def __init__(self):
self.stats_collector = MockStatsCollector()
class GetClientStatsActionTest(client_test_lib.EmptyActionTest):
"""Test GetClientStats client action."""
def setUp(self):
super(GetClientStatsActionTest, self).setUp()
self.old_boot_time = psutil.boot_time
psutil.boot_time = lambda: 100
def tearDown(self):
super(GetClientStatsActionTest, self).tearDown()
psutil.boot_time = self.old_boot_time
def testReturnsAllDataByDefault(self):
"""Checks that stats collection works."""
stats.STATS.RegisterCounterMetric("grr_client_received_bytes")
stats.STATS.IncrementCounter("grr_client_received_bytes", 1566)
stats.STATS.RegisterCounterMetric("grr_client_sent_bytes")
stats.STATS.IncrementCounter("grr_client_sent_bytes", 2000)
results = self.RunAction(
admin.GetClientStats,
grr_worker=MockClientWorker(),
arg=rdf_client.GetClientStatsRequest())
response = results[0]
self.assertEqual(response.bytes_received, 1566)
self.assertEqual(response.bytes_sent, 2000)
self.assertEqual(len(response.cpu_samples), 3)
for i in range(3):
self.assertEqual(
response.cpu_samples[i].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(100 + i * 10))
self.assertAlmostEqual(response.cpu_samples[i].user_cpu_time, 0.1)
self.assertAlmostEqual(response.cpu_samples[i].system_cpu_time,
0.1 * (i + 1))
self.assertAlmostEqual(response.cpu_samples[i].cpu_percent, 10.0 + 5 * i)
self.assertEqual(len(response.io_samples), 3)
for i in range(3):
self.assertEqual(
response.io_samples[i].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(100 + i * 10))
self.assertEqual(response.io_samples[i].read_bytes, 100 * (i + 1))
self.assertEqual(response.io_samples[i].write_bytes, 100 * (i + 1))
self.assertEqual(response.boot_time, long(100 * 1e6))
def testFiltersDataPointsByStartTime(self):
start_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(117)
results = self.RunAction(
admin.GetClientStats,
grr_worker=MockClientWorker(),
arg=rdf_client.GetClientStatsRequest(start_time=start_time))
response = results[0]
self.assertEqual(len(response.cpu_samples), 1)
self.assertEqual(response.cpu_samples[0].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(120))
self.assertEqual(len(response.io_samples), 1)
self.assertEqual(response.io_samples[0].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(120))
def testFiltersDataPointsByEndTime(self):
end_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(102)
results = self.RunAction(
admin.GetClientStats,
grr_worker=MockClientWorker(),
arg=rdf_client.GetClientStatsRequest(end_time=end_time))
response = results[0]
self.assertEqual(len(response.cpu_samples), 1)
self.assertEqual(response.cpu_samples[0].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(100))
self.assertEqual(len(response.io_samples), 1)
self.assertEqual(response.io_samples[0].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(100))
def testFiltersDataPointsByStartAndEndTimes(self):
start_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(109)
end_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(113)
results = self.RunAction(
admin.GetClientStats,
grr_worker=MockClientWorker(),
arg=rdf_client.GetClientStatsRequest(
start_time=start_time, end_time=end_time))
response = results[0]
self.assertEqual(len(response.cpu_samples), 1)
self.assertEqual(response.cpu_samples[0].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(110))
self.assertEqual(len(response.io_samples), 1)
self.assertEqual(response.io_samples[0].timestamp,
rdfvalue.RDFDatetime().FromSecondsFromEpoch(110))
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
the-stack_0_9455 | # ======================================================================== #
#
# Copyright (c) 2017 - 2020 scVAE authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================== #
import numpy
from scvae.data.sparse import sparsity
from scvae.data.utilities import standard_deviation
MAXIMUM_NUMBER_OF_VALUES_FOR_NORMAL_STATISTICS_COMPUTATION = 5e8
def summary_statistics(x, name="", tolerance=1e-3, skip_sparsity=False):
batch_size = None
if x.size > MAXIMUM_NUMBER_OF_VALUES_FOR_NORMAL_STATISTICS_COMPUTATION:
batch_size = 1000
x_mean = x.mean()
x_std = standard_deviation(x, ddof=1, batch_size=batch_size)
x_min = x.min()
x_max = x.max()
x_dispersion = x_std**2 / x_mean
if skip_sparsity:
x_sparsity = numpy.nan
else:
x_sparsity = sparsity(x, tolerance=tolerance, batch_size=batch_size)
statistics = {
"name": name,
"mean": x_mean,
"standard deviation": x_std,
"minimum": x_min,
"maximum": x_max,
"dispersion": x_dispersion,
"sparsity": x_sparsity
}
return statistics
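# Illustrative usage (hypothetical input; assumes standard_deviation follows numpy's
# ddof=1 convention and sparsity reports the fraction of entries with |x| <= tolerance):
# for x = numpy.array([[0, 1], [2, 3]]) the returned dict reports mean 1.5, standard
# deviation ~1.29, minimum 0, maximum 3, and dispersion std**2 / mean.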
def format_summary_statistics(statistics_sets, name="Data set"):
if not isinstance(statistics_sets, list):
statistics_sets = [statistics_sets]
name_width = max(
[len(name)]
+ [len(statistics_set["name"]) for statistics_set in statistics_sets]
)
table_heading = " ".join([
"{:{}}".format(name, name_width),
" mean ", "std. dev. ", "dispersion",
" minimum ", " maximum ", "sparsity"
])
table_rows = [table_heading]
for statistics_set in statistics_sets:
table_row_parts = [
"{:{}}".format(statistics_set["name"], name_width),
"{:<9.5g}".format(statistics_set["mean"]),
"{:<9.5g}".format(statistics_set["standard deviation"]),
"{:<9.5g}".format(statistics_set["dispersion"]),
"{:<11.5g}".format(statistics_set["minimum"]),
"{:<11.5g}".format(statistics_set["maximum"]),
"{:<7.5g}".format(statistics_set["sparsity"]),
]
table_row = " ".join(table_row_parts)
table_rows.append(table_row)
table = "\n".join(table_rows)
return table
|
the-stack_0_9456 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('gitrepo', '0002_gitbranchtrailentry_order'),
('commandrepo', '0004_commandgroupentry_user'),
('bluesteel', '0007_remove_bluesteellayoutentry_archive'),
]
operations = [
migrations.CreateModel(
name='BenchmarkDefinitionEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default=b'Default benchmark name', max_length=128)),
('revision', models.IntegerField(default=0)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('command_set', models.ForeignKey(related_name='benchmark_command_set', to='commandrepo.CommandSetEntry')),
('layout', models.ForeignKey(related_name='benchmark_layout', to='bluesteel.BluesteelLayoutEntry')),
('project', models.ForeignKey(related_name='benchmark_project', to='bluesteel.BluesteelProjectEntry')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BenchmarkExecutionEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('invalidated', models.BooleanField(default=False)),
('revision_target', models.IntegerField(default=-1)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('commit', models.ForeignKey(related_name='benchmark_exec_commit', to='gitrepo.GitCommitEntry')),
('definition', models.ForeignKey(related_name='benchmark_exec_definition', to='benchmark.BenchmarkDefinitionEntry')),
('report', models.ForeignKey(related_name='benchmark_exec_command_set', to='commandrepo.CommandSetEntry')),
],
options={
},
bases=(models.Model,),
),
]
|
the-stack_0_9458 | # -*- coding: utf-8 -*-
"""
Copyright 2021 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================
"""
from django.test import TestCase
import json
import random
class TestScalarRequest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
def init_test(self):
if getattr(self, "category", None) is None:
_init_res = self.client.get("/api/init", format="json")
self.assertEqual(_init_res.status_code, 200)
_init_json = json.loads(_init_res.content)
self.assertEqual(_init_json["code"], 200)
_cate_res = self.client.get("/api/getCategory", format="json")
self.category = json.loads(_cate_res.content)["data"]
def test_get_scalar(self):
self.init_test()
_run = ""
_tag = ""
for k in self.category.keys():
if "scalar" in self.category[k].keys():
_run = k
_tag = random.choice(list(self.category[k]['scalar'].keys()))
break
try:
assert _run != "" and _tag != "", "There is no scalar data in test logs."
except AssertionError as e:
import logging
logging.error(str(e))
return
res = self.client.get("/api/scalar", {'run': _run, 'tag': _tag})
_json = json.loads(res.content)
self.assertEqual(_json["code"], 200)
|
the-stack_0_9460 | '''
module for implementation
of bucket sort
'''
from pyalgo.sort.insertion_sort import insertion_sort
def bucket_sort(arr: list):
l = []
slot_num = 10
for i in range(slot_num):
l.append([])
for j in arr:
index_b = int(slot_num * j)
l[index_b].append(j)
for i in range(slot_num):
l[i] = insertion_sort(l[i])
k = 0
for i in range(slot_num):
for j in range(len(l[i])):
arr[k] = l[i][j]
k += 1
return arr
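# Usage sketch: the slot index int(10 * j) assumes inputs lie in [0, 1); a value of 1.0 or
# above would index past the last bucket.
#   >>> bucket_sort([0.42, 0.32, 0.33, 0.52, 0.37, 0.47, 0.51])
#   [0.32, 0.33, 0.37, 0.42, 0.47, 0.51, 0.52]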
'''
PyAlgo
Devansh Singh, 2021
''' |
the-stack_0_9463 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google Compute Engine operators.
"""
from copy import deepcopy
from typing import Dict
from json_merge_patch import merge
from googleapiclient.errors import HttpError
from airflow import AirflowException
from airflow.contrib.hooks.gcp_compute_hook import GceHook
from airflow.contrib.utils.gcp_field_sanitizer import GcpBodyFieldSanitizer
from airflow.contrib.utils.gcp_field_validator import GcpBodyFieldValidator
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GceBaseOperator(BaseOperator):
"""
Abstract base operator for Google Compute Engine operators to inherit from.
"""
@apply_defaults
def __init__(self,
zone,
resource_id,
project_id=None,
gcp_conn_id='google_cloud_default',
api_version='v1',
*args, **kwargs):
self.project_id = project_id
self.zone = zone
self.resource_id = resource_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self._validate_inputs()
self._hook = GceHook(gcp_conn_id=self.gcp_conn_id, api_version=self.api_version)
super().__init__(*args, **kwargs)
def _validate_inputs(self):
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is missing")
if not self.zone:
raise AirflowException("The required parameter 'zone' is missing")
if not self.resource_id:
raise AirflowException("The required parameter 'resource_id' is missing")
def execute(self, context):
pass
class GceInstanceStartOperator(GceBaseOperator):
"""
Starts an instance in Google Compute Engine.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GceInstanceStartOperator`
:param zone: Google Cloud Platform zone where the instance exists.
:type zone: str
:param resource_id: Name of the Compute Engine instance resource.
:type resource_id: str
:param project_id: Optional, Google Cloud Platform Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is
used.
:type project_id: str
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud
Platform. Defaults to 'google_cloud_default'.
:type gcp_conn_id: str
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
:type api_version: str
:param validate_body: Optional, If set to False, body validation is not performed.
Defaults to False.
"""
# [START gce_instance_start_template_fields]
template_fields = ('project_id', 'zone', 'resource_id', 'gcp_conn_id', 'api_version')
# [END gce_instance_start_template_fields]
@apply_defaults
def __init__(self,
zone,
resource_id,
project_id=None,
gcp_conn_id='google_cloud_default',
api_version='v1',
*args, **kwargs):
super().__init__(
project_id=project_id, zone=zone, resource_id=resource_id,
gcp_conn_id=gcp_conn_id, api_version=api_version, *args, **kwargs)
def execute(self, context):
return self._hook.start_instance(zone=self.zone,
resource_id=self.resource_id,
project_id=self.project_id)
class GceInstanceStopOperator(GceBaseOperator):
"""
Stops an instance in Google Compute Engine.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GceInstanceStopOperator`
:param zone: Google Cloud Platform zone where the instance exists.
:type zone: str
:param resource_id: Name of the Compute Engine instance resource.
:type resource_id: str
:param project_id: Optional, Google Cloud Platform Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is
used.
:type project_id: str
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud
Platform. Defaults to 'google_cloud_default'.
:type gcp_conn_id: str
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
:type api_version: str
:param validate_body: Optional, If set to False, body validation is not performed.
Defaults to False.
"""
# [START gce_instance_stop_template_fields]
template_fields = ('project_id', 'zone', 'resource_id', 'gcp_conn_id', 'api_version')
# [END gce_instance_stop_template_fields]
@apply_defaults
def __init__(self,
zone,
resource_id,
project_id=None,
gcp_conn_id='google_cloud_default',
api_version='v1',
*args, **kwargs):
super().__init__(
project_id=project_id, zone=zone, resource_id=resource_id,
gcp_conn_id=gcp_conn_id, api_version=api_version, *args, **kwargs)
def execute(self, context):
self._hook.stop_instance(zone=self.zone,
resource_id=self.resource_id,
project_id=self.project_id)
SET_MACHINE_TYPE_VALIDATION_SPECIFICATION = [
dict(name="machineType", regexp="^.+$"),
]
class GceSetMachineTypeOperator(GceBaseOperator):
"""
Changes the machine type for a stopped instance to the machine type specified in
the request.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GceSetMachineTypeOperator`
:param zone: Google Cloud Platform zone where the instance exists.
:type zone: str
:param resource_id: Name of the Compute Engine instance resource.
:type resource_id: str
:param body: Body required by the Compute Engine setMachineType API, as described in
https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType#request-body
:type body: dict
:param project_id: Optional, Google Cloud Platform Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the GCP connection
is used.
:type project_id: str
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud
Platform. Defaults to 'google_cloud_default'.
:type gcp_conn_id: str
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
:type api_version: str
:param validate_body: Optional, If set to False, body validation is not performed.
        Defaults to True.
:type validate_body: bool
"""
# [START gce_instance_set_machine_type_template_fields]
template_fields = ('project_id', 'zone', 'resource_id', 'gcp_conn_id', 'api_version')
# [END gce_instance_set_machine_type_template_fields]
@apply_defaults
def __init__(self,
zone,
resource_id,
body,
project_id=None,
gcp_conn_id='google_cloud_default',
api_version='v1',
validate_body=True,
*args, **kwargs):
self.body = body
self._field_validator = None
if validate_body:
self._field_validator = GcpBodyFieldValidator(
SET_MACHINE_TYPE_VALIDATION_SPECIFICATION, api_version=api_version)
super().__init__(
project_id=project_id, zone=zone, resource_id=resource_id,
gcp_conn_id=gcp_conn_id, api_version=api_version, *args, **kwargs)
def _validate_all_body_fields(self):
if self._field_validator:
self._field_validator.validate(self.body)
def execute(self, context):
self._validate_all_body_fields()
return self._hook.set_machine_type(zone=self.zone,
resource_id=self.resource_id,
body=self.body,
project_id=self.project_id)
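# A sketch of a body accepted by GceSetMachineTypeOperator above. The machine
# type URL is an illustrative assumption; see the setMachineType API docs
# referenced in the docstring for the exact format:
#
#     SET_MACHINE_TYPE_BODY = {
#         'machineType': 'zones/europe-west1-b/machineTypes/n1-standard-1'
#     }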
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION = [
dict(name="name", regexp="^.+$"),
dict(name="description", optional=True),
dict(name="properties", type='dict', optional=True, fields=[
dict(name="description", optional=True),
dict(name="tags", optional=True, fields=[
dict(name="items", optional=True)
]),
dict(name="machineType", optional=True),
dict(name="canIpForward", optional=True),
dict(name="networkInterfaces", optional=True), # not validating deeper
dict(name="disks", optional=True), # not validating the array deeper
dict(name="metadata", optional=True, fields=[
dict(name="fingerprint", optional=True),
dict(name="items", optional=True),
dict(name="kind", optional=True),
]),
dict(name="serviceAccounts", optional=True), # not validating deeper
dict(name="scheduling", optional=True, fields=[
dict(name="onHostMaintenance", optional=True),
dict(name="automaticRestart", optional=True),
dict(name="preemptible", optional=True),
dict(name="nodeAffinitites", optional=True), # not validating deeper
]),
dict(name="labels", optional=True),
dict(name="guestAccelerators", optional=True), # not validating deeper
dict(name="minCpuPlatform", optional=True),
]),
]
GCE_INSTANCE_TEMPLATE_FIELDS_TO_SANITIZE = [
"kind",
"id",
"name",
"creationTimestamp",
"properties.disks.sha256",
"properties.disks.kind",
"properties.disks.sourceImageEncryptionKey.sha256",
"properties.disks.index",
"properties.disks.licenses",
"properties.networkInterfaces.kind",
"properties.networkInterfaces.accessConfigs.kind",
"properties.networkInterfaces.name",
"properties.metadata.kind",
"selfLink"
]
class GceInstanceTemplateCopyOperator(GceBaseOperator):
"""
Copies the instance template, applying specified changes.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GceInstanceTemplateCopyOperator`
:param resource_id: Name of the Instance Template
:type resource_id: str
:param body_patch: Patch to the body of instanceTemplates object following rfc7386
PATCH semantics. The body_patch content follows
https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
Name field is required as we need to rename the template,
all the other fields are optional. It is important to follow PATCH semantics
- arrays are replaced fully, so if you need to update an array you should
provide the whole target array as patch element.
:type body_patch: dict
:param project_id: Optional, Google Cloud Platform Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the GCP connection
is used.
:type project_id: str
:param request_id: Optional, unique request_id that you might add to achieve
        full idempotence (for example, when a client call times out, repeating the
        request with the same request_id will not create a new instance template again).
It should be in UUID format as defined in RFC 4122.
:type request_id: str
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud
Platform. Defaults to 'google_cloud_default'.
:type gcp_conn_id: str
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
:type api_version: str
:param validate_body: Optional, If set to False, body validation is not performed.
        Defaults to True.
:type validate_body: bool
"""
# [START gce_instance_template_copy_operator_template_fields]
template_fields = ('project_id', 'resource_id', 'request_id',
'gcp_conn_id', 'api_version')
# [END gce_instance_template_copy_operator_template_fields]
@apply_defaults
def __init__(self,
resource_id,
body_patch,
project_id=None,
request_id=None,
gcp_conn_id='google_cloud_default',
api_version='v1',
validate_body=True,
*args, **kwargs):
self.body_patch = body_patch
self.request_id = request_id
self._field_validator = None
if 'name' not in self.body_patch:
raise AirflowException("The body '{}' should contain at least "
"name for the new operator in the 'name' field".
format(body_patch))
if validate_body:
self._field_validator = GcpBodyFieldValidator(
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version)
self._field_sanitizer = GcpBodyFieldSanitizer(
GCE_INSTANCE_TEMPLATE_FIELDS_TO_SANITIZE)
super().__init__(
project_id=project_id, zone='global', resource_id=resource_id,
gcp_conn_id=gcp_conn_id, api_version=api_version, *args, **kwargs)
def _validate_all_body_fields(self):
if self._field_validator:
self._field_validator.validate(self.body_patch)
def execute(self, context):
self._validate_all_body_fields()
try:
            # Idempotence check (sort of) - we want to check if the new template
            # is already created and, if it is, we assume it was created by a previous
            # run of the copy operator - we do not check whether the content of the
            # template is as expected. Templates are immutable, so we could not update
            # it anyway, and deleting/recreating is not worth the hassle, especially
            # since we cannot delete a template that is already used by some Instance
            # Group Manager. We assume success if the template is simply present.
existing_template = self._hook.get_instance_template(
resource_id=self.body_patch['name'], project_id=self.project_id)
self.log.info(
"The %s template already existed. It was likely created by previous run of the operator. "
"Assuming success.",
existing_template
)
return existing_template
except HttpError as e:
# We actually expect to get 404 / Not Found here as the template should
# not yet exist
if not e.resp.status == 404:
raise e
old_body = self._hook.get_instance_template(resource_id=self.resource_id,
project_id=self.project_id)
new_body = deepcopy(old_body)
self._field_sanitizer.sanitize(new_body)
new_body = merge(new_body, self.body_patch)
self.log.info("Calling insert instance template with updated body: %s", new_body)
self._hook.insert_instance_template(body=new_body,
request_id=self.request_id,
project_id=self.project_id)
return self._hook.get_instance_template(resource_id=self.body_patch['name'],
project_id=self.project_id)
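# A sketch of a body_patch for GceInstanceTemplateCopyOperator, illustrating
# the rfc7386 PATCH semantics described in the docstring: 'name' is mandatory
# and any array (e.g. disks or networkInterfaces) must be provided in full,
# because arrays are replaced rather than merged. Values are illustrative:
#
#     GCE_INSTANCE_TEMPLATE_BODY_PATCH = {
#         'name': 'instance-template-test-new',
#         'properties': {
#             'machineType': 'n1-standard-2',
#         },
#     }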
class GceInstanceGroupManagerUpdateTemplateOperator(GceBaseOperator):
"""
Patches the Instance Group Manager, replacing source template URL with the
destination one. API V1 does not have update/patch operations for Instance
Group Manager, so you must use beta or newer API version. Beta is the default.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GceInstanceGroupManagerUpdateTemplateOperator`
:param resource_id: Name of the Instance Group Manager
:type resource_id: str
:param zone: Google Cloud Platform zone where the Instance Group Manager exists.
:type zone: str
:param source_template: URL of the template to replace.
:type source_template: str
:param destination_template: URL of the target template.
:type destination_template: str
:param project_id: Optional, Google Cloud Platform Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the GCP connection is
used.
:type project_id: str
:param request_id: Optional, unique request_id that you might add to achieve
        full idempotence (for example, when a client call times out, repeating the
        request with the same request_id will not create a new instance template again).
It should be in UUID format as defined in RFC 4122.
:type request_id: str
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud
Platform. Defaults to 'google_cloud_default'.
:type gcp_conn_id: str
    :param api_version: Optional, API version used (must be beta or newer, as v1 has
        no update/patch operations for Instance Group Managers). Defaults to beta.
    :type api_version: str
    :param update_policy: Optional, the update policy of the Instance Group Manager,
        passed through as the 'updatePolicy' field of the patch body.
    :type update_policy: dict
"""
# [START gce_igm_update_template_operator_template_fields]
template_fields = ('project_id', 'resource_id', 'zone', 'request_id',
'source_template', 'destination_template',
'gcp_conn_id', 'api_version')
# [END gce_igm_update_template_operator_template_fields]
@apply_defaults
def __init__(self,
resource_id,
zone,
source_template,
destination_template,
project_id=None,
update_policy=None,
request_id=None,
gcp_conn_id='google_cloud_default',
api_version='beta',
*args, **kwargs):
self.zone = zone
self.source_template = source_template
self.destination_template = destination_template
self.request_id = request_id
self.update_policy = update_policy
self._change_performed = False
if api_version == 'v1':
raise AirflowException("Api version v1 does not have update/patch "
"operations for Instance Group Managers. Use beta"
" api version or above")
super().__init__(
project_id=project_id, zone=self.zone, resource_id=resource_id,
gcp_conn_id=gcp_conn_id, api_version=api_version, *args, **kwargs)
def _possibly_replace_template(self, dictionary: Dict) -> None:
if dictionary.get('instanceTemplate') == self.source_template:
dictionary['instanceTemplate'] = self.destination_template
self._change_performed = True
def execute(self, context):
old_instance_group_manager = self._hook.get_instance_group_manager(
zone=self.zone, resource_id=self.resource_id, project_id=self.project_id)
patch_body = {}
if 'versions' in old_instance_group_manager:
patch_body['versions'] = old_instance_group_manager['versions']
if 'instanceTemplate' in old_instance_group_manager:
patch_body['instanceTemplate'] = old_instance_group_manager['instanceTemplate']
if self.update_policy:
patch_body['updatePolicy'] = self.update_policy
self._possibly_replace_template(patch_body)
if 'versions' in patch_body:
for version in patch_body['versions']:
self._possibly_replace_template(version)
if self._change_performed or self.update_policy:
self.log.info(
"Calling patch instance template with updated body: %s",
patch_body)
return self._hook.patch_instance_group_manager(
zone=self.zone, resource_id=self.resource_id,
body=patch_body, request_id=self.request_id,
project_id=self.project_id)
else:
# Idempotence achieved
return True
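# A minimal usage sketch for GceInstanceGroupManagerUpdateTemplateOperator
# (illustrative values only; api_version defaults to 'beta' because v1 has no
# patch call for Instance Group Managers):
#
#     update_template = GceInstanceGroupManagerUpdateTemplateOperator(
#         task_id='gcp_compute_igm_update_template_task',
#         resource_id='instance-group-test',
#         zone='europe-west1-b',
#         source_template='https://www.googleapis.com/compute/beta/projects/'
#                         'my-project/global/instanceTemplates/template-old',
#         destination_template='https://www.googleapis.com/compute/beta/projects/'
#                              'my-project/global/instanceTemplates/template-new',
#     )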
|
the-stack_0_9464 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
def decode_image(im_file, im_info):
"""read rgb image
Args:
im_file (str|np.ndarray): input can be image path or np.ndarray
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
if isinstance(im_file, str):
with open(im_file, 'rb') as f:
im_read = f.read()
data = np.frombuffer(im_read, dtype='uint8')
im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
else:
im = im_file
im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)
return im, im_info
class Resize(object):
"""resize image by target_size and max_size
Args:
target_size (int): the target size of image
keep_ratio (bool): whether keep_ratio or not, default true
interp (int): method of resize
"""
def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR):
if isinstance(target_size, int):
target_size = [target_size, target_size]
self.target_size = target_size
self.keep_ratio = keep_ratio
self.interp = interp
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
assert len(self.target_size) == 2
assert self.target_size[0] > 0 and self.target_size[1] > 0
im_channel = im.shape[2]
im_scale_y, im_scale_x = self.generate_scale(im)
im = cv2.resize(
im,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
im_info['scale_factor'] = np.array(
[im_scale_y, im_scale_x]).astype('float32')
return im, im_info
def generate_scale(self, im):
"""
Args:
im (np.ndarray): image (np.ndarray)
Returns:
im_scale_x: the resize ratio of X
im_scale_y: the resize ratio of Y
"""
origin_shape = im.shape[:2]
im_c = im.shape[2]
if self.keep_ratio:
im_size_min = np.min(origin_shape)
im_size_max = np.max(origin_shape)
target_size_min = np.min(self.target_size)
target_size_max = np.max(self.target_size)
im_scale = float(target_size_min) / float(im_size_min)
if np.round(im_scale * im_size_max) > target_size_max:
im_scale = float(target_size_max) / float(im_size_max)
im_scale_x = im_scale
im_scale_y = im_scale
else:
resize_h, resize_w = self.target_size
im_scale_y = resize_h / float(origin_shape[0])
im_scale_x = resize_w / float(origin_shape[1])
return im_scale_y, im_scale_x
class NormalizeImage(object):
"""normalize image
Args:
        mean (list): per-channel mean subtracted from the image (im - mean)
        std (list): per-channel std the image is divided by (im / std)
        is_scale (bool): whether to scale the image by 1/255 before normalizing
"""
def __init__(self, mean, std, is_scale=True):
self.mean = mean
self.std = std
self.is_scale = is_scale
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im = im.astype(np.float32, copy=False)
mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
std = np.array(self.std)[np.newaxis, np.newaxis, :]
if self.is_scale:
im = im / 255.0
im -= mean
im /= std
return im, im_info
class Permute(object):
"""permute image
Args:
to_bgr (bool): whether convert RGB to BGR
channel_first (bool): whether convert HWC to CHW
"""
def __init__(self, ):
super(Permute, self).__init__()
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im = im.transpose((2, 0, 1)).copy()
return im, im_info
class PadStride(object):
""" padding image for model with FPN, instead PadBatch(pad_to_stride) in original config
Args:
stride (bool): model with FPN need image shape % stride == 0
"""
def __init__(self, stride=0):
self.coarsest_stride = stride
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
coarsest_stride = self.coarsest_stride
if coarsest_stride <= 0:
return im, im_info
im_c, im_h, im_w = im.shape
pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
padding_im[:, :im_h, :im_w] = im
return padding_im, im_info
def preprocess(im, preprocess_ops):
# process image by preprocess_ops
im_info = {
'scale_factor': np.array(
[1., 1.], dtype=np.float32),
'im_shape': None,
}
im, im_info = decode_image(im, im_info)
for operator in preprocess_ops:
im, im_info = operator(im, im_info)
return im, im_info
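if __name__ == '__main__':
    # A minimal, self-contained sketch of how the operators above compose into
    # a preprocessing pipeline. The target size, stride and the ImageNet
    # mean/std values are illustrative assumptions, not requirements.
    dummy_image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    preprocess_ops = [
        Resize(target_size=608, keep_ratio=True),
        NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        Permute(),
        PadStride(stride=32),
    ]
    out_im, out_info = preprocess(dummy_image, preprocess_ops)
    # out_im is CHW float32, padded so that height and width are multiples of 32
    print(out_im.shape, out_info['im_shape'], out_info['scale_factor'])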
|
the-stack_0_9466 | import requests, logging
import pytest, json
from settings import TEST_DATA, DEPLOYMENTS
from suite.resources_utils import (
    wait_before_test,
    create_items_from_yaml,
get_file_contents,
get_service_endpoint,
)
from suite.custom_resources_utils import (
create_crd_from_yaml,
delete_crd,
)
from suite.vs_vsr_resources_utils import(
delete_virtual_server,
create_virtual_server_from_yaml,
patch_virtual_server_from_yaml,
patch_v_s_route_from_yaml,
create_v_s_route_from_yaml,
delete_v_s_route,
)
from suite.policy_resources_utils import(
create_policy_from_yaml,
delete_policy,
read_policy,
)
from suite.ap_resources_utils import (
create_ap_usersig_from_yaml,
delete_ap_usersig,
delete_and_create_ap_policy_from_yaml,
read_ap_custom_resource,
create_ap_logconf_from_yaml,
create_ap_policy_from_yaml,
delete_ap_policy,
delete_ap_logconf,
create_ap_waf_policy_from_yaml,
)
from suite.yaml_utils import get_first_ingress_host_from_yaml, get_name_from_yaml
ap_pol_name = ""
log_name = ""
std_vs_src = f"{TEST_DATA}/ap-waf/standard/virtual-server.yaml"
waf_spec_vs_src = f"{TEST_DATA}/ap-waf/virtual-server-waf-spec.yaml"
waf_route_vs_src = f"{TEST_DATA}/ap-waf/virtual-server-waf-route.yaml"
waf_subroute_vsr_src = f"{TEST_DATA}/ap-waf/virtual-server-route-waf-subroute.yaml"
waf_pol_default_src = f"{TEST_DATA}/ap-waf/policies/waf-default.yaml"
waf_pol_dataguard_src = f"{TEST_DATA}/ap-waf/policies/waf-dataguard.yaml"
ap_policy_uds = "dataguard-alarm-uds"
uds_crd_resource = f"{TEST_DATA}/ap-waf/ap-ic-uds.yaml"
valid_resp_addr = "Server address:"
valid_resp_name = "Server name:"
invalid_resp_title = "Request Rejected"
invalid_resp_body = "The requested URL was rejected. Please consult with your administrator."
@pytest.fixture(scope="class")
def appprotect_setup(request, kube_apis, test_namespace) -> None:
"""
    Deploy a simple application and all the AppProtect (dataguard-alarm) resources under test in one namespace.
    :param request: pytest fixture
    :param kube_apis: client apis
    :param test_namespace: namespace to deploy the test resources into
"""
print("------------------------- Deploy logconf -----------------------------")
src_log_yaml = f"{TEST_DATA}/ap-waf/logconf.yaml"
global log_name
log_name = create_ap_logconf_from_yaml(kube_apis.custom_objects, src_log_yaml, test_namespace)
print("------------------------- Create UserSig CRD resource-----------------------------")
usersig_name = create_ap_usersig_from_yaml(
kube_apis.custom_objects, uds_crd_resource, test_namespace
)
print(f"------------------------- Deploy dataguard-alarm appolicy ---------------------------")
src_pol_yaml = f"{TEST_DATA}/ap-waf/{ap_policy_uds}.yaml"
global ap_pol_name
ap_pol_name = create_ap_policy_from_yaml(kube_apis.custom_objects, src_pol_yaml, test_namespace)
def fin():
print("Clean up:")
delete_ap_policy(kube_apis.custom_objects, ap_pol_name, test_namespace)
delete_ap_usersig(kube_apis.custom_objects, usersig_name, test_namespace)
delete_ap_logconf(kube_apis.custom_objects, log_name, test_namespace)
request.addfinalizer(fin)
def assert_ap_crd_info(ap_crd_info, policy_name) -> None:
"""
Assert fields in AppProtect policy documents
:param ap_crd_info: CRD output from k8s API
:param policy_name:
"""
assert ap_crd_info["kind"] == "APPolicy"
assert ap_crd_info["metadata"]["name"] == policy_name
assert ap_crd_info["spec"]["policy"]["enforcementMode"] == "blocking"
assert (
ap_crd_info["spec"]["policy"]["blocking-settings"]["violations"][0]["name"]
== "VIOL_DATA_GUARD"
)
def assert_invalid_responses(response) -> None:
"""
Assert responses when policy config is blocking requests
:param response: Response
"""
assert invalid_resp_title in response.text
assert invalid_resp_body in response.text
assert response.status_code == 200
def assert_valid_responses(response) -> None:
"""
Assert responses when policy config is allowing requests
:param response: Response
"""
assert valid_resp_name in response.text
assert valid_resp_addr in response.text
assert response.status_code == 200
@pytest.mark.skip_for_nginx_oss
@pytest.mark.appprotect
@pytest.mark.parametrize(
"crd_ingress_controller_with_ap, virtual_server_setup",
[
(
{
"type": "complete",
"extra_args": [
f"-enable-custom-resources",
f"-enable-leader-election=false",
f"-enable-app-protect",
],
},
{"example": "ap-waf", "app_type": "simple",},
)
],
indirect=True,
)
class TestAppProtectWAFPolicyVS:
def restore_default_vs(self, kube_apis, virtual_server_setup) -> None:
"""
Restore VirtualServer without policy spec
"""
delete_virtual_server(
kube_apis.custom_objects, virtual_server_setup.vs_name, virtual_server_setup.namespace
)
create_virtual_server_from_yaml(
kube_apis.custom_objects, std_vs_src, virtual_server_setup.namespace
)
wait_before_test()
@pytest.mark.smoke
@pytest.mark.parametrize(
"vs_src, waf",
[
(waf_spec_vs_src, waf_pol_default_src),
(waf_spec_vs_src, waf_pol_dataguard_src),
(waf_route_vs_src, waf_pol_default_src),
(waf_route_vs_src, waf_pol_dataguard_src),
],
)
def test_ap_waf_policy_block(
self,
kube_apis,
crd_ingress_controller_with_ap,
virtual_server_setup,
appprotect_setup,
test_namespace,
vs_src,
waf,
):
"""
Test waf policy when enabled with default and dataguard-alarm AP Policies
"""
print(f"Create waf policy")
if waf == waf_pol_dataguard_src:
create_ap_waf_policy_from_yaml(
kube_apis.custom_objects,
waf,
test_namespace,
test_namespace,
True,
False,
ap_pol_name,
log_name,
"syslog:server=127.0.0.1:514",
)
elif waf == waf_pol_default_src:
pol_name = create_policy_from_yaml(kube_apis.custom_objects, waf, test_namespace)
else:
pytest.fail(f"Invalid argument")
wait_before_test()
print(f"Patch vs with policy: {vs_src}")
patch_virtual_server_from_yaml(
kube_apis.custom_objects,
virtual_server_setup.vs_name,
vs_src,
virtual_server_setup.namespace,
)
wait_before_test()
ap_crd_info = read_ap_custom_resource(
kube_apis.custom_objects, test_namespace, "appolicies", ap_policy_uds
)
assert_ap_crd_info(ap_crd_info, ap_policy_uds)
wait_before_test(120)
print(
"----------------------- Send request with embedded malicious script----------------------"
)
response1 = requests.get(
virtual_server_setup.backend_1_url + "</script>",
headers={"host": virtual_server_setup.vs_host},
)
print(response1.text)
print(
"----------------------- Send request with blocked keyword in UDS----------------------"
)
response2 = requests.get(
virtual_server_setup.backend_1_url,
headers={"host": virtual_server_setup.vs_host},
data="kic",
)
print(response2.text)
delete_policy(kube_apis.custom_objects, "waf-policy", test_namespace)
self.restore_default_vs(kube_apis, virtual_server_setup)
assert_invalid_responses(response1)
if waf == waf_pol_dataguard_src:
assert_invalid_responses(response2)
elif waf == waf_pol_default_src:
assert_valid_responses(response2)
else:
pytest.fail(f"Invalid arguments")
@pytest.mark.parametrize(
"vs_src, waf",
[(waf_spec_vs_src, waf_pol_dataguard_src), (waf_route_vs_src, waf_pol_dataguard_src),],
)
def test_ap_waf_policy_allow(
self,
kube_apis,
crd_ingress_controller_with_ap,
virtual_server_setup,
appprotect_setup,
test_namespace,
vs_src,
waf,
):
"""
Test waf policy when disabled
"""
print(f"Create waf policy")
create_ap_waf_policy_from_yaml(
kube_apis.custom_objects,
waf,
test_namespace,
test_namespace,
False,
False,
ap_pol_name,
log_name,
"syslog:server=127.0.0.1:514",
)
wait_before_test()
print(f"Patch vs with policy: {vs_src}")
patch_virtual_server_from_yaml(
kube_apis.custom_objects,
virtual_server_setup.vs_name,
vs_src,
virtual_server_setup.namespace,
)
wait_before_test()
ap_crd_info = read_ap_custom_resource(
kube_apis.custom_objects, test_namespace, "appolicies", ap_policy_uds
)
assert_ap_crd_info(ap_crd_info, ap_policy_uds)
wait_before_test(120)
print(
"----------------------- Send request with embedded malicious script----------------------"
)
response1 = requests.get(
virtual_server_setup.backend_1_url + "</script>",
headers={"host": virtual_server_setup.vs_host},
)
print(response1.text)
print(
"----------------------- Send request with blocked keyword in UDS----------------------"
)
response2 = requests.get(
virtual_server_setup.backend_1_url,
headers={"host": virtual_server_setup.vs_host},
data="kic",
)
print(response2.text)
delete_policy(kube_apis.custom_objects, "waf-policy", test_namespace)
self.restore_default_vs(kube_apis, virtual_server_setup)
assert_valid_responses(response1)
assert_valid_responses(response2)
@pytest.mark.flaky(max_runs=3)
def test_ap_waf_policy_logs(
self,
kube_apis,
crd_ingress_controller_with_ap,
virtual_server_setup,
appprotect_setup,
test_namespace,
):
"""
Test waf policy logs
"""
src_syslog_yaml = f"{TEST_DATA}/ap-waf/syslog.yaml"
log_loc = f"/var/log/messages"
create_items_from_yaml(kube_apis, src_syslog_yaml, test_namespace)
syslog_dst = f"syslog-svc.{test_namespace}"
syslog_pod = kube_apis.v1.list_namespaced_pod(test_namespace).items[-1].metadata.name
print(f"Create waf policy")
create_ap_waf_policy_from_yaml(
kube_apis.custom_objects,
waf_pol_dataguard_src,
test_namespace,
test_namespace,
True,
True,
ap_pol_name,
log_name,
f"syslog:server={syslog_dst}:514",
)
wait_before_test()
print(f"Patch vs with policy: {waf_spec_vs_src}")
patch_virtual_server_from_yaml(
kube_apis.custom_objects,
virtual_server_setup.vs_name,
waf_spec_vs_src,
virtual_server_setup.namespace,
)
wait_before_test()
ap_crd_info = read_ap_custom_resource(
kube_apis.custom_objects, test_namespace, "appolicies", ap_policy_uds
)
assert_ap_crd_info(ap_crd_info, ap_policy_uds)
wait_before_test(120)
print(
"----------------------- Send request with embedded malicious script----------------------"
)
response = requests.get(
virtual_server_setup.backend_1_url + "</script>",
headers={"host": virtual_server_setup.vs_host},
)
print(response.text)
log_contents = ""
retry = 0
while "ASM:attack_type" not in log_contents and retry <= 30:
log_contents = get_file_contents(
kube_apis.v1, log_loc, syslog_pod, test_namespace
)
retry += 1
wait_before_test(1)
print(f"Security log not updated, retrying... #{retry}")
delete_policy(kube_apis.custom_objects, "waf-policy", test_namespace)
self.restore_default_vs(kube_apis, virtual_server_setup)
assert_invalid_responses(response)
assert (
f'ASM:attack_type="Non-browser Client,Abuse of Functionality,Cross Site Scripting (XSS)"'
in log_contents
)
assert f'severity="Critical"' in log_contents
assert f'request_status="blocked"' in log_contents
assert f'outcome="REJECTED"' in log_contents
@pytest.mark.skip_for_nginx_oss
@pytest.mark.appprotect
@pytest.mark.parametrize(
"crd_ingress_controller_with_ap, v_s_route_setup",
[
(
{
"type": "complete",
"extra_args": [
f"-enable-custom-resources",
f"-enable-leader-election=false",
f"-enable-app-protect",
],
},
{"example": "virtual-server-route"},
)
],
indirect=True,
)
class TestAppProtectWAFPolicyVSR:
def restore_default_vsr(self, kube_apis, v_s_route_setup) -> None:
"""
Function to revert vsr deployments to standard state
"""
patch_src_m = f"{TEST_DATA}/virtual-server-route/route-multiple.yaml"
patch_v_s_route_from_yaml(
kube_apis.custom_objects,
v_s_route_setup.route_m.name,
patch_src_m,
v_s_route_setup.route_m.namespace,
)
wait_before_test()
@pytest.mark.parametrize(
"ap_enable",
[
True,
# False
],
)
def test_ap_waf_policy_block(
self,
kube_apis,
crd_ingress_controller_with_ap,
v_s_route_setup,
appprotect_setup,
test_namespace,
ap_enable,
):
"""
Test if WAF policy is working with VSR deployments
"""
req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}"
print(f"Create waf policy")
create_ap_waf_policy_from_yaml(
kube_apis.custom_objects,
waf_pol_dataguard_src,
v_s_route_setup.route_m.namespace,
test_namespace,
ap_enable,
ap_enable,
ap_pol_name,
log_name,
"syslog:server=127.0.0.1:514",
)
wait_before_test()
print(f"Patch vsr with policy: {waf_subroute_vsr_src}")
patch_v_s_route_from_yaml(
kube_apis.custom_objects,
v_s_route_setup.route_m.name,
waf_subroute_vsr_src,
v_s_route_setup.route_m.namespace,
)
wait_before_test()
ap_crd_info = read_ap_custom_resource(
kube_apis.custom_objects, test_namespace, "appolicies", ap_policy_uds
)
assert_ap_crd_info(ap_crd_info, ap_policy_uds)
wait_before_test(120)
response = requests.get(
f"{req_url}{v_s_route_setup.route_m.paths[0]}+'</script>'",
headers={"host": v_s_route_setup.vs_host},
)
print(response.text)
delete_policy(kube_apis.custom_objects, "waf-policy", v_s_route_setup.route_m.namespace)
self.restore_default_vsr(kube_apis, v_s_route_setup)
if ap_enable == True:
assert_invalid_responses(response)
elif ap_enable == False:
assert_valid_responses(response)
else:
pytest.fail(f"Invalid arguments")
|
the-stack_0_9467 | # type: ignore
import time
from robomaster import robot, logger, logging, sensor # noqa
import patch_ftp # noqa
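# Monkey-patch below: replace TofSubject.data_info so the distance callback
# receives the raw (cmd_id, direct, flag, distance) fields.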
def data_info(self):
return self._cmd_id, self._direct, self._flag, self._distance
sensor.TofSubject.data_info = data_info
def cb(msg):
print(msg)
def main():
logger.setLevel(logging.ERROR)
ep_robot = robot.Robot()
ep_robot.initialize(conn_type="sta")
ep_robot.chassis.drive_speed(z=30)
ep_robot.sensor.sub_distance(freq=5, callback=cb)
time.sleep(10)
ep_robot.sensor.unsub_distance()
ep_robot.chassis.drive_speed(z=0)
ep_robot.close()
if __name__ == '__main__':
main()
|
the-stack_0_9468 | """Automated data download and IO."""
# Builtins
import glob
import os
import gzip
import bz2
import hashlib
import shutil
import zipfile
import sys
import math
import logging
from functools import partial, wraps
import time
import fnmatch
import urllib.request
import urllib.error
from urllib.parse import urlparse
import socket
import multiprocessing
from netrc import netrc
import ftplib
import ssl
import tarfile
# External libs
import pandas as pd
import numpy as np
from shapely.ops import transform as shp_trafo
from shapely.ops import unary_union
import shapely.geometry as shpg
import requests
# Optional libs
try:
import geopandas as gpd
except ImportError:
pass
try:
import salem
from salem import wgs84
except ImportError:
pass
try:
import rasterio
try:
# rasterio V > 1.0
from rasterio.merge import merge as merge_tool
except ImportError:
from rasterio.tools.merge import merge as merge_tool
except ImportError:
pass
try:
ModuleNotFoundError
except NameError:
ModuleNotFoundError = ImportError
# Locals
import oggm.cfg as cfg
from oggm.exceptions import (InvalidParamsError, NoInternetException,
DownloadVerificationFailedException,
DownloadCredentialsMissingException,
HttpDownloadError, HttpContentTooShortError,
InvalidDEMError, FTPSDownloadError)
# Module logger
logger = logging.getLogger('.'.join(__name__.split('.')[:-1]))
# Github repository and commit hash/branch name/tag name on that repository
# The given commit will be downloaded from github and used as source for
# all sample data
SAMPLE_DATA_GH_REPO = 'OGGM/oggm-sample-data'
SAMPLE_DATA_COMMIT = '18210326c4a212bd75fe21ba5355571e02938ff9'
GDIR_URL = 'https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.1/'
DEMO_GDIR_URL = 'https://cluster.klima.uni-bremen.de/~oggm/demo_gdirs/'
DEMS_GDIR_URL = 'https://cluster.klima.uni-bremen.de/data/gdirs/dems_v0/'
CMIP5_URL = 'https://cluster.klima.uni-bremen.de/~nicolas/cmip5-ng/'
CHECKSUM_URL = 'https://cluster.klima.uni-bremen.de/data/downloads.sha256.hdf'
CHECKSUM_VALIDATION_URL = CHECKSUM_URL + '.sha256'
# Web mercator proj constants
WEB_N_PIX = 256
WEB_EARTH_RADUIS = 6378137.
DEM_SOURCES = ['GIMP', 'ARCTICDEM', 'RAMP', 'TANDEM', 'AW3D30', 'MAPZEN',
'DEM3', 'ASTER', 'SRTM', 'REMA', 'ALASKA', 'COPDEM', 'NASADEM']
_RGI_METADATA = dict()
DEM3REG = {
'ISL': [-25., -13., 63., 67.], # Iceland
'SVALBARD': [9., 35.99, 75., 84.],
'JANMAYEN': [-10., -7., 70., 72.],
'FJ': [36., 68., 79., 90.], # Franz Josef Land
'FAR': [-8., -6., 61., 63.], # Faroer
'BEAR': [18., 20., 74., 75.], # Bear Island
'SHL': [-3., 0., 60., 61.], # Shetland
# Antarctica tiles as UTM zones, large files
'01-15': [-180., -91., -90, -60.],
'16-30': [-91., -1., -90., -60.],
'31-45': [-1., 89., -90., -60.],
'46-60': [89., 189., -90., -60.],
# Greenland tiles
'GL-North': [-72., -11., 76., 84.],
'GL-West': [-62., -42., 64., 76.],
'GL-South': [-52., -40., 59., 64.],
'GL-East': [-42., -17., 64., 76.]
}
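# Each DEM3REG value is a [lon_min, lon_max, lat_min, lat_max] bounding box in
# degrees (e.g. Iceland spans roughly 25W-13W and 63N-67N).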
# Function
tuple2int = partial(np.array, dtype=np.int64)
lock = None
def mkdir(path, reset=False):
"""Checks if directory exists and if not, create one.
Parameters
----------
reset: erase the content of the directory if exists
Returns
-------
the path
"""
if reset and os.path.exists(path):
shutil.rmtree(path)
try:
os.makedirs(path)
except FileExistsError:
pass
return path
def del_empty_dirs(s_dir):
"""Delete empty directories."""
b_empty = True
for s_target in os.listdir(s_dir):
s_path = os.path.join(s_dir, s_target)
if os.path.isdir(s_path):
if not del_empty_dirs(s_path):
b_empty = False
else:
b_empty = False
if b_empty:
os.rmdir(s_dir)
return b_empty
def findfiles(root_dir, endswith):
"""Finds all files with a specific ending in a directory
Parameters
----------
    root_dir : str
        The directory to search for files in
    endswith : str
        The file ending (e.g. '.hgt')
Returns
-------
the list of files
"""
out = []
for dirpath, dirnames, filenames in os.walk(root_dir):
for filename in [f for f in filenames if f.endswith(endswith)]:
out.append(os.path.join(dirpath, filename))
return out
def get_lock():
"""Get multiprocessing lock."""
global lock
if lock is None:
# Global Lock
if cfg.PARAMS.get('use_mp_spawn', False):
lock = multiprocessing.get_context('spawn').Lock()
else:
lock = multiprocessing.Lock()
return lock
def get_dl_verify_data(section):
"""Returns a pandas DataFrame with all known download object hashes.
    The returned DataFrame is indexed by cache_obj_name (without the section
    prefix) and has 'size' (int) and 'sha256' (bytes) columns.
"""
verify_key = 'dl_verify_data_' + section
if cfg.DATA.get(verify_key) is not None:
return cfg.DATA[verify_key]
verify_file_path = os.path.join(cfg.CACHE_DIR, 'downloads.sha256.hdf')
def verify_file():
"""Check the hash file's own hash"""
logger.info('Checking the download verification file checksum...')
try:
with requests.get(CHECKSUM_VALIDATION_URL) as req:
req.raise_for_status()
verify_file_sha256 = req.text.split(maxsplit=1)[0]
verify_file_sha256 = bytearray.fromhex(verify_file_sha256)
except Exception as e:
verify_file_sha256 = None
logger.warning('Failed getting verification checksum: ' + repr(e))
if os.path.isfile(verify_file_path) and verify_file_sha256:
sha256 = hashlib.sha256()
with open(verify_file_path, 'rb') as f:
for b in iter(lambda: f.read(0xFFFF), b''):
sha256.update(b)
if sha256.digest() != verify_file_sha256:
logger.warning('%s changed or invalid, deleting.'
% (verify_file_path))
os.remove(verify_file_path)
if not np.any(['dl_verify_data_' in k for k in cfg.DATA.keys()]):
# We check the hash file only once per session
# no need to do it at each call
verify_file()
if not os.path.isfile(verify_file_path):
logger.info('Downloading %s to %s...'
% (CHECKSUM_URL, verify_file_path))
with requests.get(CHECKSUM_URL, stream=True) as req:
if req.status_code == 200:
mkdir(os.path.dirname(verify_file_path))
with open(verify_file_path, 'wb') as f:
for b in req.iter_content(chunk_size=0xFFFF):
if b:
f.write(b)
logger.info('Done downloading.')
verify_file()
if not os.path.isfile(verify_file_path):
logger.warning('Downloading and verifying checksums failed.')
return pd.DataFrame()
try:
data = pd.read_hdf(verify_file_path, key=section)
except KeyError:
data = pd.DataFrame()
cfg.DATA[verify_key] = data
return data
def _call_dl_func(dl_func, cache_path):
"""Helper so the actual call to downloads can be overridden
"""
return dl_func(cache_path)
def _cached_download_helper(cache_obj_name, dl_func, reset=False):
"""Helper function for downloads.
Takes care of checking if the file is already cached.
Only calls the actual download function when no cached version exists.
"""
cache_dir = cfg.PATHS['dl_cache_dir']
cache_ro = cfg.PARAMS['dl_cache_readonly']
# A lot of logic below could be simplified but it's also not too important
wd = cfg.PATHS.get('working_dir')
if wd:
# this is for real runs
fb_cache_dir = os.path.join(wd, 'cache')
check_fb_dir = False
else:
        # Nothing has been set up yet, this is bad - find a place to write
        # This should only happen on a read-only cluster, but still
wd = os.environ.get('OGGM_WORKDIR')
if wd is not None and os.path.isdir(wd):
fb_cache_dir = os.path.join(wd, 'cache')
else:
fb_cache_dir = os.path.join(cfg.CACHE_DIR, 'cache')
check_fb_dir = True
if not cache_dir:
# Defaults to working directory: it must be set!
if not cfg.PATHS['working_dir']:
raise InvalidParamsError("Need a valid PATHS['working_dir']!")
cache_dir = fb_cache_dir
cache_ro = False
fb_path = os.path.join(fb_cache_dir, cache_obj_name)
if not reset and os.path.isfile(fb_path):
return fb_path
cache_path = os.path.join(cache_dir, cache_obj_name)
if not reset and os.path.isfile(cache_path):
return cache_path
if cache_ro:
if check_fb_dir:
# Add a manual check that we are caching sample data download
if 'oggm-sample-data' not in fb_path:
raise InvalidParamsError('Attempting to download something '
'with invalid global settings.')
cache_path = fb_path
if not cfg.PARAMS['has_internet']:
raise NoInternetException("Download required, but "
"`has_internet` is False.")
mkdir(os.path.dirname(cache_path))
try:
cache_path = _call_dl_func(dl_func, cache_path)
except BaseException:
if os.path.exists(cache_path):
os.remove(cache_path)
raise
return cache_path
def _verified_download_helper(cache_obj_name, dl_func, reset=False):
"""Helper function for downloads.
Verifies the size and hash of the downloaded file against the included
list of known static files.
Uses _cached_download_helper to perform the actual download.
"""
path = _cached_download_helper(cache_obj_name, dl_func, reset)
try:
dl_verify = cfg.PARAMS['dl_verify']
except KeyError:
dl_verify = True
if dl_verify and path and cache_obj_name not in cfg.DL_VERIFIED:
cache_section, cache_path = cache_obj_name.split('/', 1)
data = get_dl_verify_data(cache_section)
if cache_path not in data.index:
logger.info('No known hash for %s' % cache_obj_name)
cfg.DL_VERIFIED[cache_obj_name] = True
else:
# compute the hash
sha256 = hashlib.sha256()
with open(path, 'rb') as f:
for b in iter(lambda: f.read(0xFFFF), b''):
sha256.update(b)
sha256 = sha256.digest()
size = os.path.getsize(path)
# check
data = data.loc[cache_path]
if data['size'] != size or bytes(data['sha256']) != sha256:
err = '%s failed to verify!\nis: %s %s\nexpected: %s %s' % (
                    path, size, sha256.hex(), data['size'], bytes(data['sha256']).hex())
raise DownloadVerificationFailedException(msg=err, path=path)
logger.info('%s verified successfully.' % path)
cfg.DL_VERIFIED[cache_obj_name] = True
return path
def _requests_urlretrieve(url, path, reporthook, auth=None, timeout=None):
"""Implements the required features of urlretrieve on top of requests
"""
chunk_size = 128 * 1024
chunk_count = 0
with requests.get(url, stream=True, auth=auth, timeout=timeout) as r:
if r.status_code != 200:
raise HttpDownloadError(r.status_code, url)
r.raise_for_status()
size = r.headers.get('content-length') or -1
size = int(size)
if reporthook:
reporthook(chunk_count, chunk_size, size)
with open(path, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if not chunk:
continue
f.write(chunk)
chunk_count += 1
if reporthook:
reporthook(chunk_count, chunk_size, size)
if chunk_count * chunk_size < size:
raise HttpContentTooShortError()
def _classic_urlretrieve(url, path, reporthook, auth=None, timeout=None):
"""Thin wrapper around pythons urllib urlretrieve
"""
ourl = url
if auth:
u = urlparse(url)
if '@' not in u.netloc:
netloc = auth[0] + ':' + auth[1] + '@' + u.netloc
url = u._replace(netloc=netloc).geturl()
old_def_timeout = socket.getdefaulttimeout()
if timeout is not None:
socket.setdefaulttimeout(timeout)
try:
urllib.request.urlretrieve(url, path, reporthook)
except urllib.error.HTTPError as e:
raise HttpDownloadError(e.code, ourl)
except urllib.error.ContentTooShortError as e:
raise HttpContentTooShortError()
finally:
socket.setdefaulttimeout(old_def_timeout)
class ImplicitFTPTLS(ftplib.FTP_TLS):
""" FTP_TLS subclass that automatically wraps sockets in SSL to support
implicit FTPS.
Taken from https://stackoverflow.com/a/36049814
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._sock = None
@property
def sock(self):
"""Return the socket."""
return self._sock
@sock.setter
def sock(self, value):
"""When modifying the socket, ensure that it is ssl wrapped."""
if value is not None and not isinstance(value, ssl.SSLSocket):
value = self.context.wrap_socket(value)
self._sock = value
def _ftps_retrieve(url, path, reporthook, auth=None, timeout=None):
""" Wrapper around ftplib to download from FTPS server
"""
if not auth:
raise DownloadCredentialsMissingException('No authentication '
'credentials given!')
upar = urlparse(url)
# Decide if Implicit or Explicit FTPS is used based on the port in url
if upar.port == 990:
ftps = ImplicitFTPTLS()
elif upar.port == 21:
ftps = ftplib.FTP_TLS()
try:
# establish ssl connection
ftps.connect(host=upar.hostname, port=upar.port, timeout=timeout)
ftps.login(user=auth[0], passwd=auth[1])
ftps.prot_p()
logger.info('Established connection %s' % upar.hostname)
# meta for progress bar size
count = 0
total = ftps.size(upar.path)
bs = 12*1024
def _ftps_progress(data):
outfile.write(data)
nonlocal count
count += 1
reporthook(count, count*bs, total)
with open(path, 'wb') as outfile:
ftps.retrbinary('RETR ' + upar.path, _ftps_progress, blocksize=bs)
except (ftplib.error_perm, socket.timeout, socket.gaierror) as err:
raise FTPSDownloadError(err)
finally:
ftps.close()
def _get_url_cache_name(url):
"""Returns the cache name for any given url.
"""
res = urlparse(url)
return res.netloc.split(':', 1)[0] + res.path
def oggm_urlretrieve(url, cache_obj_name=None, reset=False,
reporthook=None, auth=None, timeout=None):
"""Wrapper around urlretrieve, to implement our caching logic.
Instead of accepting a destination path, it decided where to store the file
and returns the local path.
auth is expected to be either a tuple of ('username', 'password') or None.
"""
if cache_obj_name is None:
cache_obj_name = _get_url_cache_name(url)
def _dlf(cache_path):
logger.info("Downloading %s to %s..." % (url, cache_path))
try:
_requests_urlretrieve(url, cache_path, reporthook, auth, timeout)
except requests.exceptions.InvalidSchema:
if 'ftps://' in url:
_ftps_retrieve(url, cache_path, reporthook, auth, timeout)
else:
_classic_urlretrieve(url, cache_path, reporthook, auth,
timeout)
return cache_path
return _verified_download_helper(cache_obj_name, _dlf, reset)
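# A hedged usage sketch for oggm_urlretrieve (the URL and credentials below are
# illustrative, not real endpoints):
#
#     local_path = oggm_urlretrieve('https://example.com/dem/tile.tif',
#                                   auth=('username', 'password'))
#
# The file is stored in the download cache and the cached copy is returned on
# subsequent calls.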
def _progress_urlretrieve(url, cache_name=None, reset=False,
auth=None, timeout=None):
"""Downloads a file, returns its local path, and shows a progressbar."""
try:
from progressbar import DataTransferBar, UnknownLength
pbar = [None]
def _upd(count, size, total):
if pbar[0] is None:
pbar[0] = DataTransferBar()
if pbar[0].max_value is None:
if total > 0:
pbar[0].start(total)
else:
pbar[0].start(UnknownLength)
pbar[0].update(min(count * size, total))
sys.stdout.flush()
res = oggm_urlretrieve(url, cache_obj_name=cache_name, reset=reset,
reporthook=_upd, auth=auth, timeout=timeout)
try:
pbar[0].finish()
except BaseException:
pass
return res
except (ImportError, ModuleNotFoundError):
return oggm_urlretrieve(url, cache_obj_name=cache_name,
reset=reset, auth=auth, timeout=timeout)
def aws_file_download(aws_path, cache_name=None, reset=False):
with get_lock():
return _aws_file_download_unlocked(aws_path, cache_name, reset)
def _aws_file_download_unlocked(aws_path, cache_name=None, reset=False):
"""Download a file from the AWS drive s3://astgtmv2/
**Note:** you need AWS credentials for this to work.
Parameters
----------
aws_path: path relative to s3://astgtmv2/
"""
while aws_path.startswith('/'):
aws_path = aws_path[1:]
if cache_name is not None:
cache_obj_name = cache_name
else:
cache_obj_name = 'astgtmv2/' + aws_path
def _dlf(cache_path):
raise NotImplementedError("Downloads from AWS are no longer supported")
return _verified_download_helper(cache_obj_name, _dlf, reset)
def file_downloader(www_path, retry_max=5, cache_name=None,
reset=False, auth=None, timeout=None):
"""A slightly better downloader: it tries more than once."""
local_path = None
retry_counter = 0
while retry_counter <= retry_max:
# Try to download
try:
retry_counter += 1
local_path = _progress_urlretrieve(www_path, cache_name=cache_name,
reset=reset, auth=auth,
timeout=timeout)
# if no error, exit
break
except HttpDownloadError as err:
# This works well for py3
if err.code == 404 or err.code == 300:
# Ok so this *should* be an ocean tile
return None
elif err.code >= 500 and err.code < 600:
logger.info("Downloading %s failed with HTTP error %s, "
"retrying in 10 seconds... %s/%s" %
(www_path, err.code, retry_counter, retry_max))
time.sleep(10)
continue
else:
raise
except HttpContentTooShortError as err:
logger.info("Downloading %s failed with ContentTooShortError"
" error %s, retrying in 10 seconds... %s/%s" %
(www_path, err.code, retry_counter, retry_max))
time.sleep(10)
continue
except DownloadVerificationFailedException as err:
if (cfg.PATHS['dl_cache_dir'] and
err.path.startswith(cfg.PATHS['dl_cache_dir']) and
cfg.PARAMS['dl_cache_readonly']):
if not cache_name:
cache_name = _get_url_cache_name(www_path)
cache_name = "GLOBAL_CACHE_INVALID/" + cache_name
retry_counter -= 1
logger.info("Global cache for %s is invalid!")
else:
try:
os.remove(err.path)
except FileNotFoundError:
pass
logger.info("Downloading %s failed with "
"DownloadVerificationFailedException\n %s\n"
"The file might have changed or is corrupted. "
"File deleted. Re-downloading... %s/%s" %
(www_path, err.msg, retry_counter, retry_max))
continue
except requests.ConnectionError as err:
if err.args[0].__class__.__name__ == 'MaxRetryError':
# if request tried often enough we don't have to do this
# this error does happen for not existing ASTERv3 files
return None
else:
# in other cases: try again
logger.info("Downloading %s failed with ConnectionError, "
"retrying in 10 seconds... %s/%s" %
(www_path, retry_counter, retry_max))
time.sleep(10)
continue
except FTPSDownloadError as err:
logger.info("Downloading %s failed with FTPSDownloadError"
" error: '%s', retrying in 10 seconds... %s/%s" %
(www_path, err.orgerr, retry_counter, retry_max))
time.sleep(10)
continue
# See if we managed (fail is allowed)
if not local_path or not os.path.exists(local_path):
logger.warning('Downloading %s failed.' % www_path)
return local_path
def locked_func(func):
"""To decorate a function that needs to be locked for multiprocessing"""
@wraps(func)
def wrapper(*args, **kwargs):
with get_lock():
return func(*args, **kwargs)
return wrapper
def file_extractor(file_path):
"""For archives with only one file inside extract the file to tmpdir."""
filename, file_extension = os.path.splitext(file_path)
# Second one for tar.gz files
f2, ex2 = os.path.splitext(filename)
if ex2 == '.tar':
filename, file_extension = f2, '.tar.gz'
bname = os.path.basename(file_path)
# This is to give a unique name to the tmp file
hid = hashlib.md5(file_path.encode()).hexdigest()[:7] + '_'
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
# Check output extension
def _check_ext(f):
_, of_ext = os.path.splitext(f)
if of_ext not in ['.nc', '.tif']:
raise InvalidParamsError('Extracted file extension not recognized'
': {}'.format(of_ext))
return of_ext
if file_extension == '.zip':
with zipfile.ZipFile(file_path) as zf:
members = zf.namelist()
if len(members) != 1:
raise RuntimeError('Cannot extract multiple files')
o_name = hid + members[0]
o_path = os.path.join(tmpdir, o_name)
of_ext = _check_ext(o_path)
if not os.path.exists(o_path):
logger.info('Extracting {} to {}...'.format(bname, o_path))
with open(o_path, 'wb') as f:
f.write(zf.read(members[0]))
elif file_extension == '.gz':
# Gzip files cannot be inspected. It's always only one file
# Decide on its name
o_name = hid + os.path.basename(filename)
o_path = os.path.join(tmpdir, o_name)
of_ext = _check_ext(o_path)
if not os.path.exists(o_path):
logger.info('Extracting {} to {}...'.format(bname, o_path))
with gzip.GzipFile(file_path) as zf:
with open(o_path, 'wb') as outfile:
for line in zf:
outfile.write(line)
elif file_extension == '.bz2':
# bzip2 files cannot be inspected. It's always only one file
# Decide on its name
o_name = hid + os.path.basename(filename)
o_path = os.path.join(tmpdir, o_name)
of_ext = _check_ext(o_path)
if not os.path.exists(o_path):
logger.info('Extracting {} to {}...'.format(bname, o_path))
with bz2.open(file_path) as zf:
with open(o_path, 'wb') as outfile:
for line in zf:
outfile.write(line)
elif file_extension in ['.tar.gz', '.tar']:
with tarfile.open(file_path) as zf:
members = zf.getmembers()
if len(members) != 1:
raise RuntimeError('Cannot extract multiple files')
o_name = hid + members[0].name
o_path = os.path.join(tmpdir, o_name)
of_ext = _check_ext(o_path)
if not os.path.exists(o_path):
logger.info('Extracting {} to {}...'.format(bname, o_path))
with open(o_path, 'wb') as f:
f.write(zf.extractfile(members[0]).read())
else:
raise InvalidParamsError('Extension not recognized: '
'{}'.format(file_extension))
# Be sure we don't overfill the folder
cfg.get_lru_handler(tmpdir, ending=of_ext).append(o_path)
return o_path
def download_with_authentication(wwwfile, key):
""" Uses credentials from a local .netrc file to download files
    This function is currently used for TanDEM-X and ASTER downloads.
    Parameters
    ----------
    wwwfile : str
        path to the file to download
    key : str
        the machine entry to look up in the .netrc file
    Returns
    -------
    the local path to the downloaded file
"""
# Check the cache first. Use dummy download function to assure nothing is
# tried to be downloaded without credentials:
def _always_none(foo):
return None
cache_obj_name = _get_url_cache_name(wwwfile)
dest_file = _verified_download_helper(cache_obj_name, _always_none)
# Grab auth parameters
if not dest_file:
authfile = os.path.expanduser('~/.netrc')
if not os.path.isfile(authfile):
raise DownloadCredentialsMissingException(
(authfile, ' does not exist. Add necessary credentials for ',
                 key, ' with `oggm_netrc_credentials`. You may have to ',
'register at the respective service first.'))
try:
netrc(authfile).authenticators(key)[0]
except TypeError:
raise DownloadCredentialsMissingException(
('Credentials for ', key, ' are not in ', authfile, '. Add ',
                 'credentials with `oggm_netrc_credentials`.'))
dest_file = file_downloader(
wwwfile, auth=(netrc(authfile).authenticators(key)[0],
netrc(authfile).authenticators(key)[2]))
return dest_file
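# A sketch of the ~/.netrc entry expected by download_with_authentication
# (illustrative credentials; the machine name must match the `key` argument,
# e.g. 'urs.earthdata.nasa.gov' for ASTER or 'geoservice.dlr.de' for TanDEM-X):
#
#     machine urs.earthdata.nasa.gov
#     login my_username
#     password my_password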
def download_oggm_files():
with get_lock():
return _download_oggm_files_unlocked()
def _download_oggm_files_unlocked():
"""Checks if the demo data is already on the cache and downloads it."""
zip_url = 'https://github.com/%s/archive/%s.zip' % \
(SAMPLE_DATA_GH_REPO, SAMPLE_DATA_COMMIT)
odir = os.path.join(cfg.CACHE_DIR)
sdir = os.path.join(cfg.CACHE_DIR,
'oggm-sample-data-%s' % SAMPLE_DATA_COMMIT)
# download only if necessary
if not os.path.exists(sdir):
ofile = file_downloader(zip_url)
with zipfile.ZipFile(ofile) as zf:
zf.extractall(odir)
assert os.path.isdir(sdir)
# list of files for output
out = dict()
for root, directories, filenames in os.walk(sdir):
for filename in filenames:
if filename in out:
# This was a stupid thing, and should not happen
# TODO: duplicates in sample data...
k = os.path.join(os.path.basename(root), filename)
assert k not in out
out[k] = os.path.join(root, filename)
else:
out[filename] = os.path.join(root, filename)
return out
def _download_srtm_file(zone):
with get_lock():
return _download_srtm_file_unlocked(zone)
def _download_srtm_file_unlocked(zone):
"""Checks if the srtm data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
outpath = os.path.join(tmpdir, 'srtm_' + zone + '.tif')
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
# Did we download it yet?
wwwfile = ('http://srtm.csi.cgiar.org/wp-content/uploads/files/srtm_5x5/'
'TIFF/srtm_' + zone + '.zip')
dest_file = file_downloader(wwwfile)
# None means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(outpath):
with zipfile.ZipFile(dest_file) as zf:
zf.extractall(tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_nasadem_file(zone):
with get_lock():
return _download_nasadem_file_unlocked(zone)
def _download_nasadem_file_unlocked(zone):
"""Checks if the NASADEM data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
wwwfile = ('https://e4ftl01.cr.usgs.gov/MEASURES/NASADEM_HGT.001/'
'2000.02.11/NASADEM_HGT_{}.zip'.format(zone))
demfile = '{}.hgt'.format(zone)
outpath = os.path.join(tmpdir, demfile)
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
# Did we download it yet?
dest_file = file_downloader(wwwfile)
# None means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(outpath):
with zipfile.ZipFile(dest_file) as zf:
zf.extract(demfile, path=tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_tandem_file(zone):
with get_lock():
return _download_tandem_file_unlocked(zone)
def _download_tandem_file_unlocked(zone):
"""Checks if the tandem data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
bname = zone.split('/')[-1] + '_DEM.tif'
wwwfile = ('https://download.geoservice.dlr.de/TDM90/files/'
'{}.zip'.format(zone))
outpath = os.path.join(tmpdir, bname)
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
dest_file = download_with_authentication(wwwfile, 'geoservice.dlr.de')
# That means we tried hard but we couldn't find it
if not dest_file:
return None
elif not zipfile.is_zipfile(dest_file):
        # If the TanDEM-X tile does not exist, an invalid file is created.
# See https://github.com/OGGM/oggm/issues/893 for more details
return None
# ok we have to extract it
if not os.path.exists(outpath):
with zipfile.ZipFile(dest_file) as zf:
for fn in zf.namelist():
if 'DEM/' + bname in fn:
break
with open(outpath, 'wb') as fo:
fo.write(zf.read(fn))
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_dem3_viewpano(zone):
with get_lock():
return _download_dem3_viewpano_unlocked(zone)
def _download_dem3_viewpano_unlocked(zone):
"""Checks if the DEM3 data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
outpath = os.path.join(tmpdir, zone + '.tif')
extract_dir = os.path.join(tmpdir, 'tmp_' + zone)
mkdir(extract_dir, reset=True)
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
# OK, so see if downloaded already
# some files have a newer version 'v2'
if zone in ['R33', 'R34', 'R35', 'R36', 'R37', 'R38', 'Q32', 'Q33', 'Q34',
'Q35', 'Q36', 'Q37', 'Q38', 'Q39', 'Q40', 'P31', 'P32', 'P33',
'P34', 'P35', 'P36', 'P37', 'P38', 'P39', 'P40']:
ifile = 'http://viewfinderpanoramas.org/dem3/' + zone + 'v2.zip'
elif zone in DEM3REG.keys():
# We prepared these files as tif already
ifile = ('https://cluster.klima.uni-bremen.de/~oggm/dem/'
'DEM3_MERGED/{}.tif'.format(zone))
return file_downloader(ifile)
else:
ifile = 'http://viewfinderpanoramas.org/dem3/' + zone + '.zip'
dfile = file_downloader(ifile)
# None means we tried hard but we couldn't find it
if not dfile:
return None
# ok we have to extract it
with zipfile.ZipFile(dfile) as zf:
zf.extractall(extract_dir)
# Serious issue: sometimes, if a southern hemisphere URL is queried for
# download and there is none, a NH zip file is downloaded.
# Example: http://viewfinderpanoramas.org/dem3/SN29.zip yields N29!
# BUT: There are southern hemisphere files that download properly. However,
# the unzipped folder has the file name of
    # the northern hemisphere file. Some checks that the correct file exists:
if len(zone) == 4 and zone.startswith('S'):
zonedir = os.path.join(extract_dir, zone[1:])
else:
zonedir = os.path.join(extract_dir, zone)
globlist = glob.glob(os.path.join(zonedir, '*.hgt'))
# take care of the special file naming cases
if zone in DEM3REG.keys():
globlist = glob.glob(os.path.join(extract_dir, '*', '*.hgt'))
if not globlist:
# Final resort
globlist = (findfiles(extract_dir, '.hgt') or
findfiles(extract_dir, '.HGT'))
if not globlist:
raise RuntimeError("We should have some files here, but we don't")
# merge the single HGT files (can be a bit ineffective, because not every
# single file might be exactly within extent...)
rfiles = [rasterio.open(s) for s in globlist]
dest, output_transform = merge_tool(rfiles)
profile = rfiles[0].profile
if 'affine' in profile:
profile.pop('affine')
profile['transform'] = output_transform
profile['height'] = dest.shape[1]
profile['width'] = dest.shape[2]
profile['driver'] = 'GTiff'
with rasterio.open(outpath, 'w', **profile) as dst:
dst.write(dest)
for rf in rfiles:
rf.close()
# delete original files to spare disk space
for s in globlist:
os.remove(s)
del_empty_dirs(tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_aster_file(zone):
with get_lock():
return _download_aster_file_unlocked(zone)
def _download_aster_file_unlocked(zone):
"""Checks if the ASTER data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
wwwfile = ('https://e4ftl01.cr.usgs.gov/ASTER_B/ASTT/ASTGTM.003/'
'2000.03.01/{}.zip'.format(zone))
outpath = os.path.join(tmpdir, zone + '_dem.tif')
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
# download from NASA Earthdata with credentials
dest_file = download_with_authentication(wwwfile, 'urs.earthdata.nasa.gov')
# That means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(outpath):
with zipfile.ZipFile(dest_file) as zf:
zf.extractall(tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_topo_file_from_cluster(fname):
with get_lock():
return _download_topo_file_from_cluster_unlocked(fname)
def _download_topo_file_from_cluster_unlocked(fname):
"""Checks if the special topo data is in the directory and if not,
download it from the cluster.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
outpath = os.path.join(tmpdir, fname)
url = 'https://cluster.klima.uni-bremen.de/data/dems/'
url += fname + '.zip'
dfile = file_downloader(url)
if not os.path.exists(outpath):
logger.info('Extracting ' + fname + '.zip to ' + outpath + '...')
with zipfile.ZipFile(dfile) as zf:
zf.extractall(tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_copdem_file(cppfile, tilename):
with get_lock():
return _download_copdem_file_unlocked(cppfile, tilename)
def _download_copdem_file_unlocked(cppfile, tilename):
"""Checks if Copernicus DEM file is in the directory, if not download it.
cppfile : name of the tarfile to download
tilename : name of folder and tif file within the cppfile
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
# tarfiles are extracted in directories per each tile
fpath = '{0}_DEM.tif'.format(tilename)
demfile = os.path.join(tmpdir, fpath)
# check if extracted file exists already
if os.path.exists(demfile):
return demfile
# Did we download it yet?
ftpfile = ('ftps://cdsdata.copernicus.eu:990/' +
'datasets/COP-DEM_GLO-90-DGED/2019_1/' +
cppfile)
dest_file = download_with_authentication(ftpfile,
'spacedata.copernicus.eu')
# None means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(demfile):
tiffile = os.path.join(tilename, 'DEM', fpath)
with tarfile.open(dest_file) as tf:
tmember = tf.getmember(tiffile)
# do not extract the full path of the file
tmember.name = os.path.basename(tf.getmember(tiffile).name)
tf.extract(tmember, tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(demfile)
cfg.get_lru_handler(tmpdir).append(demfile)
return demfile
def _download_aw3d30_file(zone):
with get_lock():
return _download_aw3d30_file_unlocked(zone)
def _download_aw3d30_file_unlocked(fullzone):
"""Checks if the AW3D30 data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
# tarfiles are extracted in directories per each tile
tile = fullzone.split('/')[1]
demfile = os.path.join(tmpdir, tile, tile + '_AVE_DSM.tif')
# check if extracted file exists already
if os.path.exists(demfile):
return demfile
# Did we download it yet?
ftpfile = ('ftp://ftp.eorc.jaxa.jp/pub/ALOS/ext1/AW3D30/release_v1804/'
+ fullzone + '.tar.gz')
try:
dest_file = file_downloader(ftpfile, timeout=180)
except urllib.error.URLError:
# This error is raised if file is not available, could be water
return None
# None means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(demfile):
from oggm.utils import robust_tar_extract
dempath = os.path.dirname(demfile)
robust_tar_extract(dest_file, dempath)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(demfile)
# this tarfile contains several files
for file in os.listdir(dempath):
cfg.get_lru_handler(tmpdir).append(os.path.join(dempath, file))
return demfile
def _download_mapzen_file(zone):
with get_lock():
return _download_mapzen_file_unlocked(zone)
def _download_mapzen_file_unlocked(zone):
"""Checks if the mapzen data is in the directory and if not, download it.
"""
bucket = 'elevation-tiles-prod'
prefix = 'geotiff'
url = 'http://s3.amazonaws.com/%s/%s/%s' % (bucket, prefix, zone)
# That's all
return file_downloader(url, timeout=180)
def _get_centerline_lonlat(gdir):
"""Quick n dirty solution to write the centerlines as a shapefile"""
cls = gdir.read_pickle('centerlines')
olist = []
for j, cl in enumerate(cls[::-1]):
mm = 1 if j == 0 else 0
gs = gpd.GeoSeries()
gs['RGIID'] = gdir.rgi_id
gs['LE_SEGMENT'] = np.rint(np.max(cl.dis_on_line) * gdir.grid.dx)
gs['MAIN'] = mm
tra_func = partial(gdir.grid.ij_to_crs, crs=wgs84)
gs['geometry'] = shp_trafo(tra_func, cl.line)
olist.append(gs)
return olist
def get_prepro_gdir(rgi_version, rgi_id, border, prepro_level, base_url=None):
with get_lock():
return _get_prepro_gdir_unlocked(rgi_version, rgi_id, border,
prepro_level, base_url=base_url)
def _get_prepro_gdir_unlocked(rgi_version, rgi_id, border, prepro_level,
base_url=None):
# Prepro URL
if base_url is None:
base_url = GDIR_URL
if not base_url.endswith('/'):
base_url += '/'
url = base_url
url += 'RGI{}/'.format(rgi_version)
url += 'b_{:03d}/'.format(border)
url += 'L{:d}/'.format(prepro_level)
url += '{}/{}.tar' .format(rgi_id[:8], rgi_id[:11])
tar_base = file_downloader(url)
if tar_base is None:
raise RuntimeError('Could not find file at ' + url)
return tar_base
def srtm_zone(lon_ex, lat_ex):
"""Returns a list of SRTM zones covering the desired extent.
"""
# SRTM are sorted in tiles of 5 degrees
srtm_x0 = -180.
srtm_y0 = 60.
srtm_dx = 5.
srtm_dy = -5.
# quick n dirty solution to be sure that we will cover the whole range
mi, ma = np.min(lon_ex), np.max(lon_ex)
# int() to avoid Deprec warning:
lon_ex = np.linspace(mi, ma, int(np.ceil((ma - mi) + 3)))
mi, ma = np.min(lat_ex), np.max(lat_ex)
# int() to avoid Deprec warning
lat_ex = np.linspace(mi, ma, int(np.ceil((ma - mi) + 3)))
zones = []
for lon in lon_ex:
for lat in lat_ex:
dx = lon - srtm_x0
dy = lat - srtm_y0
assert dy < 0
zx = np.ceil(dx / srtm_dx)
zy = np.ceil(dy / srtm_dy)
zones.append('{:02.0f}_{:02.0f}'.format(zx, zy))
return list(sorted(set(zones)))
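# Usage sketch (extent chosen for illustration, not from the original source):
# a small box in the Alps maps to two candidate 5-degree tiles, the extra one
# on the boundary coming from the deliberate over-coverage above:
#     >>> srtm_zone([10.0, 10.5], [46.0, 46.5])
#     ['38_03', '39_03']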
def _tandem_path(lon_tile, lat_tile):
# OK we have a proper tile now
# First folder level is sorted from S to N
level_0 = 'S' if lat_tile < 0 else 'N'
level_0 += '{:02d}'.format(abs(lat_tile))
# Second folder level is sorted from W to E, but in 10 steps
level_1 = 'W' if lon_tile < 0 else 'E'
level_1 += '{:03d}'.format(divmod(abs(lon_tile), 10)[0] * 10)
    # Level 2 is the formatting of the tile name, which depends on lat
level_2 = 'W' if lon_tile < 0 else 'E'
if abs(lat_tile) <= 60:
level_2 += '{:03d}'.format(abs(lon_tile))
elif abs(lat_tile) <= 80:
level_2 += '{:03d}'.format(divmod(abs(lon_tile), 2)[0] * 2)
else:
level_2 += '{:03d}'.format(divmod(abs(lon_tile), 4)[0] * 4)
# Final path
out = (level_0 + '/' + level_1 + '/' +
'TDM1_DEM__30_{}{}'.format(level_0, level_2))
return out
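# Usage sketch (tile indices chosen for illustration): the helper above builds
# the TanDEM-X server path for a 1-degree tile, latitude band first, then the
# 10-degree longitude bin, then the exact tile name:
#     >>> _tandem_path(10, 46)
#     'N46/E010/TDM1_DEM__30_N46E010'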
def tandem_zone(lon_ex, lat_ex):
"""Returns a list of TanDEM-X zones covering the desired extent.
"""
# Files are one by one tiles, so lets loop over them
# For higher lats they are stored in steps of 2 and 4. My code below
# is probably giving more files than needed but better safe than sorry
    # note: use the builtin int as dtype (np.int is removed in newer NumPy)
    lat_tiles = np.arange(np.floor(lat_ex[0]), np.ceil(lat_ex[1]+1e-9),
                          dtype=int)
zones = []
for lat in lat_tiles:
if abs(lat) < 60:
l0 = np.floor(lon_ex[0])
l1 = np.floor(lon_ex[1])
elif abs(lat) < 80:
l0 = divmod(lon_ex[0], 2)[0] * 2
l1 = divmod(lon_ex[1], 2)[0] * 2
elif abs(lat) < 90:
l0 = divmod(lon_ex[0], 4)[0] * 4
l1 = divmod(lon_ex[1], 4)[0] * 4
        lon_tiles = np.arange(l0, l1+1, dtype=int)
for lon in lon_tiles:
zones.append(_tandem_path(lon, lat))
return list(sorted(set(zones)))
def _aw3d30_path(lon_tile, lat_tile):
# OK we have a proper tile now
# Folders are sorted with N E S W in 5 degree steps
# But in N and E the lower boundary is indicated
# e.g. N060 contains N060 - N064
# e.g. E000 contains E000 - E004
# but S and W indicate the upper boundary:
# e.g. S010 contains S006 - S010
# e.g. W095 contains W091 - W095
# get letters
ns = 'S' if lat_tile < 0 else 'N'
ew = 'W' if lon_tile < 0 else 'E'
# get lat/lon
lon = abs(5 * np.floor(lon_tile/5))
lat = abs(5 * np.floor(lat_tile/5))
folder = '%s%.3d%s%.3d' % (ns, lat, ew, lon)
filename = '%s%.3d%s%.3d' % (ns, abs(lat_tile), ew, abs(lon_tile))
# Final path
out = folder + '/' + filename
return out
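# Usage sketch (tile indices chosen for illustration): the 5-degree folder name
# and the 1-degree file name follow the convention documented above:
#     >>> _aw3d30_path(10, 46)
#     'N045E010/N046E010'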
def aw3d30_zone(lon_ex, lat_ex):
"""Returns a list of AW3D30 zones covering the desired extent.
"""
# Files are one by one tiles, so lets loop over them
    lon_tiles = np.arange(np.floor(lon_ex[0]), np.ceil(lon_ex[1]+1e-9),
                          dtype=int)
    lat_tiles = np.arange(np.floor(lat_ex[0]), np.ceil(lat_ex[1]+1e-9),
                          dtype=int)
zones = []
for lon in lon_tiles:
for lat in lat_tiles:
zones.append(_aw3d30_path(lon, lat))
return list(sorted(set(zones)))
def _extent_to_polygon(lon_ex, lat_ex, to_crs=None):
if lon_ex[0] == lon_ex[1] and lat_ex[0] == lat_ex[1]:
out = shpg.Point(lon_ex[0], lat_ex[0])
else:
x = [lon_ex[0], lon_ex[1], lon_ex[1], lon_ex[0], lon_ex[0]]
y = [lat_ex[0], lat_ex[0], lat_ex[1], lat_ex[1], lat_ex[0]]
out = shpg.Polygon(np.array((x, y)).T)
if to_crs is not None:
out = salem.transform_geometry(out, to_crs=to_crs)
return out
def arcticdem_zone(lon_ex, lat_ex):
"""Returns a list of Arctic-DEM zones covering the desired extent.
"""
gdf = gpd.read_file(get_demo_file('ArcticDEM_Tile_Index_Rel7_by_tile.shp'))
p = _extent_to_polygon(lon_ex, lat_ex, to_crs=gdf.crs)
gdf = gdf.loc[gdf.intersects(p)]
return gdf.tile.values if len(gdf) > 0 else []
def rema_zone(lon_ex, lat_ex):
"""Returns a list of REMA-DEM zones covering the desired extent.
"""
gdf = gpd.read_file(get_demo_file('REMA_Tile_Index_Rel1.1.shp'))
p = _extent_to_polygon(lon_ex, lat_ex, to_crs=gdf.crs)
gdf = gdf.loc[gdf.intersects(p)]
return gdf.tile.values if len(gdf) > 0 else []
def alaska_dem_zone(lon_ex, lat_ex):
"""Returns a list of Alaska-DEM zones covering the desired extent.
"""
gdf = gpd.read_file(get_demo_file('Alaska_albers_V3_tiles.shp'))
p = _extent_to_polygon(lon_ex, lat_ex, to_crs=gdf.crs)
gdf = gdf.loc[gdf.intersects(p)]
return gdf.tile.values if len(gdf) > 0 else []
def copdem_zone(lon_ex, lat_ex):
"""Returns a list of Copernicus DEM tarfile and tilename tuples
"""
# path to the lookup shapefiles
gdf = gpd.read_file(get_demo_file('RGI60_COPDEM_lookup.shp'))
# intersect with lat lon extents
p = _extent_to_polygon(lon_ex, lat_ex, to_crs=gdf.crs)
gdf = gdf.loc[gdf.intersects(p)]
# COPDEM is global, if we miss any tile it is worth an error
if (len(gdf) == 0) or (not unary_union(gdf.geometry).contains(p)):
raise InvalidDEMError('Could not find all necessary Copernicus DEM '
'tiles. This should not happen in a global DEM. '
'Check the RGI-CopernicusDEM lookup shapefile '
'for this particular glacier!')
flist = []
for _, g in gdf.iterrows():
cpp = g['CPP File']
eop = g['Eop Id']
eop = eop.split(':')[-2]
assert 'Copernicus' in eop
flist.append((cpp, eop))
return flist
def dem3_viewpano_zone(lon_ex, lat_ex):
"""Returns a list of DEM3 zones covering the desired extent.
http://viewfinderpanoramas.org/Coverage%20map%20viewfinderpanoramas_org3.htm
"""
for _f in DEM3REG.keys():
if (np.min(lon_ex) >= DEM3REG[_f][0]) and \
(np.max(lon_ex) <= DEM3REG[_f][1]) and \
(np.min(lat_ex) >= DEM3REG[_f][2]) and \
(np.max(lat_ex) <= DEM3REG[_f][3]):
# test some weird inset files in Antarctica
if (np.min(lon_ex) >= -91.) and (np.max(lon_ex) <= -90.) and \
(np.min(lat_ex) >= -72.) and (np.max(lat_ex) <= -68.):
return ['SR15']
elif (np.min(lon_ex) >= -47.) and (np.max(lon_ex) <= -43.) and \
(np.min(lat_ex) >= -61.) and (np.max(lat_ex) <= -60.):
return ['SP23']
elif (np.min(lon_ex) >= 162.) and (np.max(lon_ex) <= 165.) and \
(np.min(lat_ex) >= -68.) and (np.max(lat_ex) <= -66.):
return ['SQ58']
# test some rogue Greenland tiles as well
elif (np.min(lon_ex) >= -72.) and (np.max(lon_ex) <= -66.) and \
(np.min(lat_ex) >= 76.) and (np.max(lat_ex) <= 80.):
return ['T19']
elif (np.min(lon_ex) >= -72.) and (np.max(lon_ex) <= -66.) and \
(np.min(lat_ex) >= 80.) and (np.max(lat_ex) <= 83.):
return ['U19']
elif (np.min(lon_ex) >= -66.) and (np.max(lon_ex) <= -60.) and \
(np.min(lat_ex) >= 80.) and (np.max(lat_ex) <= 83.):
return ['U20']
elif (np.min(lon_ex) >= -60.) and (np.max(lon_ex) <= -54.) and \
(np.min(lat_ex) >= 80.) and (np.max(lat_ex) <= 83.):
return ['U21']
elif (np.min(lon_ex) >= -54.) and (np.max(lon_ex) <= -48.) and \
(np.min(lat_ex) >= 80.) and (np.max(lat_ex) <= 83.):
return ['U22']
elif (np.min(lon_ex) >= -25.) and (np.max(lon_ex) <= -13.) and \
(np.min(lat_ex) >= 63.) and (np.max(lat_ex) <= 67.):
return ['ISL']
else:
return [_f]
# if the tile doesn't have a special name, its name can be found like this:
# corrected SRTMs are sorted in tiles of 6 deg longitude and 4 deg latitude
srtm_x0 = -180.
srtm_y0 = 0.
srtm_dx = 6.
srtm_dy = 4.
# quick n dirty solution to be sure that we will cover the whole range
mi, ma = np.min(lon_ex), np.max(lon_ex)
# TODO: Fabien, find out what Johannes wanted with this +3
    # +3 just makes the number of sample points a bit larger
# int() to avoid Deprec warning
lon_ex = np.linspace(mi, ma, int(np.ceil((ma - mi) / srtm_dy) + 3))
mi, ma = np.min(lat_ex), np.max(lat_ex)
# int() to avoid Deprec warning
lat_ex = np.linspace(mi, ma, int(np.ceil((ma - mi) / srtm_dx) + 3))
zones = []
for lon in lon_ex:
for lat in lat_ex:
dx = lon - srtm_x0
dy = lat - srtm_y0
zx = np.ceil(dx / srtm_dx)
# convert number to letter
zy = chr(int(abs(dy / srtm_dy)) + ord('A'))
if lat >= 0:
zones.append('%s%02.0f' % (zy, zx))
else:
zones.append('S%s%02.0f' % (zy, zx))
return list(sorted(set(zones)))
def aster_zone(lon_ex, lat_ex):
"""Returns a list of ASTGTMV3 zones covering the desired extent.
ASTER v3 tiles are 1 degree x 1 degree
N50 contains 50 to 50.9
E10 contains 10 to 10.9
S70 contains -69.99 to -69.0
W20 contains -19.99 to -19.0
"""
# adding small buffer for unlikely case where one lon/lat_ex == xx.0
lons = np.arange(np.floor(lon_ex[0]-1e-9), np.ceil(lon_ex[1]+1e-9))
lats = np.arange(np.floor(lat_ex[0]-1e-9), np.ceil(lat_ex[1]+1e-9))
zones = []
for lat in lats:
# north or south?
ns = 'S' if lat < 0 else 'N'
for lon in lons:
# east or west?
ew = 'W' if lon < 0 else 'E'
filename = 'ASTGTMV003_{}{:02.0f}{}{:03.0f}'.format(ns, abs(lat),
ew, abs(lon))
zones.append(filename)
return list(sorted(set(zones)))
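# Usage sketch (extent chosen for illustration): a point-like extent inside a
# single 1-degree cell yields exactly one ASTER v3 tile name:
#     >>> aster_zone([10.1, 10.2], [46.1, 46.2])
#     ['ASTGTMV003_N46E010']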
def nasadem_zone(lon_ex, lat_ex):
"""Returns a list of NASADEM zones covering the desired extent.
NASADEM tiles are 1 degree x 1 degree
N50 contains 50 to 50.9
E10 contains 10 to 10.9
S70 contains -69.99 to -69.0
W20 contains -19.99 to -19.0
"""
# adding small buffer for unlikely case where one lon/lat_ex == xx.0
lons = np.arange(np.floor(lon_ex[0]-1e-9), np.ceil(lon_ex[1]+1e-9))
lats = np.arange(np.floor(lat_ex[0]-1e-9), np.ceil(lat_ex[1]+1e-9))
zones = []
for lat in lats:
# north or south?
ns = 's' if lat < 0 else 'n'
for lon in lons:
# east or west?
ew = 'w' if lon < 0 else 'e'
filename = '{}{:02.0f}{}{:03.0f}'.format(ns, abs(lat), ew,
abs(lon))
zones.append(filename)
return list(sorted(set(zones)))
def mapzen_zone(lon_ex, lat_ex, dx_meter=None, zoom=None):
"""Returns a list of AWS mapzen zones covering the desired extent.
For mapzen one has to specify the level of detail (zoom) one wants. The
best way in OGGM is to specify dx_meter of the underlying map and OGGM
will decide which zoom level works best.
"""
if dx_meter is None and zoom is None:
raise InvalidParamsError('Need either zoom level or dx_meter.')
bottom, top = lat_ex
left, right = lon_ex
ybound = 85.0511
if bottom <= -ybound:
bottom = -ybound
if top <= -ybound:
top = -ybound
if bottom > ybound:
bottom = ybound
if top > ybound:
top = ybound
if right >= 180:
right = 179.999
if left >= 180:
left = 179.999
if dx_meter:
# Find out the zoom so that we are close to the desired accuracy
lat = np.max(np.abs([bottom, top]))
zoom = int(np.ceil(math.log2((math.cos(lat * math.pi / 180) *
2 * math.pi * WEB_EARTH_RADUIS) /
(WEB_N_PIX * dx_meter))))
# According to this we should just always stay above 10 (sorry)
# https://github.com/tilezen/joerd/blob/master/docs/data-sources.md
zoom = 10 if zoom < 10 else zoom
# Code from planetutils
size = 2 ** zoom
xt = lambda x: int((x + 180.0) / 360.0 * size)
yt = lambda y: int((1.0 - math.log(math.tan(math.radians(y)) +
(1 / math.cos(math.radians(y))))
/ math.pi) / 2.0 * size)
tiles = []
for x in range(xt(left), xt(right) + 1):
for y in range(yt(top), yt(bottom) + 1):
tiles.append('/'.join(map(str, [zoom, x, str(y) + '.tif'])))
return tiles
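# Usage sketch (values chosen for illustration, not a prescribed workflow): let
# mapzen_zone derive the zoom level from the map resolution, then fetch the
# resulting 'zoom/x/y.tif' tiles one by one:
#     tiles = mapzen_zone([10.0, 10.5], [46.0, 46.5], dx_meter=100)
#     files = [_download_mapzen_file(t) for t in tiles]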
def get_demo_file(fname):
"""Returns the path to the desired OGGM-sample-file.
If Sample data is not cached it will be downloaded from
https://github.com/OGGM/oggm-sample-data
Parameters
----------
fname : str
Filename of the desired OGGM-sample-file
Returns
-------
str
Absolute path to the desired file.
"""
d = download_oggm_files()
if fname in d:
return d[fname]
else:
return None
def get_wgms_files():
"""Get the path to the default WGMS-RGI link file and the data dir.
Returns
-------
(file, dir) : paths to the files
"""
download_oggm_files()
sdir = os.path.join(cfg.CACHE_DIR,
'oggm-sample-data-%s' % SAMPLE_DATA_COMMIT,
'wgms')
datadir = os.path.join(sdir, 'mbdata')
assert os.path.exists(datadir)
outf = os.path.join(sdir, 'rgi_wgms_links_20200415.csv')
outf = pd.read_csv(outf, dtype={'RGI_REG': object})
return outf, datadir
def get_geodetic_files(geodetic_folder_path=None, geodetic_filename=None):
"""Get the path to the combined geodetic and WGMS-RGI link file and the data dir.
Returns
-------
(file, dir) : paths to the files
"""
download_oggm_files()
sdir = os.path.join(cfg.CACHE_DIR,
'oggm-sample-data-%s' % SAMPLE_DATA_COMMIT,
'wgms')
sdir = os.path.join(geodetic_folder_path)
datadir = os.path.join(sdir, 'mbdata_with_geo')
assert os.path.exists(datadir)
outf = os.path.join(sdir, geodetic_filename)
outf = pd.read_csv(outf, dtype={'RGI_REG': object})
return outf, datadir
def get_glathida_file():
"""Get the path to the default GlaThiDa-RGI link file.
Returns
-------
file : paths to the file
"""
# Roll our own
download_oggm_files()
sdir = os.path.join(cfg.CACHE_DIR,
'oggm-sample-data-%s' % SAMPLE_DATA_COMMIT,
'glathida')
outf = os.path.join(sdir, 'rgi_glathida_links.csv')
assert os.path.exists(outf)
return outf
def get_rgi_dir(version=None, reset=False):
"""Path to the RGI directory.
If the RGI files are not present, download them.
Parameters
----------
version : str
'5', '6', defaults to None (linking to the one specified in cfg.PARAMS)
reset : bool
If True, deletes the RGI directory first and downloads the data
Returns
-------
str
path to the RGI directory
"""
with get_lock():
return _get_rgi_dir_unlocked(version=version, reset=reset)
def _get_rgi_dir_unlocked(version=None, reset=False):
rgi_dir = cfg.PATHS['rgi_dir']
if version is None:
version = cfg.PARAMS['rgi_version']
if len(version) == 1:
version += '0'
# Be sure the user gave a sensible path to the RGI dir
if not rgi_dir:
raise InvalidParamsError('The RGI data directory has to be'
'specified explicitly.')
rgi_dir = os.path.abspath(os.path.expanduser(rgi_dir))
rgi_dir = os.path.join(rgi_dir, 'RGIV' + version)
mkdir(rgi_dir, reset=reset)
if version == '50':
dfile = 'http://www.glims.org/RGI/rgi50_files/rgi50.zip'
elif version == '60':
dfile = 'http://www.glims.org/RGI/rgi60_files/00_rgi60.zip'
elif version == '61':
dfile = 'https://cluster.klima.uni-bremen.de/data/rgi/rgi_61.zip'
elif version == '62':
dfile = 'https://cluster.klima.uni-bremen.de/~oggm/rgi/rgi62.zip'
test_file = os.path.join(rgi_dir,
'*_rgi*{}_manifest.txt'.format(version))
if len(glob.glob(test_file)) == 0:
# if not there download it
ofile = file_downloader(dfile, reset=reset)
# Extract root
with zipfile.ZipFile(ofile) as zf:
zf.extractall(rgi_dir)
# Extract subdirs
pattern = '*_rgi{}_*.zip'.format(version)
for root, dirs, files in os.walk(cfg.PATHS['rgi_dir']):
for filename in fnmatch.filter(files, pattern):
zfile = os.path.join(root, filename)
with zipfile.ZipFile(zfile) as zf:
ex_root = zfile.replace('.zip', '')
mkdir(ex_root)
zf.extractall(ex_root)
# delete the zipfile after success
os.remove(zfile)
if len(glob.glob(test_file)) == 0:
raise RuntimeError('Could not find a manifest file in the RGI '
'directory: ' + rgi_dir)
return rgi_dir
def get_rgi_region_file(region, version=None, reset=False):
"""Path to the RGI region file.
If the RGI files are not present, download them.
Parameters
----------
region : str
from '01' to '19'
version : str
'5', '6', defaults to None (linking to the one specified in cfg.PARAMS)
reset : bool
If True, deletes the RGI directory first and downloads the data
Returns
-------
str
path to the RGI shapefile
"""
rgi_dir = get_rgi_dir(version=version, reset=reset)
f = list(glob.glob(rgi_dir + "/*/{}_*.shp".format(region)))
assert len(f) == 1
return f[0]
def get_rgi_glacier_entities(rgi_ids, version=None):
"""Get a list of glacier outlines selected from their RGI IDs.
Will download RGI data if not present.
Parameters
----------
rgi_ids : list of str
the glaciers you want the outlines for
version : str
the rgi version
Returns
-------
geopandas.GeoDataFrame
containing the desired RGI glacier outlines
"""
regions = [s.split('-')[1].split('.')[0] for s in rgi_ids]
if version is None:
version = rgi_ids[0].split('-')[0][-2:]
selection = []
for reg in sorted(np.unique(regions)):
sh = gpd.read_file(get_rgi_region_file(reg, version=version))
selection.append(sh.loc[sh.RGIId.isin(rgi_ids)])
# Make a new dataframe of those
selection = pd.concat(selection)
selection.crs = sh.crs # for geolocalisation
if len(selection) != len(rgi_ids):
raise RuntimeError('Could not find all RGI ids')
return selection
def get_rgi_intersects_dir(version=None, reset=False):
"""Path to the RGI directory containing the intersect files.
If the files are not present, download them.
Parameters
----------
version : str
'5', '6', defaults to None (linking to the one specified in cfg.PARAMS)
reset : bool
If True, deletes the intersects before redownloading them
Returns
-------
str
path to the directory
"""
with get_lock():
return _get_rgi_intersects_dir_unlocked(version=version, reset=reset)
def _get_rgi_intersects_dir_unlocked(version=None, reset=False):
rgi_dir = cfg.PATHS['rgi_dir']
if version is None:
version = cfg.PARAMS['rgi_version']
if len(version) == 1:
version += '0'
# Be sure the user gave a sensible path to the RGI dir
if not rgi_dir:
raise InvalidParamsError('The RGI data directory has to be'
'specified explicitly.')
rgi_dir = os.path.abspath(os.path.expanduser(rgi_dir))
mkdir(rgi_dir)
dfile = 'https://cluster.klima.uni-bremen.de/data/rgi/'
dfile += 'RGI_V{}_Intersects.zip'.format(version)
if version == '62':
dfile = ('https://cluster.klima.uni-bremen.de/~oggm/rgi/'
'rgi62_Intersects.zip')
odir = os.path.join(rgi_dir, 'RGI_V' + version + '_Intersects')
if reset and os.path.exists(odir):
shutil.rmtree(odir)
# A lot of code for backwards compat (sigh...)
if version in ['50', '60']:
test_file = os.path.join(odir, 'Intersects_OGGM_Manifest.txt')
if not os.path.exists(test_file):
# if not there download it
ofile = file_downloader(dfile, reset=reset)
# Extract root
with zipfile.ZipFile(ofile) as zf:
zf.extractall(odir)
if not os.path.exists(test_file):
raise RuntimeError('Could not find a manifest file in the RGI '
'directory: ' + odir)
else:
        test_file = os.path.join(odir, '*ntersect*anifest.txt')
if len(glob.glob(test_file)) == 0:
# if not there download it
ofile = file_downloader(dfile, reset=reset)
# Extract root
with zipfile.ZipFile(ofile) as zf:
zf.extractall(odir)
# Extract subdirs
pattern = '*_rgi{}_*.zip'.format(version)
for root, dirs, files in os.walk(cfg.PATHS['rgi_dir']):
for filename in fnmatch.filter(files, pattern):
zfile = os.path.join(root, filename)
with zipfile.ZipFile(zfile) as zf:
ex_root = zfile.replace('.zip', '')
mkdir(ex_root)
zf.extractall(ex_root)
# delete the zipfile after success
os.remove(zfile)
if len(glob.glob(test_file)) == 0:
raise RuntimeError('Could not find a manifest file in the RGI '
'directory: ' + odir)
return odir
def get_rgi_intersects_region_file(region=None, version=None, reset=False):
"""Path to the RGI regional intersect file.
If the RGI files are not present, download them.
Parameters
----------
region : str
from '00' to '19', with '00' being the global file (deprecated).
From RGI version '61' onwards, please use `get_rgi_intersects_entities`
with a list of glaciers instead of relying to the global file.
version : str
'5', '6', '61'... defaults the one specified in cfg.PARAMS
reset : bool
If True, deletes the intersect file before redownloading it
Returns
-------
str
path to the RGI intersects shapefile
"""
if version is None:
version = cfg.PARAMS['rgi_version']
if len(version) == 1:
version += '0'
rgi_dir = get_rgi_intersects_dir(version=version, reset=reset)
if region == '00':
if version in ['50', '60']:
version = 'AllRegs'
region = '*'
else:
raise InvalidParamsError("From RGI version 61 onwards, please use "
"get_rgi_intersects_entities() instead.")
f = list(glob.glob(os.path.join(rgi_dir, "*", '*intersects*' + region +
'_rgi*' + version + '*.shp')))
assert len(f) == 1
return f[0]
def get_rgi_intersects_entities(rgi_ids, version=None):
"""Get a list of glacier intersects selected from their RGI IDs.
Parameters
----------
rgi_ids: list of str
list of rgi_ids you want to look for intersections for
version: str
'5', '6', '61'... defaults the one specified in cfg.PARAMS
Returns
-------
geopandas.GeoDataFrame
with the selected intersects
"""
if version is None:
version = cfg.PARAMS['rgi_version']
if len(version) == 1:
version += '0'
regions = [s.split('-')[1].split('.')[0] for s in rgi_ids]
selection = []
for reg in sorted(np.unique(regions)):
sh = gpd.read_file(get_rgi_intersects_region_file(reg,
version=version))
selection.append(sh.loc[sh.RGIId_1.isin(rgi_ids) |
sh.RGIId_2.isin(rgi_ids)])
# Make a new dataframe of those
selection = pd.concat(selection)
selection.crs = sh.crs # for geolocalisation
return selection
def is_dem_source_available(source, lon_ex, lat_ex):
"""Checks if a DEM source is available for your purpose.
This is only a very rough check! It doesn't mean that the data really is
available, but at least it's worth a try.
Parameters
----------
source : str, required
the source you want to check for
lon_ex : tuple or int, required
a (min_lon, max_lon) tuple delimiting the requested area longitudes
lat_ex : tuple or int, required
a (min_lat, max_lat) tuple delimiting the requested area latitudes
Returns
-------
True or False
"""
from oggm.utils import tolist
lon_ex = tolist(lon_ex, length=2)
lat_ex = tolist(lat_ex, length=2)
def _in_grid(grid_json, lon, lat):
i, j = cfg.DATA['dem_grids'][grid_json].transform(lon, lat,
maskout=True)
return np.all(~ (i.mask | j.mask))
if source == 'GIMP':
return _in_grid('gimpdem_90m_v01.1.json', lon_ex, lat_ex)
elif source == 'ARCTICDEM':
return _in_grid('arcticdem_mosaic_100m_v3.0.json', lon_ex, lat_ex)
elif source == 'RAMP':
return _in_grid('AntarcticDEM_wgs84.json', lon_ex, lat_ex)
elif source == 'REMA':
return _in_grid('REMA_100m_dem.json', lon_ex, lat_ex)
elif source == 'ALASKA':
return _in_grid('Alaska_albers_V3.json', lon_ex, lat_ex)
elif source == 'TANDEM':
return True
elif source == 'AW3D30':
return np.min(lat_ex) > -60
elif source == 'MAPZEN':
return True
elif source == 'DEM3':
return True
elif source == 'ASTER':
return True
elif source == 'SRTM':
return np.max(np.abs(lat_ex)) < 60
elif source == 'COPDEM':
return True
elif source == 'NASADEM':
return (np.min(lat_ex) > -56) and (np.max(lat_ex) < 60)
elif source == 'USER':
return True
elif source is None:
return True
def default_dem_source(lon_ex, lat_ex, rgi_region=None, rgi_subregion=None):
"""Current default DEM source at a given location.
Parameters
----------
lon_ex : tuple or int, required
a (min_lon, max_lon) tuple delimiting the requested area longitudes
lat_ex : tuple or int, required
a (min_lat, max_lat) tuple delimiting the requested area latitudes
rgi_region : str, optional
the RGI region number (required for the GIMP DEM)
rgi_subregion : str, optional
the RGI subregion str (useful for RGI Reg 19)
Returns
-------
the chosen DEM source
"""
from oggm.utils import tolist
lon_ex = tolist(lon_ex, length=2)
lat_ex = tolist(lat_ex, length=2)
# GIMP is in polar stereographic, not easy to test if glacier is on the map
# It would be possible with a salem grid but this is a bit more expensive
# Instead, we are just asking RGI for the region
if rgi_region is not None and int(rgi_region) == 5:
return 'GIMP'
# ARCTIC DEM is not yet automatized
# If we have to automatise this one day, we should use the shapefile
# of the tiles, and then check for RGI region:
# use_without_check = ['03', '05', '06', '07', '09']
# to_test_on_shape = ['01', '02', '04', '08']
# Antarctica
if rgi_region is not None and int(rgi_region) == 19:
if rgi_subregion is None:
raise InvalidParamsError('Must specify subregion for Antarctica')
if rgi_subregion in ['19-01', '19-02', '19-03', '19-04', '19-05']:
# special case for some distant islands
return 'DEM3'
return 'RAMP'
# In high latitudes and an exceptional region in Eastern Russia, DEM3
# exceptional test for eastern russia:
if ((np.min(lat_ex) < -60.) or (np.max(lat_ex) > 60.) or
(np.min(lat_ex) > 59 and np.min(lon_ex) > 170)):
return 'DEM3'
# Everywhere else SRTM
return 'SRTM'
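# Usage sketch (coordinates chosen for illustration) of the fallback logic above:
#     >>> default_dem_source([10.0, 10.0], [46.0, 46.0])
#     'SRTM'
#     >>> default_dem_source([10.0, 10.0], [70.0, 70.0])
#     'DEM3'
#     >>> default_dem_source([-45.0, -45.0], [72.0, 72.0], rgi_region='05')
#     'GIMP'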
def get_topo_file(lon_ex, lat_ex, rgi_region=None, rgi_subregion=None,
dx_meter=None, zoom=None, source=None):
"""Path(s) to the DEM file(s) covering the desired extent.
If the needed files for covering the extent are not present, download them.
By default it will be referred to SRTM for [-60S; 60N], GIMP for Greenland,
RAMP for Antarctica, and a corrected DEM3 (viewfinderpanoramas.org)
elsewhere.
A user-specified data source can be given with the ``source`` keyword.
Parameters
----------
lon_ex : tuple or int, required
a (min_lon, max_lon) tuple delimiting the requested area longitudes
lat_ex : tuple or int, required
a (min_lat, max_lat) tuple delimiting the requested area latitudes
rgi_region : str, optional
the RGI region number (required for the GIMP DEM)
rgi_subregion : str, optional
the RGI subregion str (useful for RGI Reg 19)
dx_meter : float, required for source='MAPZEN'
the resolution of the glacier map (to decide the zoom level of mapzen)
zoom : int, optional
if you know the zoom already (for MAPZEN only)
source : str or list of str, optional
Name of specific DEM source. See gis.define_glacier_region for details
Returns
-------
tuple: (list with path(s) to the DEM file(s), data source str)
"""
from oggm.utils import tolist
lon_ex = tolist(lon_ex, length=2)
lat_ex = tolist(lat_ex, length=2)
if source is not None and not isinstance(source, str):
# check all user options
for s in source:
demf, source_str = get_topo_file(lon_ex, lat_ex,
rgi_region=rgi_region,
rgi_subregion=rgi_subregion,
source=s)
if demf[0]:
return demf, source_str
# Did the user specify a specific DEM file?
if 'dem_file' in cfg.PATHS and os.path.isfile(cfg.PATHS['dem_file']):
source = 'USER' if source is None else source
if source == 'USER':
return [cfg.PATHS['dem_file']], source
# Some logic to decide which source to take if unspecified
if source is None:
source = default_dem_source(lon_ex, lat_ex, rgi_region=rgi_region,
rgi_subregion=rgi_subregion)
if source not in DEM_SOURCES:
raise InvalidParamsError('`source` must be one of '
'{}'.format(DEM_SOURCES))
# OK go
files = []
if source == 'GIMP':
_file = _download_topo_file_from_cluster('gimpdem_90m_v01.1.tif')
files.append(_file)
if source == 'ARCTICDEM':
zones = arcticdem_zone(lon_ex, lat_ex)
for z in zones:
with get_lock():
url = 'https://cluster.klima.uni-bremen.de/~oggm/'
url += 'dem/ArcticDEM_100m_v3.0/'
url += '{}_100m_v3.0/{}_100m_v3.0_reg_dem.tif'.format(z, z)
files.append(file_downloader(url))
if source == 'RAMP':
_file = _download_topo_file_from_cluster('AntarcticDEM_wgs84.tif')
files.append(_file)
if source == 'ALASKA':
zones = alaska_dem_zone(lon_ex, lat_ex)
for z in zones:
with get_lock():
url = 'https://cluster.klima.uni-bremen.de/~oggm/'
url += 'dem/Alaska_albers_V3/'
url += '{}_Alaska_albers_V3/'.format(z)
url += '{}_Alaska_albers_V3.tif'.format(z)
files.append(file_downloader(url))
if source == 'REMA':
zones = rema_zone(lon_ex, lat_ex)
for z in zones:
with get_lock():
url = 'https://cluster.klima.uni-bremen.de/~oggm/'
url += 'dem/REMA_100m_v1.1/'
url += '{}_100m_v1.1/{}_100m_v1.1_reg_dem.tif'.format(z, z)
files.append(file_downloader(url))
if source == 'TANDEM':
zones = tandem_zone(lon_ex, lat_ex)
for z in zones:
files.append(_download_tandem_file(z))
if source == 'AW3D30':
zones = aw3d30_zone(lon_ex, lat_ex)
for z in zones:
files.append(_download_aw3d30_file(z))
if source == 'MAPZEN':
zones = mapzen_zone(lon_ex, lat_ex, dx_meter=dx_meter, zoom=zoom)
for z in zones:
files.append(_download_mapzen_file(z))
if source == 'ASTER':
zones = aster_zone(lon_ex, lat_ex)
for z in zones:
files.append(_download_aster_file(z))
if source == 'DEM3':
zones = dem3_viewpano_zone(lon_ex, lat_ex)
for z in zones:
files.append(_download_dem3_viewpano(z))
if source == 'SRTM':
zones = srtm_zone(lon_ex, lat_ex)
for z in zones:
files.append(_download_srtm_file(z))
if source == 'COPDEM':
filetuple = copdem_zone(lon_ex, lat_ex)
for cpp, eop in filetuple:
files.append(_download_copdem_file(cpp, eop))
if source == 'NASADEM':
zones = nasadem_zone(lon_ex, lat_ex)
for z in zones:
files.append(_download_nasadem_file(z))
# filter for None (e.g. oceans)
files = [s for s in files if s]
if files:
return files, source
else:
raise InvalidDEMError('Source: {2} no topography file available for '
'extent lat:{0}, lon:{1}!'.
format(lat_ex, lon_ex, source))
def get_cmip5_file(filename, reset=False):
"""Download a global CMIP5 file.
List of files: https://cluster.klima.uni-bremen.de/~nicolas/cmip5-ng/
Parameters
----------
filename : str
the file to download, e.g 'pr_ann_ACCESS1-3_rcp85_r1i1p1_g025.nc'
or 'tas_ann_ACCESS1-3_rcp45_r1i1p1_g025.nc'
reset : bool
force re-download of an existing file
Returns
-------
the path to the netCDF file
"""
prefix = filename.split('_')[0]
dfile = CMIP5_URL + prefix + '/' + filename
return file_downloader(dfile, reset=reset)
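# Usage sketch (file name taken from the docstring example): the variable prefix
# ('pr' or 'tas') is parsed from the file name and used as the folder on the
# server, i.e. the download URL becomes CMIP5_URL + 'tas/' + filename:
#     path = get_cmip5_file('tas_ann_ACCESS1-3_rcp45_r1i1p1_g025.nc')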
def get_ref_mb_glaciers_candidates(rgi_version=None):
"""Reads in the WGMS list of glaciers with available MB data.
    Can be found afterwards (and extended) in cfg.DATA['RGIXX_ref_ids'].
"""
if rgi_version is None:
rgi_version = cfg.PARAMS['rgi_version']
if len(rgi_version) == 2:
# We might change this one day
rgi_version = rgi_version[:1]
key = 'RGI{}0_ref_ids'.format(rgi_version)
if key not in cfg.DATA:
flink, _ = get_wgms_files()
cfg.DATA[key] = flink['RGI{}0_ID'.format(rgi_version)].tolist()
return cfg.DATA[key]
def get_ref_mb_glaciers(gdirs):
"""Get the list of glaciers we have valid mass balance measurements for.
To be valid glaciers must have more than 5 years of measurements and
be land terminating. Therefore, the list depends on the time period of the
baseline climate data and this method selects them out of a list
of potential candidates (`gdirs` arg).
Parameters
----------
gdirs : list of :py:class:`oggm.GlacierDirectory` objects
list of glaciers to check for valid reference mass balance data
Returns
-------
ref_gdirs : list of :py:class:`oggm.GlacierDirectory` objects
list of those glaciers with valid reference mass balance data
See Also
--------
get_ref_mb_glaciers_candidates
"""
# Get the links
ref_ids = get_ref_mb_glaciers_candidates(gdirs[0].rgi_version)
# We remove tidewater glaciers and glaciers with < 5 years
ref_gdirs = []
for g in gdirs:
if g.rgi_id not in ref_ids or g.is_tidewater:
continue
try:
mbdf = g.get_ref_mb_data()
if len(mbdf) >= 5:
ref_gdirs.append(g)
except RuntimeError as e:
if 'Please process some climate data before call' in str(e):
raise
return ref_gdirs
def get_ref_mb_glaciers_candidates_geodetic(rgi_version=None, folder_path=None,
                                            filename=None):
    """Reads in the combined geodetic/WGMS list of glaciers with available MB data.
    Can be found afterwards (and extended) in cfg.DATA['RGIXX_ref_ids'].
"""
if rgi_version is None:
rgi_version = cfg.PARAMS['rgi_version']
if len(rgi_version) == 2:
# We might change this one day
rgi_version = rgi_version[:1]
key = 'RGI{}0_ref_ids'.format(rgi_version)
if key not in cfg.DATA:
        flink, _ = get_geodetic_files(geodetic_folder_path=folder_path,
                                      geodetic_filename=filename)
cfg.DATA[key] = flink['RGI{}0_ID'.format(rgi_version)].tolist()
return cfg.DATA[key]
def get_ref_mb_glaciers_geodetic(gdirs, temp_geodetic_folder_path=None,
                                 temp_geodetic_filename=None):
"""Get the list of glaciers we have valid mass balance measurements for.
To be valid glaciers must have more than 5 years of measurements and
be land terminating. Therefore, the list depends on the time period of the
baseline climate data and this method selects them out of a list
of potential candidates (`gdirs` arg).
Parameters
----------
gdirs : list of :py:class:`oggm.GlacierDirectory` objects
list of glaciers to check for valid reference mass balance data
Returns
-------
    ref_gdirs : list of :py:class:`oggm.GlacierDirectory` objects
list of those glaciers with valid reference mass balance data
See Also
--------
get_ref_mb_glaciers_candidates
"""
# Get the links
    ref_ids = get_ref_mb_glaciers_candidates_geodetic(
        gdirs[0].rgi_version, folder_path=temp_geodetic_folder_path,
        filename=temp_geodetic_filename)
# We remove tidewater glaciers and glaciers with < 5 years
ref_gdirs = []
for g in gdirs:
if g.rgi_id not in ref_ids or g.is_tidewater:
continue
try:
            mbdf = g.get_ref_mb_data_geodetic(
                folder_path=temp_geodetic_folder_path,
                filename=temp_geodetic_filename)
if len(mbdf) >= 5:
ref_gdirs.append(g)
except RuntimeError as e:
if 'Please process some climate data before call' in str(e):
raise
return ref_gdirs
|
the-stack_0_9470 | """
Post reference data from sanitized_reference_json/ to the API:
python post_reference_to_api.py
Update the okta_token only:
python post_reference_to_api.py -a
Keys that exist in the data:
2021-05-25 21:16:53,372 - literature logger - INFO - key abstract
2021-05-25 21:16:53,372 - literature logger - INFO - key citation
2021-05-25 21:16:53,372 - literature logger - INFO - key datePublished
2021-05-25 21:16:53,373 - literature logger - INFO - key dateArrivedInPubmed
2021-05-25 21:16:53,373 - literature logger - INFO - key dateLastModified
2021-05-25 21:16:53,373 - literature logger - INFO - key keywords
2021-05-25 21:16:53,373 - literature logger - INFO - key crossReferences
2021-05-25 21:16:53,373 - literature logger - INFO - key title
2021-05-25 21:16:53,373 - literature logger - INFO - key tags
2021-05-25 21:16:53,373 - literature logger - INFO - key issueName
2021-05-25 21:16:53,373 - literature logger - INFO - key issueDate
2021-05-25 21:16:53,373 - literature logger - INFO - key MODReferenceType
2021-05-25 21:16:53,373 - literature logger - INFO - key pubMedType
2021-05-25 21:16:53,373 - literature logger - INFO - key meshTerms
2021-05-25 21:16:53,373 - literature logger - INFO - key allianceCategory
2021-05-25 21:16:53,373 - literature logger - INFO - key volume
2021-05-25 21:16:53,373 - literature logger - INFO - key authors
2021-05-25 21:16:53,373 - literature logger - INFO - key pages
2021-05-25 21:16:53,373 - literature logger - INFO - key publisher
2021-05-25 21:16:53,373 - literature logger - INFO - key resource
2021-05-25 21:16:53,373 - literature logger - INFO - key language
2021-05-25 21:16:53,373 - literature logger - INFO - key modResources
2021-05-25 21:16:53,373 - literature logger - INFO - key MODReferenceTypes
2021-05-25 21:16:53,373 - literature logger - INFO - key resourceAbbreviation
"""
# import requests
import argparse
import json
import logging
import logging.config
import re
from os import environ, listdir, path
from helper_file_processing import (
generate_cross_references_file,
load_ref_xref,
split_identifier,
)
from helper_post_to_api import (
generate_headers,
get_authentication_token,
process_api_request,
update_token,
)
log_file_path = path.join(path.dirname(path.abspath(__file__)), "../logging.conf")
logging.config.fileConfig(log_file_path)
logger = logging.getLogger("literature logger")
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--authorization", action="store_true", help="update authorization token")
parser.add_argument("-f", "--file", action="store", help="take input from input file in full path")
parser.add_argument("-c", "--commandline", nargs="*", action="store", help="placeholder for process_single_pmid.py")
args = vars(parser.parse_args())
def camel_to_snake(name):
"""
:param name:
:return:
"""
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower()
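# Usage sketch (sample strings chosen for illustration):
#     >>> camel_to_snake('tagName')
#     'tag_name'
#     >>> camel_to_snake('inCorpus')
#     'in_corpus'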
def post_references(input_file, check_file_flag): # noqa: C901
"""
:param input_file:
:param check_file_flag:
:return:
"""
api_port = environ.get("API_PORT")
# base_path = '/home/azurebrd/git/agr_literature_service_demo/src/xml_processing/'
base_path = environ.get("XML_PATH")
files_to_process = []
if input_file == "sanitized":
json_storage_path = base_path + "sanitized_reference_json/"
dir_list = listdir(json_storage_path)
for filename in dir_list:
# logger.info(filename)
if "REFERENCE_" in filename and ".REFERENCE_" not in filename:
# logger.info(filename)
files_to_process.append(json_storage_path + filename)
else:
files_to_process.append(input_file)
keys_to_remove = {"nlm", "primaryId", "modResources", "resourceAbbreviation"}
remap_keys = {"datePublished": "date_published", "dateArrivedInPubmed": "date_arrived_in_pubmed",
"dateLastModified": "date_last_modified", "crossReferences": "cross_references",
"issueName": "issue_name", "issueDate": "issue_date", "pubMedType": "pubmed_type",
"meshTerms": "mesh_terms", "allianceCategory": "category",
"MODReferenceType": "mod_reference_types", "MODReferenceTypes": "mod_reference_types",
"plainLanguageAbstract": "plain_language_abstract",
"pubmedAbstractLanguages": "pubmed_abstract_languages", "publicationStatus": "pubmed_publication_status"}
subkeys_to_remove = {}
remap_subkeys = {}
subkeys_to_remove["mesh_terms"] = {"referenceId"}
subkeys_to_remove["tags"] = {"referenceId"}
subkeys_to_remove["authors"] = {"referenceId", "firstinit", "firstInit", "crossReferences", "collectivename"}
remap_subkeys["mesh_terms"] = {}
remap_subkeys["mesh_terms"]["meshHeadingTerm"] = "heading_term"
remap_subkeys["mesh_terms"]["meshQualfierTerm"] = "qualifier_term"
remap_subkeys["mesh_terms"]["meshQualifierTerm"] = "qualifier_term"
remap_subkeys["mod_reference_types"] = {}
remap_subkeys["mod_reference_types"]["referenceType"] = "reference_type"
remap_subkeys["tags"] = {}
remap_subkeys["tags"]["tagName"] = "tag_name"
remap_subkeys["tags"]["tagSource"] = "tag_source"
remap_subkeys["cross_references"] = {}
remap_subkeys["cross_references"]["id"] = "curie"
remap_subkeys["authors"] = {}
remap_subkeys["authors"]["authorRank"] = "order"
remap_subkeys["authors"]["firstName"] = "first_name"
remap_subkeys["authors"]["lastName"] = "last_name"
remap_subkeys["authors"]["middleNames"] = "middle_names"
remap_subkeys["authors"]["firstname"] = "first_name"
remap_subkeys["authors"]["lastname"] = "last_name"
remap_subkeys["authors"]["middlenames"] = "middle_names"
remap_subkeys["authors"]["correspondingAuthor"] = "corresponding_author"
remap_subkeys["authors"]["firstAuthor"] = "first_author"
keys_found = set([])
# token = ''
# okta_file = base_path + 'okta_token'
# if path.isfile(okta_file):
# with open(okta_file, 'r') as okta_fh:
# token = okta_fh.read().replace('\n', '')
# okta_fh.close
# else:
# token = update_token()
token = get_authentication_token()
headers = generate_headers(token)
api_server = environ.get("API_SERVER", "localhost")
url = "http://" + api_server + ":" + api_port + "/reference/"
reference_primary_id_to_curie_file = base_path + "reference_primary_id_to_curie"
errors_in_posting_reference_file = base_path + "errors_in_posting_reference"
# previously loading from reference_primary_id_to_curie from past run of this script
# already_processed_primary_id = set()
# if check_file_flag == 'yes_file_check':
# if path.isfile(reference_primary_id_to_curie_file):
# with open(reference_primary_id_to_curie_file, 'r') as read_fh:
# for line in read_fh:
# line_data = line.split("\t")
# if line_data[0]:
# already_processed_primary_id.add(line_data[0].rstrip())
# read_fh.close
# this updates from resources in the database, and takes 4 seconds. if updating this script, comment it out after running it once
generate_cross_references_file("resource")
# this updates from references in the database, and takes 88 seconds. if updating this script, comment it out after running it once
generate_cross_references_file("reference")
xref_ref, ref_xref_valid, ref_xref_obsolete = load_ref_xref("resource")
resource_to_curie = {}
for prefix in xref_ref:
for identifier in xref_ref[prefix]:
xref_curie = prefix + ":" + identifier
resource_to_curie[xref_curie] = xref_ref[prefix][identifier]
# previously loading from resource_primary_id_to_curie from past run of post_resource_to_api
# resource_primary_id_to_curie_file = base_path + 'resource_primary_id_to_curie'
# if path.isfile(resource_primary_id_to_curie_file):
# with open(resource_primary_id_to_curie_file, 'r') as read_fh:
# for line in read_fh:
# line_data = line.rstrip().split("\t")
# if line_data[0]:
# resource_to_curie[line_data[0]] = line_data[1]
# read_fh.close
xref_ref, ref_xref_valid, ref_xref_obsolete = load_ref_xref("reference")
process_results = []
with open(reference_primary_id_to_curie_file, "a") as mapping_fh, open(errors_in_posting_reference_file, "a") as error_fh:
for filepath in sorted(files_to_process):
# only test one file for run
# if filepath != json_storage_path + 'REFERENCE_PUBMED_WB_1.json':
# continue
# logger.info("opening file\t%s", filepath)
f = open(filepath)
reference_data = json.load(f)
# counter = 0
for entry in reference_data:
# only take a couple of sample from each file for testing
# counter += 1
# if counter > 2:
# break
# output what we get from the file before converting for the API
# json_object = json.dumps(entry, indent=4)
# print(json_object)
primary_id = entry["primaryId"]
prefix, identifier, separator = split_identifier(primary_id)
if prefix in xref_ref:
if identifier in xref_ref[prefix]:
logger.info("%s\talready in", primary_id)
continue
# previously loading from reference_primary_id_to_curie from past run of this script
# if primary_id in already_processed_primary_id:
# continue
# if primary_id != 'PMID:9643811':
# continue
new_entry = {}
for key in entry:
keys_found.add(key)
# logger.info("key found\t%s\t%s", key, entry[key])
if key in remap_keys:
# logger.info("remap\t%s\t%s", key, remap_keys[key])
# this renames a key, but it can be accessed again in the for key loop, so sometimes a key is
# visited twice while another is skipped, so have to create a new dict to populate instead
# entry[remap_keys[key]] = entry.pop(key)
new_entry[remap_keys[key]] = entry[key]
elif key not in keys_to_remove:
new_entry[key] = entry[key]
for key in remap_subkeys:
if key in new_entry:
# logger.info("%s\t%s\t%s", primary_id, key, new_entry[key])
new_list = []
for sub_element in new_entry[key]:
new_sub_element = {}
for subkey in sub_element:
if subkey in remap_subkeys[key]:
new_sub_element[remap_subkeys[key][subkey]] = sub_element[subkey]
# logger.info('remap subkey\t%s\t%s', subkey, remap_subkeys[key][subkey])
elif key not in subkeys_to_remove or subkey not in subkeys_to_remove[key]:
new_sub_element[subkey] = sub_element[subkey]
new_list.append(new_sub_element)
new_entry[key] = new_list
# can only enter agr resource curie, if resource does not map to one, enter nothing
if "resource" in new_entry:
if new_entry["resource"] in resource_to_curie:
new_entry["resource"] = resource_to_curie[new_entry["resource"]]
else:
del new_entry["resource"]
if "category" in new_entry:
new_entry["category"] = (new_entry["category"].lower().replace(" ", "_"))
if "tags" in new_entry:
for sub_element in new_entry["tags"]:
if "tag_name" in sub_element:
sub_element["tag_name"] = camel_to_snake(sub_element["tag_name"])
if "authors" in new_entry:
for author in new_entry["authors"]:
if "orcid" in author:
# orcid field in json has just the identifier, need to add the prefix
if 'ORCID:' not in author['orcid']:
author['orcid'] = 'ORCID:' + author['orcid']
if 'cross_references' in new_entry:
new_entry['cross_references'] = list(filter(lambda x: "curie" in x and "NLM:" not in x['curie'] and "ISSN:" not in x["curie"], new_entry["cross_references"]))
# output what is sent to API after converting file data
# json_object = json.dumps(new_entry, indent=4)
# print(json_object)
# get rid of this if process_api_request works on a full run
# process_post_tuple = process_post(url, headers, new_entry, primary_id, mapping_fh, error_fh)
# headers = process_post_tuple[0]
# process_text = process_post_tuple[1]
# process_status_code = process_post_tuple[2]
# process_result = {}
# process_result['text'] = process_text
# process_result['status_code'] = process_status_code
# process_results.append(process_result)
api_response_tuple = process_api_request("POST", url, headers, new_entry, primary_id, None, None)
headers = api_response_tuple[0]
response_text = api_response_tuple[1]
response_status_code = api_response_tuple[2]
log_info = api_response_tuple[3]
response_dict = json.loads(response_text)
if log_info:
logger.info(log_info)
if response_status_code == 201:
response_dict = response_dict.replace('"', "")
logger.info("%s\t%s", primary_id, response_dict)
mapping_fh.write("%s\t%s\n" % (primary_id, response_dict))
else:
logger.info("api error %s primaryId %s message %s", str(response_status_code), primary_id, response_dict['detail'])
error_fh.write("api error %s primaryId %s message %s\n" % (str(response_status_code), primary_id, response_dict['detail']))
# if wanting to output keys in data for figuring out mapping
# for key in keys_found:
# logger.info("key %s", key)
mapping_fh.close
error_fh.close
return process_results
# get rid of this if process_api_request works on a full run
# def process_post(url, headers, new_entry, primary_id, mapping_fh, error_fh):
# """
#
# output the json getting posted to the API
# json_object = json.dumps(new_entry, indent = 4)
# print(json_object)
#
# :param url:
# :param headers:
# :param new_entry:
# :param primary_id:
# :param mapping_fh:
# :param error_fh:
# :return:
# """
#
# post_return = requests.post(url, headers=headers, json=new_entry)
# process_text = str(post_return.text)
# process_status_code = str(post_return.status_code)
# logger.info(primary_id + ' text ' + process_text)
# logger.info(primary_id + ' status_code ' + process_status_code)
#
# response_dict = {}
# try:
# response_dict = json.loads(post_return.text)
# except ValueError:
# logger.info("%s\tValueError", primary_id)
# error_fh.write("ERROR %s primaryId did not convert to json\n" % (primary_id))
# return headers, process_text, process_status_code
#
# if (post_return.status_code == 201):
# response_dict = response_dict.replace('"', '')
# logger.info("%s\t%s", primary_id, response_dict)
# mapping_fh.write("%s\t%s\n" % (primary_id, response_dict))
# elif (post_return.status_code == 401):
# logger.info("%s\texpired token", primary_id)
# mapping_fh.write("%s\t%s\n" % (primary_id, response_dict))
# token = update_token()
# headers = generate_headers(token)
# process_post_tuple = process_post(url, headers, new_entry, primary_id, mapping_fh, error_fh)
# headers = process_post_tuple[0]
# process_text = process_post_tuple[1]
# process_status_code = process_post_tuple[2]
# elif (post_return.status_code == 500):
# logger.info("%s\tFAILURE", primary_id)
# mapping_fh.write("%s\t%s\n" % (primary_id, response_dict))
# # if redoing a run and want to skip errors of data having already gone in
# # elif (post_return.status_code == 409):
# # continue
# else:
# logger.info("ERROR %s primaryId %s message %s", post_return.status_code, primary_id, response_dict['detail'])
# error_fh.write("ERROR %s primaryId %s message %s\n" % (post_return.status_code, primary_id, response_dict['detail']))
# return headers, process_text, process_status_code
if __name__ == "__main__":
"""
call main start function
"""
logger.info("Starting post_reference_to_api.py")
if args["authorization"]:
update_token()
elif args["commandline"]:
logger.info("placeholder for process_single_pmid.py")
elif args["file"]:
logger.info("placeholder for parse_pubmed_json_reference.py")
else:
post_references("sanitized", "yes_file_check")
logger.info("ending post_reference_to_api.py")
# pipenv run python post_reference_to_api.py
|
the-stack_0_9473 | # -*- coding: utf-8 -*-
# ____ __ __ ___ _ _ _
# |_ /___ / _|/ _|/ __| (_)___ _ _| |_
# / // -_) _| _| (__| | / -_) ' \ _|
# /___\___|_| |_| \___|_|_\___|_||_\__|
#
"""Zeff Cloud training status."""
__author__ = """Lance Finn Helsten <[email protected]>"""
__copyright__ = """Copyright © 2019, Ziff, Inc. — All Rights Reserved"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import enum
import datetime
import logging
import json
class TrainingStatus(enum.Enum):
"""Model training status."""
unknown = "UNKNOWN"
queued = "QUEUED"
started = "STARTED"
progress = "PCT_COMPLETE"
complete = "COMPLETE"
def __str__(self):
"""Return a user appropriate name of this status."""
return self.name
def __repr__(self):
"""Return a representation of this status."""
return "<%s.%s>" % (self.__class__.__name__, self.name)
class TrainingSessionInfo:
"""Information about the current training session."""
def __init__(self, status_json):
"""Create a new training information.
:param status_json: The status JSON returned from a train
status request.
"""
self.__data = status_json
logging.debug("Training Session JSON: \n%s", self.__data_str())
def __data_str(self):
"""Return the data as a JSON formatted string."""
return json.dumps(self.__data, indent="\t", sort_keys=True)
@property
def status(self) -> TrainingStatus:
"""Return state of current training session."""
value = self.__data["status"]
return TrainingStatus(value if value is not None else "UNKNOWN")
@property
def progress(self) -> float:
"""Return progress, [0.0, 1.0], of current training session."""
value = self.__data["percentComplete"]
return float(value) if value is not None else 0.0
@property
def model_version(self) -> str:
"""Return model version of the current training session."""
value = self.__data["modelVersion"]
return str(value) if value is not None else "unknown"
@property
def model_location(self) -> str:
"""Return the URL to the model."""
value = self.__data["modelLocation"]
return str(value) if value is not None else "unknown"
@property
def created_timestamp(self) -> datetime.datetime:
"""Return the timestamp when this training session was created."""
value = self.__data["createdAt"]
if value is not None:
ret = datetime.datetime.fromisoformat(value)
else:
ret = datetime.datetime.min
return ret
@property
def updated_timestamp(self) -> datetime.datetime:
"""Return timestamp when current session status was last updated."""
value = self.__data["updatedAt"]
if value is not None:
ret = datetime.datetime.fromisoformat(value)
else:
ret = self.created_timestamp
return ret
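# Hedged usage sketch (not part of the original module): the keys in the sample
# status payload below are assumed from the property accessors above, purely to
# illustrate how TrainingSessionInfo might be driven.
if __name__ == "__main__":
    _sample_status = {
        "status": "PCT_COMPLETE",
        "percentComplete": 0.42,
        "modelVersion": "3",
        "modelLocation": "https://example.invalid/models/3",
        "createdAt": "2019-01-01T00:00:00",
        "updatedAt": "2019-01-01T01:00:00",
    }
    _info = TrainingSessionInfo(_sample_status)
    print(_info.status, _info.progress, _info.model_version)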
|
the-stack_0_9474 | """
This module calls the setuplogging function and creates a root logger instance.
All future loggers will inherit these yaml configurations from this root logger.
Python uses __init__.py files to navigate between folders. They are implicitly executed.
"""
import logging
from logconfig.logconfig import setup_logging
from pathlib import Path
# make sure logfiles/ directory exists
p = Path.cwd() / "src/logfiles"
if not p.exists():
Path.mkdir(p)
# setuplogging function called from logconfig.py
setup_logging()
# Root logger instance
logging.RootLogger(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.info("Root logger setup successful") |
the-stack_0_9475 | import numpy as np
from environments.DeterministicMDP import DeterministicMDP
from spaces.DiscreteSpace import DiscreteSpace
class SharedLearningChain(DeterministicMDP):
def __init__(self, name, num_states, N):
# create the state and action space
self.inner_size = N
state_space = DiscreteSpace(N)
action_space = DiscreteSpace(3)
# one maps to 2
starting_state = 1
# specify the transition function
transition_func = np.zeros((N, 3), dtype=np.int32)
# iterate over and fill with the transitions
for i in range(N):
transition_func[i, 0] = i - 1
transition_func[i, 1] = i + 1
transition_func[i, 2] = 0
transition_func[0, 0] = 0
transition_func[N - 1, 1] = N - 1
transition_func[N - 1, 2] = N - 1
# now we define the reward function
reward_function = np.zeros((N, 3), dtype=np.float64)
for i in range(N - 1):
reward_function[i, 2] = -0.1
reward_function[0, 0] = -0.1
reward_function[0, 2] = -0.1
reward_function[1, 0] = -0.1
reward_function[N-2, 1] = 1
super().__init__(name, num_states, action_space, state_space, transition_func, reward_function, starting_state)
def get_name(self):
return "shared_chain"
|
the-stack_0_9476 | #!/usr/bin/env python3
# Copyright (c) 2013-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 2000000
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(
r"^/Feathercoin:("
r"0.13.(0|1|2|3|99)|"
r"0.16.(0|1|2|3|4|99)|"
r"0.17.(0|1|99)|"
r"0.18.(0|1|2|3|4|99)|"
r")")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
    # Skip bad results (the seeder fields are strings, so cast before comparing).
    if int(sline[1]) == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def dedup(ips):
'''deduplicate by address,port'''
d = {}
for ip in ips:
d[ip['ip'],ip['port']] = ip
return list(d.values())
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
def lookup_asn(net, ip):
'''
    Look up the asn for an IP (4 or 6) address by querying cymru.com, or None
if it could not be found.
'''
try:
if net == 'ipv4':
ipaddr = ip
prefix = '.origin'
else: # http://www.team-cymru.com/IP-ASN-mapping.html
res = str() # 2001:4860:b002:23::68
for nb in ip.split(':')[:4]: # pick the first 4 nibbles
for c in nb.zfill(4): # right padded with '0'
res += c + '.' # 2001 4860 b002 0023
ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
prefix = '.origin6'
asn = int([x.to_text() for x in dns.resolver.query('.'.join(
reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com',
'TXT').response.answer][0].split('\"')[1].split(' ')[0])
return asn
except Exception:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip + '"\n')
return None
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_per_net):
# Sift out ips by type
ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']]
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv46 by ASN, and limit to max_per_net per network
result = []
net_count = collections.defaultdict(int)
asn_count = collections.defaultdict(int)
for ip in ips_ipv46:
if net_count[ip['net']] == max_per_net:
continue
asn = lookup_asn(ip['net'], ip['ip'])
if asn is None or asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
net_count[ip['net']] += 1
result.append(ip)
# Add back Onions (up to max_per_net)
result.extend(ips_onion[0:max_per_net])
return result
def ip_stats(ips):
hist = collections.defaultdict(int)
for ip in ips:
if ip is not None:
hist[ip['net']] += 1
return '%6d %6d %6d' % (hist['ipv4'], hist['ipv6'], hist['onion'])
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr)
print('%s Initial' % (ip_stats(ips)), file=sys.stderr)
# Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
print('%s Skip entries with invalid address' % (ip_stats(ips)), file=sys.stderr)
    # Skip duplicates (in case multiple seeds files were concatenated)
ips = dedup(ips)
print('%s After removing duplicates' % (ip_stats(ips)), file=sys.stderr)
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
print('%s Enforce minimal number of blocks' % (ip_stats(ips)), file=sys.stderr)
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
print('%s Require service bit 1' % (ip_stats(ips)), file=sys.stderr)
# Require at least 50% 30-day uptime for clearnet, 10% for onion.
req_uptime = {
'ipv4': 50,
'ipv6': 50,
'onion': 10,
}
ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]]
print('%s Require minimum uptime' % (ip_stats(ips)), file=sys.stderr)
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
print('%s Require a known and recent user agent' % (ip_stats(ips)), file=sys.stderr)
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
print('%s Filter out hosts with multiple bitcoin ports' % (ip_stats(ips)), file=sys.stderr)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
print('%s Look up ASNs and limit results per ASN and per net' % (ip_stats(ips)), file=sys.stderr)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
|
the-stack_0_9479 | from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import DetailView
from django.utils.translation import gettext as _
from froide.account.preferences import get_preferences_for_user
from froide.helper.utils import render_403
from ..models import FoiRequest, FoiEvent, FoiAttachment
from ..forms.preferences import request_page_tour_pref, message_received_tour_pref
from ..auth import can_read_foirequest, can_write_foirequest, check_foirequest_auth_code
def shortlink(request, obj_id, url_path=""):
foirequest = get_object_or_404(FoiRequest, pk=obj_id)
if not can_read_foirequest(foirequest, request):
return render_403(request)
url = foirequest.get_absolute_url()
if url_path:
url_path = url_path[1:]
return redirect(url + url_path)
def auth(request, obj_id, code):
foirequest = get_object_or_404(FoiRequest, pk=obj_id)
if check_foirequest_auth_code(foirequest, code):
request.session["pb_auth"] = code
return redirect(foirequest)
if can_read_foirequest(foirequest, request):
return redirect(foirequest)
return render_403(request)
def can_see_attachment(att, can_write):
if att.approved:
return True
if att.redacted_id and not can_write:
return False
if att.converted_id and not can_write:
return False
return True
def show_foirequest(
request, obj, template_name="foirequest/alpha/show.html", context=None, status=200
):
if context is None:
context = {}
context.update(get_foirequest_context(request, obj))
return render(request, template_name, context, status=status)
class FoiRequestView(DetailView):
queryset = FoiRequest.objects.select_related(
"public_body",
"jurisdiction",
"user",
"law",
).prefetch_related(
"tags",
)
template_name = "foirequest/alpha/show.html"
def get(self, request, *args, **kwargs):
self.object = self.get_object()
if not can_read_foirequest(self.object, self.request):
return render_403(self.request)
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
obj = self.object
request = self.request
context.update(get_foirequest_context(request, obj))
return context
def get_foirequest_context(request, obj):
context = {}
all_attachments = FoiAttachment.objects.select_related("redacted").filter(
belongs_to__request=obj
)
can_write = can_write_foirequest(obj, request)
messages = obj.get_messages(with_tags=can_write)
for message in messages:
message.request = obj
message.all_attachments = [
a for a in all_attachments if a.belongs_to_id == message.id
]
# Preempt attribute access
for att in message.all_attachments:
att.belongs_to = message
message.listed_attachments = [
a
for a in all_attachments
if a.belongs_to_id == message.id and can_see_attachment(a, can_write)
]
message.hidden_attachments = [
a for a in message.listed_attachments if a.is_irrelevant
]
message.can_edit_attachments = bool(
[a for a in message.listed_attachments if a.can_edit]
)
message.approved_attachments = [
a
for a in message.listed_attachments
if a.approved and a not in message.hidden_attachments
]
message.unapproved_attachments = [
a
for a in message.listed_attachments
if not a.approved and a not in message.hidden_attachments
]
events = (
FoiEvent.objects.filter(request=obj)
.select_related("user", "request", "public_body")
.order_by("timestamp")
)
event_count = len(events)
last_index = event_count
for message in reversed(obj.messages):
message.events = [
ev for ev in events[:last_index] if ev.timestamp >= message.timestamp
]
last_index = last_index - len(message.events)
# TODO: remove active_tab
active_tab = "info"
if can_write:
active_tab = get_active_tab(obj, context)
context.update({"object": obj, "active_tab": active_tab, "preferences": {}})
if can_write:
preferences = get_preferences_for_user(
request.user, [request_page_tour_pref, message_received_tour_pref]
)
context.update({"preferences": preferences})
if (
obj.reply_received()
and not preferences["foirequest_messagereceived_tour"].value
):
context.update(
{"foirequest_messagereceived_tour": get_messagereceived_tour_data()}
)
elif not preferences["foirequest_requestpage_tour"].value:
context.update({"foirequest_requestpage_tour": get_requestpage_tour_data()})
return context
def get_active_tab(obj, context):
if "postal_reply_form" in context:
return "add-postal-reply"
elif "postal_message_form" in context:
return "add-postal-message"
elif "status_form" in context:
return "set-status"
elif "send_message_form" in context:
return "write-message"
elif "escalation_form" in context:
return "escalate"
if "active_tab" in context:
return context["active_tab"]
if obj.awaits_classification():
return "set-status"
elif obj.is_overdue() and obj.awaits_response():
return "write-message"
return "info"
def get_base_tour_data():
return {
"i18n": {
"done": _("👋 Goodbye!"),
"next": _("Next"),
"previous": _("Previous"),
"close": _("Close"),
"start": _("Next"),
}
}
def get_requestpage_tour_data():
return {
**get_base_tour_data(),
"steps": [
{
"element": "#infobox .info-box__header",
"popover": {
"title": _("Status of request"),
"description": _(
"""Here you can see the status your request. Below you can update the status of your request when you receive a response."""
),
},
},
{
"element": "#due-date",
"popover": {
"title": _("Deadline"),
"description": _(
"""This is the deadline for your request. If the public body has not replied by then, we will let you know, so you can send a reminder. You can also adjust the date if necessary."""
),
},
},
{
"element": "#share-links",
"popover": {
"title": _("Share links"),
"description": _(
"""Here are some quick links for you to share your request with others."""
),
},
},
{
"element": "#download-links",
"popover": {
"title": _("Download"),
"description": _(
"""You can download all messages of your request. The RSS link allows you to subscribe to the request in a feed reader."""
),
},
},
{
"element": "#correspondence-tab",
"popover": {
"title": _("Messages in this request"),
"description": _(
"""Below you find all messages that you sent and received in this request. When you receive a response it appears at the end and we let you know about it via email."""
),
},
},
{
"element": "#correspondence .alpha-message .alpha-message__head",
"popover": {
"title": _("Details of your message"),
"description": _(
"""This is your message. There's more information e.g. about the delivery status of your message when you click on the “Details” link."""
),
},
"position": "top-center",
},
{
"element": ".write-message-top-link",
"popover": {
"title": _("Need to reply or send a reminder?"),
"description": _(
"""This button takes you to the send message form."""
),
},
},
{
"element": ".upload-post-link",
"popover": {
"title": _("Got postal mail?"),
"description": _(
"""When you receive a letter, you can click this button and upload a scan or photo of the letter. You can redact parts of the letter with our tool before publishing it."""
),
},
},
{
"element": ".request-title",
"popover": {
"title": _("The end."),
"description": _(
"""That concludes this tour! We'll let you know via email if anything around your request changes."""
),
"position": "top-center",
},
},
],
}
def get_messagereceived_tour_data():
return {
**get_base_tour_data(),
"steps": [
{
"element": "#infobox .info-box__header",
"popover": {
"title": _("Status of request"),
"description": _(
"""After you read your replies you need to update the status of your request here below."""
),
},
},
{
"element": "#correspondence .alpha-message",
"popover": {
"title": _("Message toolbar"),
"description": _(
"""The “Redact” button allows you to redact the text of a message in case sensitive information is accidentally not automatically removed. The “Problem?” allows you to notify our moderation team, if you have a problem with a message."""
),
"position": "bottom-center",
},
},
{
"element": ".reply-form__wrap",
"popover": {
"title": _("Reply"),
"description": _(
"""At the bottom of the page you can send replies to the public body or start a mediation process with the mediation authority."""
),
"position": "top-center",
},
},
{
"element": "#request-summary",
"popover": {
"title": _("Got the information you asked for?"),
"description": _(
"""When you received documents, you can write a summary of what you have learned."""
),
},
},
{
"element": ".request-title",
"popover": {
"title": _("The end."),
"description": _("""That concludes this tour!"""),
"position": "top-center",
},
},
],
}
|
the-stack_0_9484 | import os.path
import config.basic
################################################################
# Configurations for processing
################################################################
# This is where all of the output files are stored
# Must be writable and have lots of free space...
#base_results_directory = "/home/fs01/lgs23/PALFA/results"
base_results_directory = "/mnt/data1/adam_dev/results"
# The following is the name of the scratch working directory
# basename on the individual processing nodes
base_working_directory = "/tmp"
# The following is the path where the temporary working directory
# should be created. This could be /dev/shm, or simply another
# directory on the worker node.
base_tmp_dir= "/tmp"
# Should not need to change the names of the zaplists...
zaplistdir = os.path.join(config.basic.pipelinedir, "lib", "zaplists")
default_zaplist = os.path.join(zaplistdir, "PALFA.zaplist")
# The following don't currently get used. They are placeholders.
num_cores = 1 # The number of cores to use/request for each job
use_hyperthreading = False # Whether or not to use HyperThreading
# Do only single-pulse search? Added by LGS
do_noaccel = True
import processing_check
processing_check.processing.populate_configs(locals())
processing_check.processing.check_sanity()
|
the-stack_0_9485 | import setuptools
from distutils.core import Extension
with open("README.md") as f:
long_description = f.read()
with open("./src/viztracer/__init__.py") as f:
for line in f.readlines():
if line.startswith("__version__"):
# __version__ = "0.9"
delim = '"' if '"' in line else "'"
version = line.split(delim)[1]
break
else:
print("Can't find version! Stop Here!")
exit(1)
setuptools.setup(
name="viztracer",
version=version,
author="Tian Gao",
author_email="[email protected]",
description="A debugging and profiling tool that can trace and visualize python code execution",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/gaogaotiantian/viztracer",
packages=setuptools.find_packages("src"),
package_dir={"": "src"},
package_data={
"viztracer": [
"html/*.js",
"html/*.css",
"html/*.html"
]
},
ext_modules=[
Extension(
"viztracer.snaptrace",
sources=[
"src/viztracer/modules/util.c",
"src/viztracer/modules/snaptrace.c"
],
extra_link_args=["-lpthread"]
)
],
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
"Topic :: Software Development :: Quality Assurance",
"Topic :: Software Development :: Bug Tracking",
"Topic :: System :: Logging"
],
python_requires=">=3.6",
extras_require={
"full": ["rich", "orjson"]
},
entry_points={
"console_scripts": [
"viztracer = viztracer:main",
"vizviewer = viztracer:viewer_main",
"vdb = viztracer:sim_main"
]
},
)
|
the-stack_0_9486 |
from st2actions.runners.pythonrunner import Action
import requests
__all__ = [
'NetboxBaseAction'
]
class NetboxBaseAction(Action):
"""Base Action for all Netbox API based actions
"""
def __init__(self, config):
super(NetboxBaseAction, self).__init__(config)
def get(self, endpoint_uri, **kwargs):
"""Make a get request to the API URI passed in
"""
self.logger.debug("Calling base get with kwargs: {}".format(kwargs))
if self.config['use_https']:
url = 'https://'
else:
url = 'http://'
url = url + self.config['hostname'] + endpoint_uri
headers = {
'Authorization': 'Token ' + self.config['api_token'],
'Accept': 'application/json'
}
# transform `in__id` if present
if kwargs.get('id__in'):
kwargs['id__in'] = ','.join(kwargs['id__in'])
self.logger.debug('id__in transformed to {}'.format(kwargs['id__in']))
r = requests.get(url, verify=self.config['ssl_verify'], headers=headers, params=kwargs)
return {'raw': r.json()}
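# Hedged usage sketch (not part of the original module): a concrete StackStorm
# action would subclass NetboxBaseAction and call self.get() with an endpoint
# URI; the endpoint path and filter kwarg below are illustrative assumptions.
class GetDevicesAction(NetboxBaseAction):
    """Example action returning devices filtered by name."""
    def run(self, name=None):
        return self.get('/api/dcim/devices/', name=name)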
|
the-stack_0_9487 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
This bot uses external filtering programs for munging text.
For example:
python pwb.py piper -filter:"tr A-Z a-z" -page:Wikipedia:Sandbox
Would lower case the article with tr(1).
Multiple -filter commands can be specified:
python pwb.py piper -filter:cat -filter:"tr A-Z a-z" -filter:"tr a-z A-Z" \
-page:Wikipedia:Sandbox
Would pipe the article text through cat(1) (NOOP) and then lower case
it with tr(1) and upper case it again with tr(1)
The following parameters are supported:
-always Always commit changes without asking you to accept them
-filter: Filter the article text through this program, can be
given multiple times to filter through multiple programs in
the order which they are given
The following generators and filters are supported:
&params;
"""
#
# (C) Pywikibot team, 2008-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import os
import pipes
import tempfile
import pywikibot
from pywikibot import pagegenerators
from pywikibot.bot import (MultipleSitesBot, ExistingPageBot,
NoRedirectPageBot, AutomaticTWSummaryBot)
from pywikibot.tools import UnicodeType
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {'&params;': pagegenerators.parameterHelp}  # noqa: N816
class PiperBot(MultipleSitesBot, ExistingPageBot, NoRedirectPageBot,
AutomaticTWSummaryBot):
"""Bot for munging text using external filtering programs."""
summary_key = 'piper-edit-summary'
def __init__(self, generator, **kwargs):
"""
Initializer.
@param generator: The page generator that determines on which pages
to work on.
@type generator: generator
"""
self.availableOptions.update({
'filters': [],
})
super(PiperBot, self).__init__(generator=generator, **kwargs)
@property
def summary_parameters(self):
"""Return the filter parameter."""
return {'filters': ', '.join(self.getOption('filters'))}
def pipe(self, program, text):
"""Pipe a given text through a given program.
@return: processed text after piping
@rtype: str
"""
if not isinstance(text, str): # py2-py3 compatibility
text = text.encode('utf-8')
pipe = pipes.Template()
pipe.append(str(program), '--') # py2-py3 compatibility
# Create a temporary filename to save the piped stuff to
temp_filename = '%s.%s' % (tempfile.mktemp(), 'txt')
with pipe.open(temp_filename, 'w') as file:
file.write(text)
# Now retrieve the munged text
with open(temp_filename, 'r') as file:
unicode_text = file.read()
if not isinstance(unicode_text, UnicodeType): # py2-py3 compatibility
unicode_text = unicode_text.decode('utf-8')
# clean up
os.unlink(temp_filename)
return unicode_text
def treat_page(self):
"""Load the given page, do some changes, and save it."""
# Load the page
text = self.current_page.text
# Munge!
for program in self.getOption('filters'):
text = self.pipe(program, text)
# only save if something was changed
self.put_current(text)
def main(*args):
"""Create and run a PiperBot instance from the given command arguments."""
local_args = pywikibot.handle_args(args)
# This factory is responsible for processing command line arguments
# that are also used by other scripts and that determine on which pages
# to work on.
gen_factory = pagegenerators.GeneratorFactory()
# The program to pipe stuff through
filters = []
options = {}
# Parse command line arguments
for arg in local_args:
option, sep, value = arg.partition(':')
if option == '-filter':
filters.append(value)
elif option == '-always':
options['always'] = True
else:
# check if a standard argument like
# -start:XYZ or -ref:Asdf was given.
gen_factory.handleArg(arg)
options['filters'] = filters
gen = gen_factory.getCombinedGenerator(preload=True)
if gen:
# The preloading generator is responsible for downloading multiple
# pages from the wiki simultaneously.
bot = PiperBot(gen, **options)
bot.run()
return True
else:
pywikibot.bot.suggest_help(missing_generator=True)
return False
if __name__ == '__main__':
main()
|
the-stack_0_9488 | class Solution:
"""
@param nums: A set of numbers
@return: A list of lists
"""
def subsets(self, nums):
# write your code here
if not nums: return [[]]
nums = sorted(nums)
res = []
self.helper(res, [], nums, 0)
return res
def helper(self, res, part, nums, pos):
res.append(list(part))
for i in range(pos, len(nums)):
part.append(nums[i])
self.helper(res, part, nums, i + 1)
part.pop()
s=Solution()
print(s.subsets([1]))
|
the-stack_0_9492 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the SKA Dish simulator.
"""
import pkg_resources
import time
import pytest
from unittest import mock
from tango_simlib import tango_sim_generator
from ska_dish_master_mid.dish_master_behaviour import AzEl, OverrideDish, get_enum_str, set_enum
FGO_FILE_PATH = pkg_resources.resource_filename("ska_dish_master_mid", "dish_master.fgo")
JSON_FILE_PATH = pkg_resources.resource_filename("ska_dish_master_mid", "dish_master_SimDD.json")
class TestMpiDshModel:
@pytest.fixture(scope="function")
def provision_setup(self):
model = tango_sim_generator.configure_device_models(
[FGO_FILE_PATH, JSON_FILE_PATH], "test/nodb/mpidish"
)
return model["test/nodb/mpidish"], OverrideDish()
def test_update_desired_pointing_history(self, provision_setup):
"""Check the logic in get_new_pointing_coordinates and that the update gets
applied correctly
"""
# Note: coords are are sets of 3: [timestamp, azim, elev]
device_model, dish_override = provision_setup
now = time.time()
now_millisec = now * 1000.0
dish_override.desired_pointings = [[now_millisec + 10.0, 2.0, 3.0]]
desired_pointing_coordinates = [now_millisec + 40.0, 5.0, 6.0]
program_track_table_coordinates = [
now_millisec + 70.0,
8.0,
9.0,
now_millisec + 100.0,
11.0,
12.0,
]
# desiredPointing is newest, so must be used
dish_override.last_coordinate_update_timestamp = now - 5.0
device_model.sim_quantities["programTrackTable"].set_val(
program_track_table_coordinates, now - 3.0
)
device_model.sim_quantities["desiredPointing"].set_val(
desired_pointing_coordinates, now - 2.0
)
current_pointings = list(dish_override.desired_pointings)
dish_override.update_desired_pointing_history(device_model)
expected_pointings = current_pointings + [desired_pointing_coordinates]
assert dish_override.desired_pointings == expected_pointings
# programTrackTable is newest, so must be used
dish_override.last_coordinate_update_timestamp = now - 5.0
device_model.sim_quantities["desiredPointing"].set_val(
desired_pointing_coordinates, now - 3.0
)
device_model.sim_quantities["programTrackTable"].set_val(
program_track_table_coordinates, now - 2.0
)
current_pointings = list(dish_override.desired_pointings)
dish_override.update_desired_pointing_history(device_model)
expected_pointings = (
current_pointings
+ [program_track_table_coordinates[0:3]]
+ [program_track_table_coordinates[3:6]]
)
assert dish_override.desired_pointings == expected_pointings
# Neither is newest, so no update expected
current_pointings = list(dish_override.desired_pointings)
dish_override.last_coordinate_update_timestamp = now
device_model.sim_quantities["desiredPointing"].set_val(
desired_pointing_coordinates, now - 2.0
)
device_model.sim_quantities["programTrackTable"].set_val(
program_track_table_coordinates, now - 3.0
)
dish_override.update_desired_pointing_history(device_model)
assert dish_override.desired_pointings == current_pointings
device_model.sim_quantities["desiredPointing"].set_val(
desired_pointing_coordinates, now - 3.0
)
device_model.sim_quantities["programTrackTable"].set_val(
program_track_table_coordinates, now - 2.0
)
dish_override.update_desired_pointing_history(device_model)
assert dish_override.desired_pointings == current_pointings
# New updates, but timestamps in the past, so no update expected
desired_pointing_coordinates = [now_millisec - 40.0, 5.0, 6.0]
program_track_table_coordinates = [
now_millisec - 60.0,
8.0,
9.0,
now_millisec - 50.0,
10.0,
11.0,
]
dish_override.last_coordinate_update_timestamp = now - 10
device_model.sim_quantities["desiredPointing"].set_val(desired_pointing_coordinates, now)
device_model.sim_quantities["programTrackTable"].set_val(
program_track_table_coordinates, now - 1.0
)
dish_override.update_desired_pointing_history(device_model)
assert dish_override.desired_pointings == current_pointings
dish_override.last_coordinate_update_timestamp = now - 10
device_model.sim_quantities["desiredPointing"].set_val(
desired_pointing_coordinates, now - 1.0
)
device_model.sim_quantities["programTrackTable"].set_val(
program_track_table_coordinates, now
)
dish_override.update_desired_pointing_history(device_model)
assert dish_override.desired_pointings == current_pointings
def test_pointing_state_reports_track_when_on_target(self, provision_setup):
def _update_pointing_state(device_model, dish_override):
now = time.time()
# ensure dish is in allowed mode before requesting track
# track command will change pointing state to slew
set_enum(device_model.sim_quantities["dishMode"], "OPERATE", now)
dish_override.action_track(device_model)
# update pointing state to TRACK if dish is on target, otherwise report slew
dish_override.update_movement_attributes(device_model, now)
current_pointing_state = get_enum_str(device_model.sim_quantities["pointingState"])
return current_pointing_state
device_model, dish_override = provision_setup
# ensure pointing state reports TRACK for requested and
# actual position default values of AzEl(0, 30)
current_pointing_state = _update_pointing_state(device_model, dish_override)
assert current_pointing_state == "TRACK"
# ensure pointing state reports SLEW when the dish is not on target
dish_override.requested_position = AzEl(azim=10.0, elev=40.0)
current_pointing_state = _update_pointing_state(device_model, dish_override)
assert current_pointing_state == "SLEW"
# move the dish to the desired position and check that pointing state is TRACK
dish_override.actual_position = AzEl(azim=10.0, elev=40.0)
current_pointing_state = _update_pointing_state(device_model, dish_override)
assert current_pointing_state == "TRACK"
def test_achieved_pointing_changes_when_dish_is_stowing(self, provision_setup):
device_model, dish_override = provision_setup
# send the dish closer to the stow position
dish_override.requested_position = AzEl(azim=0.0, elev=82.0)
dish_override.actual_position = AzEl(azim=0.0, elev=82.0)
# record initial az, el before movement
initial_az = device_model.sim_quantities["achievedPointing"].last_val[1]
initial_el = device_model.sim_quantities["achievedPointing"].last_val[2]
# request stow mode and move the dish close to the stow position
dish_override.action_setstowmode(device_model, tango_dev=mock.Mock())
stow_position = dish_override.STOW_ELEV_POSITION
dish_far_from_target = True
last_time = time.time()
timeout = time.time() + 5 # 5 seconds from now
while dish_far_from_target:
start_time = time.time()
dish_override.pre_update(device_model, start_time, start_time - last_time)
last_time = start_time
current_el = device_model.sim_quantities["achievedPointing"].last_val[2]
dish_far_from_target = not (stow_position - current_el == pytest.approx(1, abs=1))
time.sleep(1)
if timeout < start_time:
raise Exception("Timeout occurred")
current_az = device_model.sim_quantities["achievedPointing"].last_val[1]
current_el = device_model.sim_quantities["achievedPointing"].last_val[2]
assert current_el != initial_el, "The stow command did not move the dish at all"
assert (
current_az == initial_az
), "The dish should only move in elevation to stow, azimuth movement detected"
assert stow_position - current_el == pytest.approx(
1, abs=1
), "Dish did not arrive at stow position"
|
the-stack_0_9493 | def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurant20to50, obj[15]: Direction_same, obj[16]: Distance
# {"feature": "Age", "instances": 23, "metric_value": 0.9986, "depth": 1}
if obj[6]>1:
# {"feature": "Children", "instances": 13, "metric_value": 0.8905, "depth": 2}
if obj[8]>0:
# {"feature": "Income", "instances": 7, "metric_value": 0.9852, "depth": 3}
if obj[11]<=3:
# {"feature": "Passanger", "instances": 4, "metric_value": 0.8113, "depth": 4}
if obj[0]>1:
return 'True'
elif obj[0]<=1:
return 'False'
else: return 'False'
elif obj[11]>3:
return 'False'
else: return 'False'
elif obj[8]<=0:
return 'True'
else: return 'True'
elif obj[6]<=1:
# {"feature": "Distance", "instances": 10, "metric_value": 0.8813, "depth": 2}
if obj[16]>1:
# {"feature": "Education", "instances": 6, "metric_value": 1.0, "depth": 3}
if obj[9]>1:
# {"feature": "Weather", "instances": 4, "metric_value": 0.8113, "depth": 4}
if obj[1]<=1:
return 'False'
elif obj[1]>1:
return 'True'
else: return 'True'
elif obj[9]<=1:
return 'True'
else: return 'True'
elif obj[16]<=1:
return 'False'
else: return 'False'
else: return 'False'
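# Hedged example (not part of the generated tree): findDecision expects a
# 17-element feature vector in the column order documented above; the values
# below are made up purely to show the call shape.
# sample = [1, 2, 14, 1, 1, 0, 2, 0, 1, 2, 3, 4, 0, 1, 1, 0, 2]
# print(findDecision(sample))  # prints 'True' or 'False'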
|
the-stack_0_9494 | # -*- coding: utf-8 -*-
#
# Copyright 2017-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Options for BigMLer retrain option
"""
def get_retrain_options(defaults=None):
"""Retrain-related options
"""
if defaults is None:
defaults = {}
options = {
# Resource ID
'--id': {
'dest': 'resource_id',
'default': defaults.get('resource_id', None),
'help': ("ID for the resource to be retrained.")},
# path to the data file to be added
'--add': {
'dest': 'add',
'default': defaults.get('add', None),
'help': ("Path to the data file to be added.")},
# maximum number of datasets to be used when retraining
'--window-size': {
'type': int,
'dest': 'window_size',
'default': defaults.get('window_size', -1),
'help': ("Maximum number of datasets to be used in retraining."
" When not set, the new dataset will be added to the"
" last one used.")}
}
return options
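# Hedged usage sketch (not part of the original module): wiring the returned
# option mapping into an argparse parser; the argparse plumbing here is an
# illustrative assumption, not BigMLer's actual CLI code.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="retrain options demo")
    for flag, kwargs in get_retrain_options().items():
        parser.add_argument(flag, **kwargs)
    print(parser.parse_args(["--id", "model/123", "--window-size", "3"]))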
|
the-stack_0_9495 | import json
from typing import Dict, List
from mach_nix.data.providers import WheelDependencyProvider, SdistDependencyProvider, NixpkgsDependencyProvider
from mach_nix.data.nixpkgs import NixpkgsIndex
from mach_nix.generators import ExpressionGenerator
from mach_nix.resolver import ResolvedPkg
def unindent(text: str, remove: int):
# removes indentation of text
# also strips leading newlines
return ''.join(map(lambda l: l[remove:], text.splitlines(keepends=True)))
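# Hedged illustration (not in the original module): unindent() drops a fixed
# number of leading characters from every line, e.g.
#   unindent("    let\n      x = 1;\n", 4) == "let\n  x = 1;\n"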
class OverridesGenerator(ExpressionGenerator):
def __init__(
self,
py_ver,
nixpkgs: NixpkgsIndex,
pypi_fetcher_commit,
pypi_fetcher_sha256,
disable_checks,
*args,
**kwargs):
self.nixpkgs = nixpkgs
self.disable_checks = disable_checks
self.pypi_fetcher_commit = pypi_fetcher_commit
self.pypi_fetcher_sha256 = pypi_fetcher_sha256
self.py_ver_nix = py_ver.nix()
super(OverridesGenerator, self).__init__(*args, **kwargs)
def generate(self, reqs) -> str:
pkgs = self.resolver.resolve(reqs)
pkgs = dict(sorted(((p.name, p) for p in pkgs), key=lambda x: x[1].name))
return self._gen_python_env(pkgs)
def _gen_imports(self):
out = f"""
{{ pkgs, python, ... }}:
with builtins;
with pkgs.lib;
let
pypi_fetcher_src = builtins.fetchTarball {{
name = "nix-pypi-fetcher";
url = "https://github.com/DavHau/nix-pypi-fetcher/tarball/{self.pypi_fetcher_commit}";
# Hash obtained using `nix-prefetch-url --unpack <url>`
sha256 = "{self.pypi_fetcher_sha256}";
}};
pypiFetcher = import pypi_fetcher_src {{ inherit pkgs; }};
fetchPypi = pypiFetcher.fetchPypi;
fetchPypiWheel = pypiFetcher.fetchPypiWheel;
isPyModule = pkg:
isAttrs pkg && hasAttr "pythonModule" pkg;
normalizeName = name: (replaceStrings ["_"] ["-"] (toLower name));
depNamesOther = [
"depsBuildBuild"
"depsBuildBuildPropagated"
"nativeBuildInputs"
"propagatedNativeBuildInputs"
"depsBuildTarget"
"depsBuildTargetPropagated"
"depsHostHost"
"depsHostHostPropagated"
"depsTargetTarget"
"depsTargetTargetPropagated"
"checkInputs"
"installCheckInputs"
];
depNamesAll = depNamesOther ++ [
"propagatedBuildInputs"
"buildInputs"
];
updatePythonDepsRec = newPkgs: pkg:
if ! isPyModule pkg then pkg else
let
pname = normalizeName (get_pname pkg);
newP =
if newPkgs ? "${{pname}}" && pkg != newPkgs."${{pname}}" then
trace "Updated inherited nixpkgs dep ${{pname}} from ${{pkg.version}} to ${{newPkgs."${{pname}}".version}}"
newPkgs."${{pname}}"
else
pkg;
in
newP.overrideAttrs (old: mapAttrs (n: v:
if elem n depNamesAll then
map (p: updatePythonDepsRec newPkgs p) v
else v
) old);
override = pkg:
if hasAttr "overridePythonAttrs" pkg then
pkg.overridePythonAttrs
else
pkg.overrideAttrs;
nameMap = {{
pytorch = "torch";
}};
get_pname = pkg:
let
res = tryEval (
if pkg ? src.pname then
pkg.src.pname
else if pkg ? pname then
let pname = pkg.pname; in
if nameMap ? "${{pname}}" then nameMap."${{pname}}" else pname
else ""
);
in
toString res.value;
get_passthru = pypi_name: nix_name:
# if pypi_name is in nixpkgs, we must pick it, otherwise risk infinite recursion.
let
python_pkgs = python.pkgs;
pname = if hasAttr "${{pypi_name}}" python_pkgs then pypi_name else nix_name;
in
if hasAttr "${{pname}}" python_pkgs then
let result = (tryEval
(if isNull python_pkgs."${{pname}}" then
{{}}
else
python_pkgs."${{pname}}".passthru));
in
if result.success then result.value else {{}}
else {{}};
tests_on_off = enabled: pySelf: pySuper:
let
mod = {{
doCheck = enabled;
doInstallCheck = enabled;
}};
in
{{
buildPythonPackage = args: pySuper.buildPythonPackage ( args // {{
doCheck = enabled;
doInstallCheck = enabled;
}} );
buildPythonApplication = args: pySuper.buildPythonPackage ( args // {{
doCheck = enabled;
doInstallCheck = enabled;
}} );
}};
pname_passthru_override = pySelf: pySuper: {{
fetchPypi = args: (pySuper.fetchPypi args).overrideAttrs (oa: {{
passthru = {{ inherit (args) pname; }};
}});
}};
mergeOverrides = with pkgs.lib; foldl composeExtensions (self: super: {{}});
merge_with_overr = enabled: overr:
mergeOverrides [(tests_on_off enabled) pname_passthru_override overr];
"""
return unindent(out, 12)
def _gen_build_inputs(self, build_inputs_local, build_inputs_nixpkgs) -> str:
name = lambda n: f'python-self."{n}"' if '.' in n else n
build_inputs_str = ' '.join(
name(b) for b in sorted(build_inputs_local | build_inputs_nixpkgs))
return build_inputs_str
def _gen_prop_build_inputs(self, prop_build_inputs_local, prop_build_inputs_nixpkgs) -> str:
name = lambda n: f'python-self."{n}"' if '.' in n else n
prop_build_inputs_str = ' '.join(
name(b) for b in sorted(prop_build_inputs_local | prop_build_inputs_nixpkgs))
return prop_build_inputs_str
def _gen_overrideAttrs(
self, name, ver, circular_deps, nix_name, provider, build_inputs_str, prop_build_inputs_str,
keep_src=False):
out = f"""
"{name}" = override python-super.{nix_name} ( oldAttrs:
(mapAttrs (n: v: if elem n depNamesOther then map (dep: updatePythonDepsRec python-self dep) v else v ) oldAttrs) // {{
pname = "{name}";
version = "{ver}";
passthru = (get_passthru "{name}" "{nix_name}") // {{ provider = "{provider}"; }};
buildInputs = with python-self; (map (dep: updatePythonDepsRec python-self dep) (oldAttrs."buildInputs" or [])) ++ [ {build_inputs_str} ];
propagatedBuildInputs = with python-self; (map (dep: updatePythonDepsRec python-self dep) (oldAttrs."propagatedBuildInputs" or [])) ++ [ {prop_build_inputs_str} ];"""
if not keep_src:
out += f"""
src = fetchPypi "{name}" "{ver}";"""
if circular_deps:
out += f"""
pipInstallFlags = "--no-dependencies";"""
out += """
}
);\n"""
return unindent(out, 8)
def _gen_builPythonPackage(self, name, ver, circular_deps, nix_name, build_inputs_str, prop_build_inputs_str):
out = f"""
"{name}" = python-self.buildPythonPackage {{
pname = "{name}";
version = "{ver}";
src = fetchPypi "{name}" "{ver}";
passthru = (get_passthru "{name}" "{nix_name}") // {{ provider = "sdist"; }};"""
if circular_deps:
out += f"""
pipInstallFlags = "--no-dependencies";"""
if build_inputs_str.strip():
out += f"""
buildInputs = with python-self; [ {build_inputs_str} ];"""
if prop_build_inputs_str.strip():
out += f"""
propagatedBuildInputs = with python-self; [ {prop_build_inputs_str} ];"""
out += """
};\n"""
return unindent(out, 8)
def _gen_wheel_buildPythonPackage(self, name, ver, circular_deps, nix_name, prop_build_inputs_str, fname):
manylinux = "manylinux1 ++ " if 'manylinux' in fname else ''
# dontStrip added due to this bug - https://github.com/pypa/manylinux/issues/119
out = f"""
"{name}" = python-self.buildPythonPackage {{
pname = "{name}";
version = "{ver}";
src = fetchPypiWheel "{name}" "{ver}" "{fname}";
format = "wheel";
dontStrip = true;
passthru = (get_passthru "{name}" "{nix_name}") // {{ provider = "wheel"; }};"""
if circular_deps:
out += f"""
pipInstallFlags = "--no-dependencies";"""
if manylinux:
out += f"""
nativeBuildInputs = [ autoPatchelfHook ];
autoPatchelfIgnoreMissingDeps = true;"""
if prop_build_inputs_str.strip() or manylinux:
out += f"""
propagatedBuildInputs = with python-self; {manylinux}[ {prop_build_inputs_str} ];"""
out += """
};\n"""
return unindent(out, 8)
def _gen_overrides(self, pkgs: Dict[str, ResolvedPkg], overrides_keys):
pkg_names_str = "".join(
(f"ps.\"{name}\"\n{' ' * 14}"
for (name, pkg) in pkgs.items() if pkg.is_root))
check = json.dumps(not self.disable_checks)
out = f"""
select_pkgs = ps: [
{pkg_names_str.strip()}
];
overrides = manylinux1: autoPatchelfHook: merge_with_overr {check} (python-self: python-super: {{
"""
out = unindent(out, 10)
for pkg in pkgs.values():
if pkg.name not in overrides_keys:
continue
overlays_required = True
build_inputs_local = {b for b in pkg.build_inputs if b in overrides_keys}
build_inputs_nixpkgs = set(pkg.build_inputs) - build_inputs_local
prop_build_inputs_local = {b for b in pkg.prop_build_inputs if b in overrides_keys}
prop_build_inputs_nixpkgs = set(pkg.prop_build_inputs) - prop_build_inputs_local
# convert build inputs to string
build_inputs_str = self._gen_build_inputs(build_inputs_local, build_inputs_nixpkgs, ).strip()
# convert prop build inputs to string
prop_build_inputs_str = self._gen_prop_build_inputs(
prop_build_inputs_local, prop_build_inputs_nixpkgs).strip()
# SDIST
if isinstance(pkg.provider_info.provider, SdistDependencyProvider):
# generate package overlays either via `overrideAttrs` if package already exists in nixpkgs,
# or by creating it from scratch using `buildPythonPackage`
nix_name = self._get_ref_name(pkg.name, pkg.ver)
if self.nixpkgs.exists(pkg.name):
out += self._gen_overrideAttrs(
pkg.name,
pkg.provider_info.provider.deviated_version(pkg.name, pkg.ver),
pkg.removed_circular_deps,
nix_name,
'sdist',
build_inputs_str,
prop_build_inputs_str)
else:
out += self._gen_builPythonPackage(
pkg.name,
pkg.provider_info.provider.deviated_version(pkg.name, pkg.ver),
pkg.removed_circular_deps,
nix_name,
build_inputs_str,
prop_build_inputs_str)
# WHEEL
elif isinstance(pkg.provider_info.provider, WheelDependencyProvider):
out += self._gen_wheel_buildPythonPackage(
pkg.name,
pkg.provider_info.provider.deviated_version(pkg.name, pkg.ver),
pkg.removed_circular_deps,
self._get_ref_name(pkg.name, pkg.ver),
prop_build_inputs_str,
pkg.provider_info.wheel_fname)
# NIXPKGS
elif isinstance(pkg.provider_info.provider, NixpkgsDependencyProvider):
nix_name = self.nixpkgs.find_best_nixpkgs_candidate(pkg.name, pkg.ver)
out += self._gen_overrideAttrs(
pkg.name,
pkg.ver,
pkg.removed_circular_deps,
nix_name,
'nixpkgs',
build_inputs_str,
prop_build_inputs_str,
keep_src=True)
end_overlay_section = f"""
}});
"""
return out + unindent(end_overlay_section, 14)
def _get_ref_name(self, name, ver) -> str:
if self.nixpkgs.exists(name):
return self.nixpkgs.find_best_nixpkgs_candidate(name, ver)
return name
def _gen_python_env(self, pkgs: Dict[str, ResolvedPkg]):
overrides_keys = {p.name for p in pkgs.values()}
out = self._gen_imports() + self._gen_overrides(pkgs, overrides_keys)
python_with_packages = f"""
in
{{ inherit overrides select_pkgs; }}
"""
return out + unindent(python_with_packages, 12)
|
the-stack_0_9497 | # from https://www.datacamp.com/community/tutorials/face-detection-python-opencv
# from https://github.com/parulnith/Face-Detection-in-Python-using-OpenCV
# Import the necessary libraries
import numpy as np
import cv2
import matplotlib.pyplot as plt
def convertToRGB(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
def detect_faces(cascade, test_image, scaleFactor = 1.1):
# create a copy of the image to prevent any changes to the original one.
image_copy = test_image.copy()
#convert the test image to gray scale as opencv face detector expects gray images
gray_image = cv2.cvtColor(image_copy, cv2.COLOR_BGR2GRAY)
# Applying the haar classifier to detect faces
faces_rect = cascade.detectMultiScale(gray_image, scaleFactor=scaleFactor, minNeighbors=5)
for (x, y, w, h) in faces_rect:
cv2.rectangle(image_copy, (x, y), (x+w, y+h), (0, 255, 0), 2)
return image_copy
#loading image
test_image = cv2.imread('data/baby1.png')
#call the function to detect faces
haar_cascade_face = cv2.CascadeClassifier('data/haarcascades/haarcascade_frontalface_alt2.xml')
faces = detect_faces(haar_cascade_face, test_image)
#convert to RGB and display image
plt.imshow(convertToRGB(faces))
plt.show()
|
the-stack_0_9500 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import SchleemsTestFramework
from test_framework.util import *
class P2PMempoolTests(SchleemsTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
#connect a mininode
aTestNode = NodeConnCB()
node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
aTestNode.add_connection(node)
NetworkThread().start()
aTestNode.wait_for_verack()
#request mempool
aTestNode.send_message(msg_mempool())
aTestNode.wait_for_disconnect()
#mininode must be disconnected at this point
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
|
the-stack_0_9501 | from setuptools import setup, find_packages
install_requirements = ['splinter', 'docopt']
version = '0.2.0'
try:
import importlib
except ImportError:
install_requirements.append('importlib')
setup(
name='ticketmachine',
version=version,
description='The universal travel ticket machine',
#long_description=open('README.md').read(),
author='Tomas Babej',
author_email='[email protected]',
license='MIT',
url='https://github.com/tbabej/ticketmachine',
download_url='https://github.com/tbabej/ticketmachine/downloads',
packages=find_packages(exclude=['tests']),
include_package_data=True,
install_requires=install_requirements,
classifiers=[
'Development Status :: 4 - Beta',
],
entry_points={
'console_scripts': [
'ticketmachine = ticketmachine.main:main',
]
},
)
|
the-stack_0_9503 | import os
from spirl.models.closed_loop_spirl_mdl import ClSPiRLMdl
from spirl.components.logger import Logger
from spirl.utils.general_utils import AttrDict
from spirl.configs.default_data_configs.kitchen import data_spec
from spirl.components.evaluator import TopOfNSequenceEvaluator
from spirl.data.kitchen.src.kitchen_data_loader import KitchenStateSeqDataset
from spirl.models.bc_atomic import BCMdl
current_dir = os.path.dirname(os.path.realpath(__file__))
fewshot_dataset = KitchenStateSeqDataset(
data_path='data/kitchen/kitchen-demo-microwave_kettle_topknob_switch.hdf5',
subseq_len=10,
)
env = AttrDict(
task_list = ['microwave', 'kettle', 'top burner', 'light switch']
)
contra_model_cf = AttrDict(
state_dimension=data_spec.state_dim,
hidden_size=128,
feature_size=32,
)
configuration = {
'model': ClSPiRLMdl,
'logger': Logger,
'data_dir': '.',
'epoch_cycles_train': 1,
'evaluator': TopOfNSequenceEvaluator,
'top_of_n_eval': 100,
'top_comp_metric': 'mse',
'batch_size': 128,
'num_epochs': 50,
'fewshot_data': fewshot_dataset,
'fewshot_batch_size': 128,
'offline_data': False,
'contra_config': contra_model_cf,
'contra_ckpt': './experiments/contrastive/kitchen/exact-mixed-all/exact_model.pt',
'bc_model': BCMdl,
}
configuration = AttrDict(configuration)
model_config = AttrDict(
state_dim=data_spec.state_dim,
action_dim=data_spec.n_actions,
n_rollout_steps=10,
kl_div_weight=5e-4,
nz_enc=128,
nz_mid=128,
n_processing_layers=5,
cond_decode=True,
)
bc_model = AttrDict(
state_dim=data_spec.state_dim,
action_dim=data_spec.n_actions,
nz_mid=128,
n_processing_layers=5,
# checkpt_path=f'{os.environ["EXP_DIR"]}/bc_atomic/kitchen/offline_data/no-topknob',
)
# Dataset
data_config = AttrDict()
data_config.dataset_spec = data_spec
data_config.dataset_spec['dataset_path'] = './data/kitchen/kitchen-mixed-no-topknob.hdf5'
data_config.dataset_spec.subseq_len = model_config.n_rollout_steps + 1 # flat last action from seq gets cropped
|
the-stack_0_9504 | #!/usr/bin/env python
# spongemock __main__.py
# author: Noah Krim
# email: [email protected]
from __future__ import print_function
import argparse
import re
from pyperclip import copy
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def main():
parser = init_parser()
args = parser.parse_args()
try:
out = mock(' '.join(args.text), args.bias, args.seed or args.strseed or None)
except Exception as e:
eprint('Error: '+sys.argv[0]+': '+str(e))
return 1
if args.copy:
try:
copy(out)
except Exception:
eprint('Warning: '+sys.argv[0]+': could not copy the output to the clipboard because of an unexpected error. '
				+'If using Linux, please make sure you have all the proper modules installed for pyperclip '
+'(more info: https://tkinter.unpythonic.net/wiki/How_to_install_Tkinter).')
print(out)
return 0
def init_parser():
parser = argparse.ArgumentParser(description='Mock some text like spongebob would. mOCk SoMe TexT lIKe SpONGebOb wOuLd.')
parser.add_argument('text', nargs='+', help='the text to mock. ThE tExT tO mOCk.')
parser.add_argument('-c', '--copy', action='store_true', help='Mocked text will be copied to the clipboard.')
parser.add_argument('-b', '--bias', type=float, default=0.5,
help='This bias is used to succesively increase the chance of swapping from the previously-mocked case. '
+'A value of `0` will ensure the chance is always 50/50, '
+'and a value of `1` will ensure that after the first random choice the capitalization perfectly oscilates. '
+'Default is `0.5`.')
seed_group = parser.add_mutually_exclusive_group()
seed_group.add_argument('-s', '--seed', type=parsable_seed, help='Seed for random number generator. Can be any number or string (numbers are parsed).')
seed_group.add_argument('-S', '--strseed', help='Seed for random number generator. Does not attempt to parse the string to a number.')
return parser
def parsable_seed(str_seed):
# Try int parse
if re.match(r'^-?\d+$', str_seed):
return int(float(str_seed))
# Try float parse
try:
return float(str_seed)
except Exception:
pass
return str_seed
if __name__ == '__main__':
if __package__ is None:
from os import path
sys.path.append( path.dirname(path.abspath(__file__) ) )
from spongemock import mock
else:
from .spongemock import mock
main()
else:
from .spongemock import mock |
the-stack_0_9506 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime  # for managing dates and times
import os.path  # to manage paths
import sys  # to find the script name (argv[0])
# Import the backtrader platform
import backtrader as bt
# Create a strategy
class TestStrategy(bt.Strategy):
params = (
('deep', -0.3),
('printlog', False),
('profit',0.3),
('isA', False),
('onlyprintgood',True)
)
def log(self, txt, dt=None, doprint=False):
        '''Logging function for this strategy'''
if self.params.printlog or doprint:
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self):
        # Keep a reference to the "close" line in the data[0] dataseries
        self.dataclose = self.datas[0].close
        # Track pending orders and the buy price/commission
self.order = None
self.buyprice = None
self.buycomm = None
self.init_cash = self.broker.getvalue()
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
            # Buy/Sell order submitted/accepted to/by broker - nothing to do
return
        # Check whether the order has been completed
        # Note: the broker could reject the order if there is not enough cash
if order.status in [order.Completed]:
if order.isbuy():
self.log(
'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
            else:  # Sell
self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
self.log('Order Canceled/Margin/Rejected')
self.order = None
def notify_trade(self, trade):
if not trade.isclosed:
return
self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' %
(trade.pnl, trade.pnlcomm))
def next(self):
        # Log the closing price of the series from the reference
self.log('Close, %.2f' % self.dataclose[0])
        # Check if an order is pending... if so, we cannot send a second one
if self.order:
return
        # Check if we are in the market
if not self.position:
if len(self) > 10 and (self.dataclose[0] - max(self.dataclose)) / max(self.dataclose) <= self.params.deep:
                # Max drawdown has reached deep: BUY, BUY, BUY!!! (with all possible default parameters)
self.log('BUY CREATE, %.2f' % self.dataclose[0])
                # Keep track of the created order to avoid a second order
if self.params.isA:
size = int(self.broker.getvalue()*0.9 / self.dataclose[0]/100)*100
else:
size =self.broker.getvalue()*0.9/self.dataclose[0]-0.1
self.order = self.buy(size=size)
else:
            # Already in the market ... we may need to sell
            if (self.dataclose[0] - self.position.price) / self.position.price >= self.params.profit:
                # SELL, SELL, SELL!!! (with all possible default parameters)
                self.log('SELL CREATE, %.2f' % self.dataclose[0])
                # Keep track of the created order to avoid a second order
self.order = self.sell(size=self.position.size)
def stop(self):
if self.params.isA:
self.basesize = int(self.init_cash / self.dataclose[-(len(self)-1)] / 100) * 100
self.rest = self.init_cash- self.basesize * self.dataclose[-(len(self)-1)]
else:
self.basesize = self.init_cash / self.dataclose[-(len(self)-1)] - 0.1
self.rest = self.init_cash - self.basesize * self.dataclose[-(len(self)-1)]
self.basline = self.basesize * self.dataclose[0] + self.rest
if self.params.onlyprintgood:
if self.broker.getvalue()/self.init_cash - 1 > self.basline/self.init_cash - 1:
self.log('(MA deep %f, P %f, profit %f, baseline %f) Ending Value %.2f,baseline Value %.2f' %
(self.params.deep, self.params.profit, self.broker.getvalue()/self.init_cash - 1,self.basline/self.init_cash - 1, self.broker.getvalue(),self.basline), doprint=True)
else:
self.log('(MA deep %f, P %f, profit %f, baseline %f) Ending Value %.2f,baseline Value %.2f' %
(self.params.deep, self.params.profit, self.broker.getvalue() / self.init_cash - 1,
self.basline / self.init_cash - 1, self.broker.getvalue(), self.basline), doprint=True)
if __name__ == '__main__':
    # Create a cerebro entity
    cerebro = bt.Cerebro()
    # Add a strategy (buy once the max drawdown exceeds deep, sell once the profit after buying exceeds profit)
    #cerebro.addstrategy(TestStrategy)
    # Buy after the maximum drawdown reaches deep, sell after the profit reaches profit
cerebro.optstrategy(
TestStrategy,
deep=[-i/20 for i in range(1, 20)], profit=[i/20 for i in range(1, 20)], isA=True, printlog=False, onlyprintgood=True)
    # Datas are in a subfolder of the samples. Need to find where the script is
modpath = os.path.dirname(os.path.abspath(sys.argv[0]))
datapath = os.path.join(modpath, '../../datas/orcl-1995-2014.txt')
    # Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
        # Do not pass values before this date
fromdate=datetime.datetime(2000, 1, 1),
        # Do not pass values after this date
todate=datetime.datetime(2000, 12, 31),
reverse=False)
import pandas as pd
df = pd.read_csv('./samples/candlestick1')
df.index = pd.to_datetime(df.pop('id'), unit='s')
df.columns = ['high','low','open','close','volume','Adjusted_Close']
df.pop('Adjusted_Close')
df = df.sort_index()
df1 = df[-345:]
import tushare as ts
ts.set_token('1eda71057295b5ba834d31d24b572521d24689463e7328ca84fed1d6')
pro = ts.pro_api()
#df = pro.query('daily', ts_code='600519.SH', start_date='20150123',end_date='20210619')
#df = pro.query('daily', ts_code='601318.SH', start_date='20150123', end_date='20210530')
df = ts.pro_bar(ts_code='600519.SH', adj='qfq', start_date='20150123', end_date='20210619')
df = df.set_index(["trade_date"])
df = df.sort_index(ascending=True)
features_considered = ['open', 'close', 'high', 'low', "vol"]
features = df[features_considered]
features.columns = ['open', 'close', 'high', 'low','volume']
df = features
print(df.loc['20150123'])
df.index = pd.to_datetime(df.index, format='%Y%m%d')
data = bt.feeds.PandasData(dataname=df)
    # Add the Data Feed to Cerebro
    cerebro.adddata(data)
    # Set our desired cash start
    cerebro.broker.setcash(20000000.0)
    # Add a FixedSize sizer according to the stake
    #cerebro.addsizer(bt.sizers.FixedSize, stake=1)
    # Set the commission - 0.1% ... divide by 100 to remove the %
    cerebro.broker.setcommission(commission=0.001)
    # Run over everything
cerebro.run(maxcpus=1)
|
the-stack_0_9509 | from data import warehouse, word_frequencies
from puzzle.heuristics import acrostic
from puzzle.puzzlepedia import prod_config
from spec.mamba import *
BA_PREFIX_TRIE = word_frequencies.load(
zip(('bad', 'bag', 'ban', 'bar', 'bat'), [1]*5))
with description('acrostic'):
with it('uses a mock trie'):
a = acrostic.Acrostic(list('bag'), BA_PREFIX_TRIE)
expect(len(a._trie)).to(be_below(100))
with it('yields multi-character solutions'):
a = acrostic.Acrostic(list('bag'), BA_PREFIX_TRIE)
expect(list(a)).to(contain('bag'))
with it('is observable'):
a = acrostic.Acrostic(list('bag'), BA_PREFIX_TRIE)
subs = mock.Mock()
a.subscribe(subs)
expect(subs.on_next.call_args).to(equal(mock.call('bag')))
with it('yields unique solutions'):
a = acrostic.Acrostic(list('ba') + ['ggg'], BA_PREFIX_TRIE)
expect(list(a)).to(have_len(1))
with it('yields multiple multi-character solutions'):
a = acrostic.Acrostic(list('ba') + ['dgnrt'], BA_PREFIX_TRIE)
expect(list(a)).to(contain('bad', 'bag', 'ban', 'bar', 'bat'))
with _description('real data'):
with before.all:
warehouse.save()
prod_config.init()
with after.all:
prod_config.reset()
warehouse.restore()
with it('finds simple words'):
a = acrostic.Acrostic('cab')
expected = [
'cab',
'ca b',
'c ab',
]
for i, (answer, weight) in enumerate(a.items()):
expect('#%s = %s @ %s' % (i, answer, weight)).to(equal(
'#%s = %s @ %s' % (i, expected[i], weight)
))
expect(a.items()).to(have_len(len(expected)))
with it('finds important words'):
a = acrostic.Acrostic('binary')
expect(next(a.items())).to(equal(('binary', 1)))
with _it('modestly expensive'):
words = [
'larch', 'simple', 'foray', 'doyen', 'eerily', 'soup', 'must',
]
a = acrostic.Acrostic(words)
limit = 1000000
for i, (answer, weight) in enumerate(a.items()):
if answer.startswith('answer') or i % 1000 == 0:
print(answer, weight)
if i > limit:
print('tried %s' % i)
break
with _it('crazy expensive'):
words = [
'champion', 'nitpick', 'conspiracy', 'windpipe', 'epinephrine',
'philanthropic', 'sierpinski', 'mississippi', 'pilaf', 'vulpine',
'spinach', 'pinochet', 'porcupine', 'megapixels', 'australopithecus',
'sharpie', 'intrepid', 'insipid', 'robespierre'
]
a = acrostic.Acrostic(words)
limit = 1000000
for i, (answer, weight) in enumerate(a.items()):
if answer.startswith('answer') or i % (limit / 10) == 0:
print(answer, weight)
if i > limit:
print('tried %s' % i)
break
""" 4/24
a to incipient each rss 120548796
a to incipient opps eii 153396
a to incipient eipe rni 59329
a to incipient ipps epe 174519
a to incipient cmss ede 290375
a to incipient csts rsr 175192
a to incipient opca dsr 752124
a to incipient cisr tnp 87249
a to incipient ilos dps 1290835
a to pntemplates cs tio 770193
a to perempuan usps tio 770193
4/25 + early break in walk when scores are low
a to incipient each rss 120548796
a to incipient iste eie 57198
a to incipient cmss dss 1995347
a to incipient imia rsi 697477
a to incipient osrs eip 398559
a to perempuan peas tpe 275152
a to perempuan imcs nss 990710
a to perempuan caar ens 717319
a to perempuan usea tns 523866
a to perempuan epra pii 512601
a to dicipline imps psi 6101411
9/15 38 seconds; 35 seconds
a to incipient usui ipi 1.699863585947228e-07
a in incipient isps psr 3.399727171894456e-07
a in incipient rire dns 5.7795361922205745e-06
a i applesauce isls pdo 1.699863585947228e-07
a i applesauce pirs inr 6.799454343788912e-07
a i renaisance csus iss 2.209822661731396e-06
a i renaisance cmaa nsp 3.399727171894456e-07
a i renassance imes nss 5.099590757841683e-07
a can eliminate aisi ds 3.399727171894456e-07
a can eliminate phr dio 1.699863585947228e-07
"""
|
the-stack_0_9510 | from unicorn.arm_const import *
from ..fuzz import get_fuzz
import sys
def puts(uc):
ptr = uc.reg_read(UC_ARM_REG_R0)
assert(ptr != 0)
msg = uc.mem_read(ptr, 256)
#ptr += 1
#while msg[-1] != b"\0":
# msg += uc.mem_read(ptr, 1)
# ptr += 1
if b'\0' in msg:
msg = msg[:msg.find(b'\0')]
print(msg)
def putchar(uc):
c = uc.reg_read(UC_ARM_REG_R0)
assert (c < 256)
sys.stdout.write(chr(c))
sys.stdout.flush()
def printf(uc):
# for now just print out the fmt string
ptr = uc.reg_read(UC_ARM_REG_R0)
assert(ptr != 0)
msg = uc.mem_read(ptr, 256)
# ptr += 1
# while msg[-1] != b"\0":
# msg += uc.mem_read(ptr, 1)
# ptr += 1
if b'\0' in msg:
msg = msg[:msg.find(b'\0')]
sys.stdout.write(msg.decode('latin1'))
sys.stdout.flush()
def readline(uc):
ptr = uc.reg_read(UC_ARM_REG_R0)
l = uc.reg_read(UC_ARM_REG_R1)
assert(ptr != 0)
data = b''
while len(data) < l:
data += get_fuzz(1)
if data.endswith(b'\n'):
break
uc.mem_write(ptr, data)
uc.reg_write(UC_ARM_REG_R0, 0)
# echo
sys.stdout.write(data.decode('latin1'))
sys.stdout.flush() |
the-stack_0_9513 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import math
"""
############# GENERAL GAS CLASS ###########
"""
class Gas():
def __init__(self,T,P,R_u=8.31447):
self.T = T
self.P = P
self.R_u=R_u
self.normalshock=self.Shock(self)
def gas_list(self):
print(" Code\t","Gas","\n",
"----\t","---","\n",
"Air\t","Air" ,"\n",
"Ar\t\t","Argon" ,"\n" ,
"CO2\t","Carbon dioxide" ,"\n",
"CO\t\t","Carbon monoxide" ,"\n",
"N2\t\t","Nitrogen" ,"\n"
)
def area(self, diameter):
return (np.pi*(diameter**2))/4
def critical_area(self,massflowrate):
return massflowrate/(self.P*1000*(self.k**(1/2))*(2/(self.k+1))**((self.k+1)/(2*self.k-2))/((self.R*1000*self.T)**(1/2)))
def critical_m_dot(self, Ma, diameter=1):
return self.critical_density()*self.area(diameter)*self.critical_speed_of_sound(Ma)
def critical_temperature(self, Ma):
return self.stagnation_temp(Ma)*2/(self.k+1)
def critical_pressure(self):
return self.P*(2/(self.k+1))**(self.k/(self.k-1))
def critical_density(self):
return self.rho*(2/(self.k+1))**(1/(self.k-1))
def critical_speed_of_sound(self, Ma):
return np.sqrt(self.k*self.R*self.critical_temperature(Ma)*1000)
def density(self):
return self.P/(self.R*self.T)
def diameter(self, area):
return np.sqrt(4/np.pi*area)
def enthalpy(self):
return self.cp*self.T
def exit_temperature(self,Mach):
return self.T/(1+(self.k-1)/2*Mach**2)
def exit_pressure(self,Mach):
return self.P/(1+(self.k-1)/2*Mach**2)**(self.k/(self.k-1))
def exit_density(self, Mach):
return self.rho/(1+(self.k-1)/2*Mach**2)**(1/(self.k-1))
def exit_speed(self, Mach):
return Mach*np.sqrt(self.k*self.R*self.exit_temperature(Mach)*1000)
def exit_area(self, Throat_Area, Mach):
return Throat_Area*(1/Mach)*((2/(self.k+1))*(1+(self.k-1)/2*Mach**2))**((self.k+1)/(2*self.k-2))
def mach_number(self, velocity):
return velocity/self.speed_of_sound()
def m_dot(self, velocity, diameter=1):
return self.density()*self.area(diameter)*velocity
def mfr(self,velocity, diameter):
return self.critical_pressure()*self.area(diameter)*self.mach_number(velocity)*np.sqrt(self.k/(self.R*self.critical_temperature()))
def mass_flowrate(self, velocity, diameter=1):
return (self.area(diameter)*self.mach_number(velocity)*self.stagnation_pressure(velocity)*np.sqrt(self.k/(self.R*self.stagnation_temp(velocity))))\
/((1+(self.k-1)*(self.mach_number(velocity)**2)/2)**((self.k+1)/(2*(self.k-1))))
def ma_finder(self, section, area_ratio, show_iterations=False, tolerance=10e-6, method="bisection"):
try:
if section !="upward" and section !="downward":
raise NameError("Please specify the flow by using these keywords: \"upward\" or \"downward\"")
def finder(Ma):
value = (1/Ma*((1+0.5*(self.k-1)*Ma**2)/(0.5*(self.k+1)))**(0.5*(self.k+1)/(self.k-1)))
if method=='golden' or method=='secant':
target = abs(value - area_ratio)
elif method=='bisection':
target = value - area_ratio
return target
# def check_boundaries(Ma_0, Ma_1):
# if section=="upward":
# if Ma_0>1 or Ma_1>1:
# Ma_0 = 1/Ma_0
# Ma_1 = Ma_0+0.001
# # print("ma kucuk 1 den calisti")
# elif section=="downward":
# if Ma_0<1 or Ma_1<1:
# Ma_0 = 1+Ma_0
# Ma_1 = Ma_0+0.1
# # print("ma buyuk 1 den calisti")
if section=="upward":
if method=='bisection':
Ma=bisection_method( finder,0, 1, tolerance = 10e-6,show_iterations=show_iterations)
elif method=='secant':
Ma=secant_method( finder,0, 1, tolerance = 10e-6,show_iterations=show_iterations)
elif method=='golden':
Ma=golden_section(finder,0, 1, tolerance = 10e-6,show_iterations=show_iterations)
elif section=="downward":
if method=='bisection':
Ma=bisection_method( finder,1, 5, tolerance = 10e-6,show_iterations=show_iterations)
elif method=='secant':
Ma=secant_method( finder,1, 5, tolerance = 10e-6,show_iterations=show_iterations)
elif method=='golden':
Ma=golden_section(finder,1, 5, tolerance = 10e-6,show_iterations=show_iterations)
return Ma
except NameError:
raise NameError("Please specify the flow by using these keywords: \"upward\" or \"downward\"") from None
except ValueError:
raise ValueError("Given area is smaller than throat area. Program has terminated.\n Hint: You could change the division number.") from None
def plot(self,area_start, area_end, Mach_start, y_axis='T', color_bar='Ma', division=250 ,x_axis='A', method="bisection"):
area_upward = np.linspace(area_start, self.throat_area(area_start,Mach_start), division)
area_downward = np.linspace(self.throat_area(area_start,Mach_start), area_end, division)
area_total = np.concatenate((area_upward,area_downward))
ST = self.stagnation_temp(Mach_start)
temp_upward = []
Ma_upward = []
for i in range(division):
ratio = self.throat_area_ratio(area_upward[i], area_start, Mach_start)
Ma=self.ma_finder("upward",ratio,method=method)
Ma_upward.append(Ma)
temp_upward.append(self.temperature(Ma, ST))
temp_downward = []
Ma_downward = []
for i in range(division):
ratio = self.throat_area_ratio(area_downward[i], area_start, Mach_start)
Ma=self.ma_finder("downward",ratio,method=method)
Ma_downward.append(Ma)
temp_downward.append(self.temperature(Ma, ST))
temp_total = temp_upward +temp_downward
Ma_total = Ma_upward +Ma_downward
fig = plt.figure(figsize=(10,7.5))
ax = fig.add_subplot(111)
xs = np.linspace(0,1,2*division)
if y_axis == 'T':
y_lbl='Temperature (K)'
if color_bar=='Ma':
color = Ma_total
mp = ax.scatter((xs),(temp_total),c=color,cmap=plt.cm.get_cmap('jet'))
c_lbl = 'Mach Number'
elif color_bar=='T':
mp = ax.scatter((xs),(temp_total),c=temp_total,cmap=plt.cm.get_cmap('jet'))
c_lbl = 'T (K)'
elif y_axis == 'Ma':
y_lbl='Mach Number'
if color_bar=='Ma':
color = Ma_total
mp = ax.scatter((xs),(Ma_total),c=color,cmap=plt.cm.get_cmap('jet'))
c_lbl = 'Mach Number'
elif color_bar=='T':
mp = ax.scatter((xs),(Ma_total),c=temp_total,cmap=plt.cm.get_cmap('jet'))
c_lbl = 'T (K)'
cb = plt.colorbar(mp)
cb.set_label(c_lbl)
ax.set(title=r'Converging- Diverging Nozzle',
xlabel='Area $m^2$', ylabel=y_lbl)
tick_labels=[]
for j in np.linspace(0,(2*division),7):
if j==2*division:
tick_labels.append(round(area_total[-1],4))
else:
tick_labels.append(round(area_total[int(j)],4))
plt.xticks(np.linspace(0,1,7),tick_labels)
plt.show()
def pressure(self, Mach, Stagnation_Pressure):
return Stagnation_Pressure/((1+0.5*(self.k-1)*Mach**2)**(self.k/(self.k-1)))
def speed_of_sound(self):
return np.sqrt(self.k*self.R*self.T*1000)
def stagnation_temp(self,Mach):
return self.T*(1+(self.k-1)/2*Mach**2)
def stagnation_pressure(self,Mach):
return self.P*(1+0.5*(self.k-1)*Mach**2)**(self.k/(self.k-1))
def temperature(self, Mach, Stagnation_Temperature):
return Stagnation_Temperature/(1+(self.k-1)/2*Mach**2)
def throat_area(self,known_area,Mach):
return known_area/((1/Mach)*((2/(self.k+1))*(1+(self.k-1)/2*Mach**2))**((self.k+1)/(2*self.k-2)))
def throat_area_ratio(self,wanted_area, known_area,known_Mach):
return wanted_area/self.throat_area(known_area, known_Mach)
class Shock():
def __init__(self, gas):
self.gas = gas
def P2(self, Ma1, P1):
return P1*(1/(self.gas.k+1)*(2*self.gas.k*Ma1**2-(self.gas.k-1)))
def Ma2(self,Ma1):
return np.sqrt(((self.gas.k-1)*Ma1**2+2)/(2*self.gas.k*Ma1**2-(self.gas.k-1)))
def P0_2(self,Stagnation_Pressure, Ma1):
return Stagnation_Pressure*((((self.gas.k+1)*Ma1**2)/(2+(self.gas.k-1)*Ma1**2))**(self.gas.k/(self.gas.k-1))\
*((self.gas.k+1)/(2*self.gas.k*Ma1**2-(self.gas.k-1)))**(1/(self.gas.k-1)))
def area_shock_star(self, area1_star, Ma1):
return area1_star*(self.Ma2(Ma1)/Ma1)*((2+(self.gas.k-1)*Ma1**2)/(2+(self.gas.k-1)*self.Ma2(Ma1)**2))**((self.gas.k+1)/(2*self.gas.k-2))
def Ma_beforeshock(self, P2_P1):
return np.sqrt((P2_P1*(self.gas.k+1)+(self.gas.k-1))/(2*self.gas.k))
def T2(self,T1,Ma1):
return T1*(2+(self.gas.k-1)*Ma1**2)*(2*self.gas.k*Ma1**2-(self.gas.k-1))/(((self.gas.k+1)**2)*(Ma1**2))
def V2(self, T1, V1):
return np.sqrt(2*self.gas.cp*(T1-self.T2(T1, V1/(self.gas.speed_of_sound())))+V1**2)
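        # Worked example (illustrative, not from the original source): for a normal
        # shock in air (k = 1.4) with upstream Mach number Ma1 = 2.0, the standard
        # normal-shock relations used above give roughly
        #   Ma2(2.0)    ~ 0.577
        #   P2(2.0, P1) ~ 4.5 * P1
        #   T2(T1, 2.0) ~ 1.69 * T1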
class Air(Gas):
def __init__(self,T=298.15,P=101.325):
super().__init__(T, P)
self.M=28.97
self.k=1.4
self.R=self.R_u/self.M
self.cp=1.9327E-10*self.T**4 - 7.9999E-07*self.T**3 + 1.1407E-03*self.T**2 - 4.4890E-01*self.T + 1.0575E+03
self.rho = self.P/(self.R*self.T)
class CO2(Gas):
def __init__(self,T=298.15,P=101.325):
super().__init__(T, P)
self.M=44.01
self.k=1.289
self.R=self.R_u/self.M
self.cp=0.849
self.rho = self.P/(self.R*self.T)
class CO(Gas):
def __init__(self,T=298.15,P=101.325):
super().__init__(T, P)
self.M=28.01
self.k=1.4
self.R=self.R_u/self.M
self.cp=1.039
self.rho = self.P/(self.R*self.T)
class N2(Gas):
def __init__(self,T=298.15,P=101.325):
super().__init__(T, P)
self.M=28.01
self.k=1.4
self.R=self.R_u/self.M
self.cp=1.040
self.rho = self.P/(self.R*self.T)
class Ar(Gas):
def __init__(self,T=298.15,P=101.325):
super().__init__(T, P)
self.M=39.95
self.k=1.667
self.R=self.R_u/self.M
self.cp=0.5203
self.rho = self.P/(self.R*self.T)
"""
############# NUMERICAL METHODS ###########
"""
def golden_section(func,starting, ending, show_iterations=False, tolerance = 10e-6):
gr=(np.sqrt(5)+1)/2-1
dm=tolerance
a0 = starting+dm
b0 = ending-dm
count=0
while True:
count+=1
# print(finder(Ma_0))
# print(finder(Ma_1))
d=gr*(b0-a0)
a1=a0+d
b1=b0-d
if abs((a1-b1)/a1)<=tolerance:
if 1>=ending:
print("The Mach number below unity is: ",a1,"\n")
elif starting>=1:
print("The Mach number above unity is: ",a1,"\n")
break
else:
if func(a0)>func(b0):
a0=a1
b1=b1
else:
a0=a0
b0=b1
if show_iterations ==True:
print("Iteration ", count, " :",a1)
return (a1+b1)/2
def secant_method(func, lower_bound, upper_bound, show_iterations=False,tolerance=10e-6):
Ma_0 = (upper_bound+lower_bound)/2
dMa = 0.01
Ma_1 = Ma_0+dMa
count=0
while True:
count+=1
Ma_2 = Ma_1 - func(Ma_1)*(Ma_1-Ma_0)/(func(Ma_1)-func(Ma_0))
if show_iterations ==True:
print("Iteration ", count, " :",Ma_2)
if func(Ma_2)<=tolerance:
if show_iterations ==True:
print("The Mach number below unity is: ",Ma_2,"\n")
break
else:
Ma_0 = Ma_1
Ma_1 = Ma_2
return Ma_2
def bisection_method(func, lower_bound, upper_bound, show_iterations=False,tolerance=10e-6):
if lower_bound==0 :
lower_bound+=tolerance
a=lower_bound
b= upper_bound
count = 0
while True:
count+=1
c = (a+b)/2
if abs(func(c))<=tolerance:
if show_iterations ==True:
print("The root is: ",c,"\n")
break
else:
if func(a)*func(c)>func(b)*func(c):
b=b
a=c
else:
a=a
b=c
if show_iterations ==True:
print("Iteration ", count, " :",c)
return c
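# Usage sketch for the root finders above (illustrative, not part of the original
# module): each takes a scalar function and a bracketing interval, e.g.
#   f = lambda x: x**2 - 2
#   bisection_method(f, 0, 2)   # converges to ~1.41421 (sqrt(2))
# Note that golden_section is used with an |f|-style target in ma_finder (it
# minimises the target), while bisection/secant drive f itself toward zero.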
"""
############# ROCKET NOZZLE CLASS ###########
"""
class Nozzle(Gas):
def __init__(self, class_gas):
self.T=class_gas.T
self.P=class_gas.P
self.k=class_gas.k
self.M=class_gas.M
self.k=class_gas.k
self.R=class_gas.R_u/class_gas.M
self.cp=class_gas.cp
self.rho = class_gas.P/(class_gas.R*class_gas.T)
def critical_throat_pressure(self):
return self.P*(2/(self.k+1))**(self.k/(self.k-1))
def exit_mach(self,backflow_pressure):
if self.ischoked(backflow_pressure):
Ma = 1
else:
Ma = np.sqrt(5*((self.P/backflow_pressure)**(2/7)-1))
return Ma
def ischoked(self, backflow_pressure ):
if backflow_pressure < self.critical_pressure():
condition=True
else:
condition = False
return condition
def massflowrate(self, backflow_pressure, area):
if self.ischoked(backflow_pressure):
mdot = (area*self.P*1000)/(np.sqrt(self.R*self.T*1000))*np.sqrt((2*self.k/(self.k-1))*((self.critical_pressure()/self.P)**(2/self.k))*(1-(self.critical_pressure()/self.P)**(1-1/self.k)))
else:
mdot = (area*self.P*1000)/(np.sqrt(self.R*self.T*1000))*np.sqrt((2*self.k/(self.k-1))*((backflow_pressure/self.P)**(2/self.k))*(1-(backflow_pressure/self.P)**(1-1/self.k)))
return mdot
class RocketNozzle(Gas):
def __init__(self, class_gas):
self.T=class_gas.T
self.P=class_gas.P
self.k=class_gas.k
self.M=class_gas.M
self.k=class_gas.k
self.R=class_gas.R_u/class_gas.M
self.cp=class_gas.cp
self.rho = class_gas.P/(class_gas.R*class_gas.T)
self.normalshock=self.Shock(self)
def geometry(self, area_start, area_throat, area_end, division=250, color = 'black'):
A_start = area_start
A1_star = area_throat
A_exit = area_end
division = 250
r1=int((A_start/A1_star)/(A_start/A1_star+A_exit/A1_star)*division)
r2=int((A_exit/A1_star)/(A_start/A1_star+A_exit/A1_star)*division)
area_upward = np.linspace((A_start), (A1_star), r1)
area_downward = np.linspace((A1_star), (A_exit), r2)
area_total = np.concatenate((area_upward,area_downward))
diameter_total = self.diameter(area_total)
# plt.style.use('dark_background')
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
xs = np.linspace(0,1,r1+r2)
tick_labels=[]
for j in np.linspace(0,(r1+r2),11):
if j==r1+r2:
tick_labels.append(round(area_total[-1],4))
else:
tick_labels.append(round(area_total[int(j)],4))
plt.xticks(np.linspace(0,1,11),tick_labels)
plt.plot(xs,diameter_total/2,color=color,linewidth=3)
plt.plot(xs,-diameter_total/2,color=color,linewidth=3)
centerline,=plt.plot(xs, 0*xs,linewidth=1,color=color)
dashes=[30,5,5,5]
centerline.set_dashes(dashes)
plt.xlabel("Area (m2)")
plt.ylabel("Radius (m)")
plt.title("Rocket Nozzle Geometry")
plt.show()
plt.style.use('default')
def shock(self, exit_pressure, throat_area, exit_area, start_area, plot=True,division = 250):
def shock_finder(A_shock):
ratio = A_shock/throat_area
M1 = self.ma_finder('downward', ratio)
P1 = self.pressure(M1, self.P)
T1 = self.temperature(M1, self.T)
M2 = self.normalshock.Ma2(M1)
P2 = self.normalshock.P2(M1,P1)
T2 = self.normalshock.T2(T1, M1)
P02 = self.normalshock.P0_2(self.P, M1)
A2_star = self.normalshock.area_shock_star(throat_area, M1)
ratio2 = exit_area/A2_star
Me = self.ma_finder('upward', ratio2)
Pe = self.pressure(Me,P02)
target = Pe-exit_pressure
return target
if shock_finder(exit_area)>0:
print("There is no shock wave in the rocket nozzle")
A_shock = None
else:
A_shock=bisection_method( shock_finder,throat_area, exit_area, tolerance = 10e-3,show_iterations=True)
def shock_plot(start_area):
A_start = start_area
A1_star = throat_area
A_exit = exit_area
r1=int((A_start/A1_star)/(A_start/A1_star+A_exit/A1_star)*division)
r2=int((A_exit/A1_star)/(A_start/A1_star+A_exit/A1_star)*division)
area_upward = np.linspace((start_area), (throat_area), r1)
area_downward = np.linspace((throat_area), (exit_area), r2)
area_total = np.concatenate((area_upward,area_downward))
def find_closest(A, target):
#A must be sorted
idx = A.searchsorted(target)
idx = np.clip(idx, 1, len(A)-1)
left = A[idx-1]
right = A[idx]
idx -= target - left < right - target
return idx
idx=find_closest(area_total,A_shock)
r=self.diameter(A_shock)/2
plt.style.use('dark_background')
self.geometry(start_area, throat_area, exit_area,color='white')
y=np.linspace(r,-r)
# correction = ((A_shock/throat_area)+(start_area/throat_area))/((exit_area/throat_area)+(start_area/throat_area))
x=A_shock*np.sin(5000*y)+idx/division
plt.plot(x,y,color='gold')
plt.show()
plt.style.use('default')
if plot==True:
shock_plot(start_area)
return A_shock
"""
############# RELATIONS CLASS ###########
"""
class relations:
def change_in_entropy(T2,T1,P2,P1,cp,R):
return cp*np.log(T2/T1)-R*np.log(P2/P1)
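    # Worked example (illustrative; the property values are assumed, not taken
    # from this file): heating air at constant pressure from 300 K to 600 K with
    # cp ~ 1.005 kJ/(kg*K) and R ~ 0.287 kJ/(kg*K) gives
    #   ds = 1.005 * ln(600/300) - 0.287 * ln(1) ~ 0.697 kJ/(kg*K)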
|
the-stack_0_9514 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
import sys
# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
# system, use it to handle IPAddress ServerAltnames (this was added in
# python-3.5) otherwise only do DNS matching. This allows
# backports.ssl_match_hostname to continue to be used in Python 2.7.
try:
from pip._vendor import ipaddress
except ImportError:
ipaddress = None
__version__ = '3.5.0.1'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
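# Illustrative behaviour of the wildcard rules above (not part of the upstream
# module): a single left-most wildcard matches exactly one non-empty label, e.g.
#   _dnsname_match('*.example.com', 'www.example.com')      -> match
#   _dnsname_match('*.example.com', 'sub.www.example.com')  -> no match
#   _dnsname_match('*.example.com', 'example.com')          -> no match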
def _to_unicode(obj):
if isinstance(obj, str) and sys.version_info < (3,):
obj = unicode(obj, encoding='ascii', errors='strict')
return obj
def _ipaddress_match(ipname, host_ip):
"""Exact matching of IP addresses.
RFC 6125 explicitly doesn't define an algorithm for this
(section 1.7.2 - "Out of Scope").
"""
# OpenSSL may add a trailing newline to a subjectAltName's IP address
# Divergence from upstream: ipaddress can't handle byte str
ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
return ip == host_ip
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
try:
# Divergence from upstream: ipaddress can't handle byte str
host_ip = ipaddress.ip_address(_to_unicode(hostname))
except ValueError:
# Not an IP address (common case)
host_ip = None
except UnicodeError:
# Divergence from upstream: Have to deal with ipaddress not taking
# byte strings. addresses should be all ascii, so we consider it not
# an ipaddress in this case
host_ip = None
except AttributeError:
# Divergence from upstream: Make ipaddress library optional
if ipaddress is None:
host_ip = None
else:
raise
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if host_ip is None and _dnsname_match(value, hostname):
return
dnsnames.append(value)
elif key == 'IP Address':
if host_ip is not None and _ipaddress_match(value, host_ip):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
|
the-stack_0_9516 | import numpy as np
import matplotlib.pyplot as plt
# EXAMPLE ON THE GUIDE
a = np.arange(-5, 5, 0.1)
f_x = np.power(a,2)
plt.plot(a, f_x)
plt.xlim(-5,5)
plt.ylim(-5,15)
k = np.array([-2,0,2])
plt.plot(k, k**2, "bo")
for i in k:
plt.plot(a, (2*i)*a-(i**2))
plt.show()
|
the-stack_0_9518 | """
Plot one week of events loaded from file (starting from the earliest event).
Examples:
plot_events.py --from events.json
Usage:
plot_events.py [--from=<FILE>]
Options:
-h --help Show this screen.
-f --from=<FILE> File containing a list of event descriptions [default: default]
"""
from os.path import join, dirname, exists
import sys
import random
import matplotlib.pyplot as plt
import numpy as np
import json
import pytz
import matplotlib.ticker as ticker
import re
from datetime import datetime, time, timedelta
from matplotlib.patches import FancyBboxPatch
from PIL import Image
from docopt import docopt
from datetime import time
from math import ceil
from collections import namedtuple
from typing import List, Dict
from schedulingassistant.data import Event
from schedulingassistant.conversion_utils import time_to_float
# TODO:
# * Complete all docstrings for functions in this file
# * Add tests for `str_to_datetime`
DAY_COUNT = 7
def main(sys_args: List[str] = []) -> None:
args = parse_args(docopt(__doc__, argv=sys_args))
try:
events = extract_events_from(args['event_file'])
plot_events(events)
return 0
except Exception as e_info:
print(e_info)
return 1
def parse_args(args: Dict[str, str]) -> Dict[str, any]:
"""Parse the arguments passed in, and return a dictionary containing the final input values for the application."""
DEFAULT_EVENT_FILE = "generated_events.json"
event_file_arg = args['--from']
event_file_path = event_file_arg
if event_file_arg == "default":
event_file_path = join(dirname(__file__), DEFAULT_EVENT_FILE)
if exists(event_file_path):
return { 'event_file': event_file_path }
else:
raise ValueError("File '" + event_file_path + "' could not be found.")
def plot_events(events: List[Event]) -> None:
fig = plt.figure(figsize=(10, 16))
fig.tight_layout()
plt.title('Events', y=1, fontsize=14)
ax = fig.add_subplot(1, 1, 1)
# X
ax.set_xlim(0.5, DAY_COUNT + 0.5)
earliest_date = min([e.start_datetime.date() for e in events])
date_labels = [(earliest_date + timedelta(days=i)).strftime("%d/%m/%Y") for i in range(DAY_COUNT + 1)]
ax.set_xticks(range(1, DAY_COUNT + 1))
ax.set_xticklabels(date_labels)
plt.tick_params(bottom=False) # Hide ticks
# Y
start_of_day = 0
end_of_day = 24
ax.set_ylim(end_of_day, start_of_day)
block_times = np.arange(start_of_day, end_of_day, float(5.0/60.0))
ax.set_yticks(block_times)
hour_labels = [("{0}:00".format(int(b)) if b.is_integer() else "") for b in block_times]
ax.set_yticklabels(hour_labels)
# Create the horizontal timeblock grid lines
ax.grid(axis='y', linestyle='-', linewidth=0.3, color="black", alpha=0.05)
grid_lines = ax.yaxis.get_gridlines()
for h in range(end_of_day):
label = "{0}:00".format(h)
label_idx = hour_labels.index(label)
grid_lines[label_idx].set_alpha(1.0)
# Go through and make all hour grid lines bold
# https://stackoverflow.com/questions/53781180/polar-plot-put-one-grid-line-in-bold
# Plot the events
for e in events:
plot_event(e, earliest_date, ax)
# Save this output to an image file and open it
img_name = 'events.png'
plt.savefig(img_name, dpi=400, bbox_inches='tight')
img = Image.open(img_name)
img.show()
def extract_events_from(events_file_path: str) -> List[Event]:
"""todo, also add docstrings to all other functions in this file"""
events = []
with open(events_file_path) as events_file:
json_events = json.load(events_file)
for e in json_events:
name = e['name']
start = str_to_datetime(e['start_datetime'])
end = str_to_datetime(e['end_datetime'])
events.append(Event(name, start, end))
return events
def str_to_datetime(input_str: str) -> datetime:
"""Parse a string `input_str` and return a corresponding `datetime` object."""
microseconds = 0
if '.' in input_str:
seconds_decimal_component_match = re.match(r"[^.]*\d+[^.]*(.\d+)", input_str)
if seconds_decimal_component_match:
decimal_component_str = seconds_decimal_component_match.group(1)
input_str = input_str.replace(decimal_component_str, '')
microseconds = int(float("0" + decimal_component_str) * 1000000)
output = datetime.strptime(input_str, "%Y-%m-%dT%H:%M:%S%z").replace(microsecond=microseconds, tzinfo=pytz.utc)
return output
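# Example of the parsing behaviour (illustrative, not part of the original file):
#   str_to_datetime("2021-03-01T09:30:00.5+0000")
# returns a UTC datetime for 2021-03-01 09:30:00 with microsecond=500000; the
# fractional seconds are stripped before strptime and re-applied as microseconds.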
def plot_event(e: Event, earliest_date: datetime.date, ax) -> None:
boxes = convert_event_to_boxes(e)
# An index representing the first day that the start of this event should be on
day_offset = (e.start_datetime.date() - earliest_date).days
color = rand_hex_col()
start_hour = e.start_datetime.hour
start_min = e.start_datetime.minute
event_label = '{0}:{1:0>2} {2}'.format(start_hour, start_min, e.name)
for box_idx in range(len(boxes)):
label = event_label if box_idx == 0 else ""
draw_event_box(boxes[box_idx], day_offset, label, color, ax)
# An `EventBox` represents a window of time that can be drawn with one rectangle on a calendar with multiple days in
# different columns. E.g. 9am - 10am would be a valid `EventBox`, but 11pm - 1am would not, as this would have to be broken
# down into two windows.
EventBox = namedtuple('EventBox', ['column_idx', 'start_time_float', 'end_time_float'])
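# For example (illustrative), 9am-10am in the first column would be
# EventBox(column_idx=0, start_time_float=9.0, end_time_float=10.0).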
def draw_event_box(box: EventBox, day_offset: int, label: str, color: str, ax):
"""Draws an event box on the plot using a day index (used internally to calculate the horizontal components of the
box, and two start/end floats representing percentages through the day, used to calculate the vertical components."""
top = box.start_time_float
bottom = box.end_time_float
left = 0.5 + box.column_idx + day_offset
# If this event would be drawn outside the view of the plot
if left >= 7.0:
return
padding_between_days = 0.05
right = left + 1 - padding_between_days
# Draw boxes and labels on top of everything else
z = 2.0
box = FancyBboxPatch(
(left, top),
abs(right - left),
abs(bottom - top),
boxstyle="round,pad=-0.0040,rounding_size=0.02",
ec="black",
fc=color,
lw=0.2,
zorder=z,
mutation_aspect=1)
ax.add_patch(box)
plt.text(left + 0.01, top + 0.01, label, va='top', fontsize=3, zorder=z)
def convert_event_to_boxes(event: Event) -> List[EventBox]:
"""Takes in an event and converts this into a list of boxes that when combined completely cover the time allocated
to this event. Usually, this list will contain a single EventBox as many events start and end on the same day, but
any events split across multiple day boundaries will be split into multiple boxes."""
start_date = event.start_datetime.date()
end_date = event.end_datetime.date()
start_time_float = time_to_float(event.start_datetime.time())
end_time_float = time_to_float(event.end_datetime.time())
days_spanned = (end_date - start_date).days + 1
boxes = []
if days_spanned == 1:
boxes.append(EventBox(0, start_time_float, end_time_float))
else:
boxes.append(EventBox(0, start_time_float, 24.0))
for i in range(max(0, days_spanned - 2)):
boxes.append(EventBox(i + 1, 0.0, 24.0))
boxes.append(EventBox(days_spanned - 1, 0.0, end_time_float))
return boxes
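# Example of the splitting behaviour (illustrative): an event running from 22:00
# on day N to 02:00 on day N+1 spans two calendar days and is returned as
#   [EventBox(0, 22.0, 24.0), EventBox(1, 0.0, 2.0)]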
# Create rounded box for the event with a random colour
# https://stackoverflow.com/questions/58425392/bar-chart-with-rounded-corners-in-matplotlib
def rand_hex_col() -> str:
r = lambda: 128 + random.randint(0, 127)
return '#%02X%02X%02X' % (r(),r(),r())
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
the-stack_0_9519 | # Copyright 2020 Dragonchain, Inc.
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
# You may obtain a copy of the Apache License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
from typing import TYPE_CHECKING, List, Any
from dragonchain import exceptions
from dragonchain.lib.dto import eth
from dragonchain.lib.dto import btc
from dragonchain.lib.dto import bnb
from dragonchain.lib.interfaces import storage
if TYPE_CHECKING:
from dragonchain.lib.dto import model
FOLDER = "INTERCHAINS"
def save_interchain_client(interchain_client: "model.InterchainModel") -> None:
"""Save an interchain model to storage"""
storage.put_object_as_json(f"{FOLDER}/{interchain_client.blockchain}/{interchain_client.name}", interchain_client.export_as_at_rest())
def does_interchain_exist(blockchain: str, name: str) -> bool:
"""Check if a specific interchain exists
Args:
blockchain: the blockchain of the desired client (i.e. bitcoin, ethereum, etc)
name: the name (id) of the network to get (user defined on the creation of the interchain)
"""
if blockchain == "bitcoin":
return storage.does_object_exist(f"{FOLDER}/bitcoin/{name}")
elif blockchain == "ethereum":
return storage.does_object_exist(f"{FOLDER}/ethereum/{name}")
elif blockchain == "binance":
return storage.does_object_exist(f"{FOLDER}/binance/{name}")
else:
return False
def get_interchain_client(blockchain: str, name: str) -> "model.InterchainModel":
"""Get a specific interchain client
Args:
blockchain: the blockchain of the desired client (i.e. bitcoin, ethereum, etc)
name: the name (id) of the network to get (user defined on the creation of the interchain)
Raises:
exceptions.NotFound: When the requested client can't be found
"""
if blockchain == "bitcoin":
return btc.new_from_at_rest(storage.get_json_from_object(f"{FOLDER}/bitcoin/{name}"))
elif blockchain == "ethereum":
return eth.new_from_at_rest(storage.get_json_from_object(f"{FOLDER}/ethereum/{name}"))
elif blockchain == "binance":
return bnb.new_from_at_rest(storage.get_json_from_object(f"{FOLDER}/binance/{name}"))
else:
raise exceptions.NotFound(f"Blockchain network {blockchain} is not supported")
def list_interchain_clients(blockchain: str) -> List["model.InterchainModel"]:
"""Get all of the interchain clients for a specific blockchain type
Args:
blockchain: The blockchain of the desired clients to get
Returns:
List of instantiated interchain clients for the specified blockchain
"""
from_rest_function: Any = None
if blockchain == "bitcoin":
from_rest_function = btc.new_from_at_rest
elif blockchain == "ethereum":
from_rest_function = eth.new_from_at_rest
elif blockchain == "binance":
from_rest_function = bnb.new_from_at_rest
else:
raise exceptions.NotFound(f"Blockchain network {blockchain} is not supported")
return [from_rest_function(storage.get_json_from_object(x)) for x in storage.list_objects(f"{FOLDER}/{blockchain}/")]
def set_default_interchain_client(blockchain: str, name: str) -> "model.InterchainModel":
"""Set the default interchain model for this chain
Args:
blockchain: the blockchain of the desired client (i.e. bitcoin, ethereum, etc)
name: the name (id) of the network to set as default (user defined on the creation of the interchain)
Returns:
The client for the interchain which was set as default
Raises:
exceptions.NotFound: When trying to set a default to an interchain that doesn't exist on this chain
"""
# Make sure the specified interchain exists before setting as default
client = get_interchain_client(blockchain, name)
storage.put_object_as_json(f"{FOLDER}/default", {"version": "1", "blockchain": blockchain, "name": name})
return client
def get_default_interchain_client() -> "model.InterchainModel":
"""Get the interchain model which has been set as the default for this chain
Returns:
Instantiated InterchainModel
Raises:
exceptions.NotFound: When default has not been set, or set default cannot be found
NotImplementedError: WHen the saved default is a bad version
"""
default_dto = storage.get_json_from_object(f"{FOLDER}/default")
if default_dto.get("version") == "1":
return get_interchain_client(default_dto.get("blockchain"), default_dto.get("name"))
else:
raise NotImplementedError(f"Default dto error. Version {default_dto.get('version')} not supported")
def delete_interchain_client(blockchain: str, name: str) -> None:
"""Delete an interchain client from this chain
Args:
blockchain: the blockchain of the desired client (i.e. bitcoin, ethereum, etc)
name: the name (id) of the network to delete (user defined on the creation of the interchain)
"""
storage.delete(f"{FOLDER}/{blockchain}/{name}")
|
the-stack_0_9520 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for openvswitch rpc
"""
import stubout
from neutron.agent import rpc as agent_rpc
from neutron.common import topics
from neutron.openstack.common import context
from neutron.openstack.common import rpc
from neutron.plugins.openvswitch.common import constants
from neutron.plugins.openvswitch import ovs_neutron_plugin as povs
from neutron.tests import base
class rpcApiTestCase(base.BaseTestCase):
def _test_ovs_api(self, rpcapi, topic, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
expected_retval = 'foo' if method == 'call' else None
expected_msg = rpcapi.make_msg(method, **kwargs)
expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
if rpc_method == 'cast' and method == 'run_instance':
kwargs['call'] = False
self.fake_args = None
self.fake_kwargs = None
def _fake_rpc_method(*args, **kwargs):
self.fake_args = args
self.fake_kwargs = kwargs
if expected_retval:
return expected_retval
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(rpc, rpc_method, _fake_rpc_method)
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
expected_args = [ctxt, topic, expected_msg]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(arg, expected_arg)
def test_delete_network(self):
rpcapi = povs.AgentNotifierApi(topics.AGENT)
self._test_ovs_api(rpcapi,
topics.get_topic_name(topics.AGENT,
topics.NETWORK,
topics.DELETE),
'network_delete', rpc_method='fanout_cast',
network_id='fake_request_spec')
def test_port_update(self):
rpcapi = povs.AgentNotifierApi(topics.AGENT)
self._test_ovs_api(rpcapi,
topics.get_topic_name(topics.AGENT,
topics.PORT,
topics.UPDATE),
'port_update', rpc_method='fanout_cast',
port='fake_port',
network_type='fake_network_type',
segmentation_id='fake_segmentation_id',
physical_network='fake_physical_network')
def test_tunnel_update(self):
rpcapi = povs.AgentNotifierApi(topics.AGENT)
self._test_ovs_api(rpcapi,
topics.get_topic_name(topics.AGENT,
constants.TUNNEL,
topics.UPDATE),
'tunnel_update', rpc_method='fanout_cast',
tunnel_ip='fake_ip', tunnel_id='fake_id',
tunnel_type=None)
def test_device_details(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_ovs_api(rpcapi, topics.PLUGIN,
'get_device_details', rpc_method='call',
device='fake_device',
agent_id='fake_agent_id')
def test_update_device_down(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_ovs_api(rpcapi, topics.PLUGIN,
'update_device_down', rpc_method='call',
device='fake_device',
agent_id='fake_agent_id',
host='fake_host')
def test_tunnel_sync(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_ovs_api(rpcapi, topics.PLUGIN,
'tunnel_sync', rpc_method='call',
tunnel_ip='fake_tunnel_ip',
tunnel_type=None)
def test_update_device_up(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_ovs_api(rpcapi, topics.PLUGIN,
'update_device_up', rpc_method='call',
device='fake_device',
agent_id='fake_agent_id',
host='fake_host')
|
the-stack_0_9521 | """Tests for HTMLParser.py."""
import html.parser
import pprint
import unittest
from test import support
class EventCollector(html.parser.HTMLParser):
def __init__(self, *args, **kw):
self.events = []
self.append = self.events.append
html.parser.HTMLParser.__init__(self, *args, **kw)
def get_events(self):
# Normalize the list of events so that buffer artefacts don't
# separate runs of contiguous characters.
L = []
prevtype = None
for event in self.events:
type = event[0]
if type == prevtype == "data":
L[-1] = ("data", L[-1][1] + event[1])
else:
L.append(event)
prevtype = type
self.events = L
return L
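    # Illustrative example of the normalisation above (not part of the stdlib test):
    # feeding "ab<p>" in two chunks can produce [("data", "a"), ("data", "b"), ...],
    # which get_events collapses into [("data", "ab"), ("starttag", "p", [])].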
# structure markup
def handle_starttag(self, tag, attrs):
self.append(("starttag", tag, attrs))
def handle_startendtag(self, tag, attrs):
self.append(("startendtag", tag, attrs))
def handle_endtag(self, tag):
self.append(("endtag", tag))
# all other markup
def handle_comment(self, data):
self.append(("comment", data))
def handle_charref(self, data):
self.append(("charref", data))
def handle_data(self, data):
self.append(("data", data))
def handle_decl(self, data):
self.append(("decl", data))
def handle_entityref(self, data):
self.append(("entityref", data))
def handle_pi(self, data):
self.append(("pi", data))
def unknown_decl(self, decl):
self.append(("unknown decl", decl))
class EventCollectorExtra(EventCollector):
def handle_starttag(self, tag, attrs):
EventCollector.handle_starttag(self, tag, attrs)
self.append(("starttag_text", self.get_starttag_text()))
class EventCollectorCharrefs(EventCollector):
def get_events(self):
return self.events
def handle_charref(self, data):
self.fail('This should never be called with convert_charrefs=True')
def handle_entityref(self, data):
self.fail('This should never be called with convert_charrefs=True')
class TestCaseBase(unittest.TestCase):
def get_collector(self):
raise NotImplementedError
def _run_check(self, source, expected_events, collector=None):
if collector is None:
collector = self.get_collector()
parser = collector
for s in source:
parser.feed(s)
parser.close()
events = parser.get_events()
if events != expected_events:
self.fail("received events did not match expected events" +
"\nSource:\n" + repr(source) +
"\nExpected:\n" + pprint.pformat(expected_events) +
"\nReceived:\n" + pprint.pformat(events))
def _run_check_extra(self, source, events):
self._run_check(source, events,
EventCollectorExtra(convert_charrefs=False))
def _parse_error(self, source):
def parse(source=source):
parser = self.get_collector()
parser.feed(source)
parser.close()
with self.assertRaises(html.parser.HTMLParseError):
with self.assertWarns(DeprecationWarning):
parse()
class HTMLParserStrictTestCase(TestCaseBase):
def get_collector(self):
        with support.check_warnings(("", DeprecationWarning), quiet=False):
return EventCollector(strict=True, convert_charrefs=False)
def test_processing_instruction_only(self):
self._run_check("<?processing instruction>", [
("pi", "processing instruction"),
])
self._run_check("<?processing instruction ?>", [
("pi", "processing instruction ?"),
])
def test_simple_html(self):
self._run_check("""
<!DOCTYPE html PUBLIC 'foo'>
<HTML>&entity; 
<!--comment1a
-></foo><bar><<?pi?></foo<bar
comment1b-->
<Img sRc='Bar' isMAP>sample
text
“
<!--comment2a-- --comment2b-->
</Html>
""", [
("data", "\n"),
("decl", "DOCTYPE html PUBLIC 'foo'"),
("data", "\n"),
("starttag", "html", []),
("entityref", "entity"),
("charref", "32"),
("data", "\n"),
("comment", "comment1a\n-></foo><bar><<?pi?></foo<bar\ncomment1b"),
("data", "\n"),
("starttag", "img", [("src", "Bar"), ("ismap", None)]),
("data", "sample\ntext\n"),
("charref", "x201C"),
("data", "\n"),
("comment", "comment2a-- --comment2b"),
("data", "\n"),
("endtag", "html"),
("data", "\n"),
])
def test_malformatted_charref(self):
self._run_check("<p>&#bad;</p>", [
("starttag", "p", []),
("data", "&#bad;"),
("endtag", "p"),
])
def test_unclosed_entityref(self):
self._run_check("&entityref foo", [
("entityref", "entityref"),
("data", " foo"),
])
def test_bad_nesting(self):
# Strangely, this *is* supposed to test that overlapping
# elements are allowed. HTMLParser is more geared toward
        # lexing the input than parsing the structure.
self._run_check("<a><b></a></b>", [
("starttag", "a", []),
("starttag", "b", []),
("endtag", "a"),
("endtag", "b"),
])
def test_bare_ampersands(self):
self._run_check("this text & contains & ampersands &", [
("data", "this text & contains & ampersands &"),
])
def test_bare_pointy_brackets(self):
self._run_check("this < text > contains < bare>pointy< brackets", [
("data", "this < text > contains < bare>pointy< brackets"),
])
def test_illegal_declarations(self):
self._parse_error('<!spacer type="block" height="25">')
def test_starttag_end_boundary(self):
self._run_check("""<a b='<'>""", [("starttag", "a", [("b", "<")])])
self._run_check("""<a b='>'>""", [("starttag", "a", [("b", ">")])])
def test_buffer_artefacts(self):
output = [("starttag", "a", [("b", "<")])]
self._run_check(["<a b='<'>"], output)
self._run_check(["<a ", "b='<'>"], output)
self._run_check(["<a b", "='<'>"], output)
self._run_check(["<a b=", "'<'>"], output)
self._run_check(["<a b='<", "'>"], output)
self._run_check(["<a b='<'", ">"], output)
output = [("starttag", "a", [("b", ">")])]
self._run_check(["<a b='>'>"], output)
self._run_check(["<a ", "b='>'>"], output)
self._run_check(["<a b", "='>'>"], output)
self._run_check(["<a b=", "'>'>"], output)
self._run_check(["<a b='>", "'>"], output)
self._run_check(["<a b='>'", ">"], output)
output = [("comment", "abc")]
self._run_check(["", "<!--abc-->"], output)
self._run_check(["<", "!--abc-->"], output)
self._run_check(["<!", "--abc-->"], output)
self._run_check(["<!-", "-abc-->"], output)
self._run_check(["<!--", "abc-->"], output)
self._run_check(["<!--a", "bc-->"], output)
self._run_check(["<!--ab", "c-->"], output)
self._run_check(["<!--abc", "-->"], output)
self._run_check(["<!--abc-", "->"], output)
self._run_check(["<!--abc--", ">"], output)
self._run_check(["<!--abc-->", ""], output)
def test_starttag_junk_chars(self):
self._parse_error("</>")
self._parse_error("</$>")
self._parse_error("</")
self._parse_error("</a")
self._parse_error("<a<a>")
self._parse_error("</a<a>")
self._parse_error("<!")
self._parse_error("<a")
self._parse_error("<a foo='bar'")
self._parse_error("<a foo='bar")
self._parse_error("<a foo='>'")
self._parse_error("<a foo='>")
self._parse_error("<a$>")
self._parse_error("<a$b>")
self._parse_error("<a$b/>")
self._parse_error("<a$b >")
self._parse_error("<a$b />")
def test_valid_doctypes(self):
# from http://www.w3.org/QA/2002/04/valid-dtd-list.html
dtds = ['HTML', # HTML5 doctype
('HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
'"http://www.w3.org/TR/html4/strict.dtd"'),
('HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" '
'"http://www.w3.org/TR/html4/loose.dtd"'),
('html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"'),
('html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd"'),
('math PUBLIC "-//W3C//DTD MathML 2.0//EN" '
'"http://www.w3.org/Math/DTD/mathml2/mathml2.dtd"'),
('html PUBLIC "-//W3C//DTD '
'XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" '
'"http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"'),
('svg PUBLIC "-//W3C//DTD SVG 1.1//EN" '
'"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"'),
'html PUBLIC "-//IETF//DTD HTML 2.0//EN"',
'html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN"']
for dtd in dtds:
self._run_check("<!DOCTYPE %s>" % dtd,
[('decl', 'DOCTYPE ' + dtd)])
def test_declaration_junk_chars(self):
self._parse_error("<!DOCTYPE foo $ >")
def test_startendtag(self):
self._run_check("<p/>", [
("startendtag", "p", []),
])
self._run_check("<p></p>", [
("starttag", "p", []),
("endtag", "p"),
])
self._run_check("<p><img src='foo' /></p>", [
("starttag", "p", []),
("startendtag", "img", [("src", "foo")]),
("endtag", "p"),
])
def test_get_starttag_text(self):
s = """<foo:bar \n one="1"\ttwo=2 >"""
self._run_check_extra(s, [
("starttag", "foo:bar", [("one", "1"), ("two", "2")]),
("starttag_text", s)])
def test_cdata_content(self):
contents = [
'<!-- not a comment --> ¬-an-entity-ref;',
"<not a='start tag'>",
'<a href="" /> <p> <span></span>',
'foo = "</scr" + "ipt>";',
'foo = "</SCRIPT" + ">";',
'foo = <\n/script> ',
'<!-- document.write("</scr" + "ipt>"); -->',
('\n//<![CDATA[\n'
'document.write(\'<s\'+\'cript type="text/javascript" '
'src="http://www.example.org/r=\'+new '
'Date().getTime()+\'"><\\/s\'+\'cript>\');\n//]]>'),
'\n<!-- //\nvar foo = 3.14;\n// -->\n',
'foo = "</sty" + "le>";',
'<!-- \u2603 -->',
# these two should be invalid according to the HTML 5 spec,
# section 8.1.2.2
#'foo = </\nscript>',
#'foo = </ script>',
]
elements = ['script', 'style', 'SCRIPT', 'STYLE', 'Script', 'Style']
for content in contents:
for element in elements:
element_lower = element.lower()
s = '<{element}>{content}</{element}>'.format(element=element,
content=content)
self._run_check(s, [("starttag", element_lower, []),
("data", content),
("endtag", element_lower)])
def test_cdata_with_closing_tags(self):
# see issue #13358
# make sure that HTMLParser calls handle_data only once for each CDATA.
# The normal event collector normalizes the events in get_events,
# so we override it to return the original list of events.
class Collector(EventCollector):
def get_events(self):
return self.events
content = """<!-- not a comment --> ¬-an-entity-ref;
<a href="" /> </p><p> <span></span></style>
'</script' + '>'"""
for element in [' script', 'script ', ' script ',
'\nscript', 'script\n', '\nscript\n']:
element_lower = element.lower().strip()
s = '<script>{content}</{element}>'.format(element=element,
content=content)
self._run_check(s, [("starttag", element_lower, []),
("data", content),
("endtag", element_lower)],
collector=Collector(convert_charrefs=False))
def test_comments(self):
html = ("<!-- I'm a valid comment -->"
'<!--me too!-->'
'<!------>'
'<!---->'
'<!----I have many hyphens---->'
'<!-- I have a > in the middle -->'
'<!-- and I have -- in the middle! -->')
expected = [('comment', " I'm a valid comment "),
('comment', 'me too!'),
('comment', '--'),
('comment', ''),
('comment', '--I have many hyphens--'),
('comment', ' I have a > in the middle '),
('comment', ' and I have -- in the middle! ')]
self._run_check(html, expected)
def test_condcoms(self):
html = ('<!--[if IE & !(lte IE 8)]>aren\'t<![endif]-->'
'<!--[if IE 8]>condcoms<![endif]-->'
'<!--[if lte IE 7]>pretty?<![endif]-->')
expected = [('comment', "[if IE & !(lte IE 8)]>aren't<![endif]"),
('comment', '[if IE 8]>condcoms<![endif]'),
('comment', '[if lte IE 7]>pretty?<![endif]')]
self._run_check(html, expected)
def test_convert_charrefs(self):
collector = lambda: EventCollectorCharrefs(convert_charrefs=True)
self.assertTrue(collector().convert_charrefs)
charrefs = ['"', '"', '"', '"', '"', '"']
# check charrefs in the middle of the text/attributes
expected = [('starttag', 'a', [('href', 'foo"zar')]),
('data', 'a"z'), ('endtag', 'a')]
for charref in charrefs:
self._run_check('<a href="foo{0}zar">a{0}z</a>'.format(charref),
expected, collector=collector())
# check charrefs at the beginning/end of the text/attributes
expected = [('data', '"'),
('starttag', 'a', [('x', '"'), ('y', '"X'), ('z', 'X"')]),
('data', '"'), ('endtag', 'a'), ('data', '"')]
for charref in charrefs:
self._run_check('{0}<a x="{0}" y="{0}X" z="X{0}">'
'{0}</a>{0}'.format(charref),
expected, collector=collector())
# check charrefs in <script>/<style> elements
for charref in charrefs:
text = 'X'.join([charref]*3)
expected = [('data', '"'),
('starttag', 'script', []), ('data', text),
('endtag', 'script'), ('data', '"'),
('starttag', 'style', []), ('data', text),
('endtag', 'style'), ('data', '"')]
self._run_check('{1}<script>{0}</script>{1}'
'<style>{0}</style>{1}'.format(text, charref),
expected, collector=collector())
# check truncated charrefs at the end of the file
html = '&quo &# &#x'
for x in range(1, len(html)):
self._run_check(html[:x], [('data', html[:x])],
collector=collector())
# check a string with no charrefs
self._run_check('no charrefs here', [('data', 'no charrefs here')],
collector=collector())
class HTMLParserTolerantTestCase(HTMLParserStrictTestCase):
def get_collector(self):
return EventCollector(convert_charrefs=False)
def test_deprecation_warnings(self):
with self.assertWarns(DeprecationWarning):
EventCollector() # convert_charrefs not passed explicitly
with self.assertWarns(DeprecationWarning):
EventCollector(strict=True)
with self.assertWarns(DeprecationWarning):
EventCollector(strict=False)
with self.assertRaises(html.parser.HTMLParseError):
with self.assertWarns(DeprecationWarning):
EventCollector().error('test')
def test_tolerant_parsing(self):
self._run_check('<html <html>te>>xt&a<<bc</a></html>\n'
'<img src="URL><//img></html</html>', [
('starttag', 'html', [('<html', None)]),
('data', 'te>>xt'),
('entityref', 'a'),
('data', '<'),
('starttag', 'bc<', [('a', None)]),
('endtag', 'html'),
('data', '\n<img src="URL>'),
('comment', '/img'),
('endtag', 'html<')])
def test_starttag_junk_chars(self):
self._run_check("</>", [])
self._run_check("</$>", [('comment', '$')])
self._run_check("</", [('data', '</')])
self._run_check("</a", [('data', '</a')])
self._run_check("<a<a>", [('starttag', 'a<a', [])])
self._run_check("</a<a>", [('endtag', 'a<a')])
self._run_check("<!", [('data', '<!')])
self._run_check("<a", [('data', '<a')])
self._run_check("<a foo='bar'", [('data', "<a foo='bar'")])
self._run_check("<a foo='bar", [('data', "<a foo='bar")])
self._run_check("<a foo='>'", [('data', "<a foo='>'")])
self._run_check("<a foo='>", [('data', "<a foo='>")])
self._run_check("<a$>", [('starttag', 'a$', [])])
self._run_check("<a$b>", [('starttag', 'a$b', [])])
self._run_check("<a$b/>", [('startendtag', 'a$b', [])])
self._run_check("<a$b >", [('starttag', 'a$b', [])])
self._run_check("<a$b />", [('startendtag', 'a$b', [])])
def test_slashes_in_starttag(self):
self._run_check('<a foo="var"/>', [('startendtag', 'a', [('foo', 'var')])])
html = ('<img width=902 height=250px '
'src="/sites/default/files/images/homepage/foo.jpg" '
'/*what am I doing here*/ />')
expected = [(
'startendtag', 'img',
[('width', '902'), ('height', '250px'),
('src', '/sites/default/files/images/homepage/foo.jpg'),
('*what', None), ('am', None), ('i', None),
('doing', None), ('here*', None)]
)]
self._run_check(html, expected)
html = ('<a / /foo/ / /=/ / /bar/ / />'
'<a / /foo/ / /=/ / /bar/ / >')
expected = [
('startendtag', 'a', [('foo', None), ('=', None), ('bar', None)]),
('starttag', 'a', [('foo', None), ('=', None), ('bar', None)])
]
self._run_check(html, expected)
#see issue #14538
html = ('<meta><meta / ><meta // ><meta / / >'
'<meta/><meta /><meta //><meta//>')
expected = [
('starttag', 'meta', []), ('starttag', 'meta', []),
('starttag', 'meta', []), ('starttag', 'meta', []),
('startendtag', 'meta', []), ('startendtag', 'meta', []),
('startendtag', 'meta', []), ('startendtag', 'meta', []),
]
self._run_check(html, expected)
def test_declaration_junk_chars(self):
self._run_check("<!DOCTYPE foo $ >", [('decl', 'DOCTYPE foo $ ')])
def test_illegal_declarations(self):
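# a declaration that is not a comment, DOCTYPE, or marked section is reported as a comment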
self._run_check('<!spacer type="block" height="25">',
[('comment', 'spacer type="block" height="25"')])
def test_with_unquoted_attributes(self):
# see #12008
html = ("<html><body bgcolor=d0ca90 text='181008'>"
"<table cellspacing=0 cellpadding=1 width=100% ><tr>"
"<td align=left><font size=-1>"
"- <a href=/rabota/><span class=en> software-and-i</span></a>"
"- <a href='/1/'><span class=en> library</span></a></table>")
expected = [
('starttag', 'html', []),
('starttag', 'body', [('bgcolor', 'd0ca90'), ('text', '181008')]),
('starttag', 'table',
[('cellspacing', '0'), ('cellpadding', '1'), ('width', '100%')]),
('starttag', 'tr', []),
('starttag', 'td', [('align', 'left')]),
('starttag', 'font', [('size', '-1')]),
('data', '- '), ('starttag', 'a', [('href', '/rabota/')]),
('starttag', 'span', [('class', 'en')]), ('data', ' software-and-i'),
('endtag', 'span'), ('endtag', 'a'),
('data', '- '), ('starttag', 'a', [('href', '/1/')]),
('starttag', 'span', [('class', 'en')]), ('data', ' library'),
('endtag', 'span'), ('endtag', 'a'), ('endtag', 'table')
]
self._run_check(html, expected)
def test_comma_between_attributes(self):
self._run_check('<form action="/xxx.php?a=1&b=2&", '
'method="post">', [
('starttag', 'form',
[('action', '/xxx.php?a=1&b=2&'),
(',', None), ('method', 'post')])])
def test_weird_chars_in_unquoted_attribute_values(self):
self._run_check('<form action=bogus|&#()value>', [
('starttag', 'form',
[('action', 'bogus|&#()value')])])
def test_invalid_end_tags(self):
# A collection of broken end tags. <br> is used as a separator.
# see http://www.w3.org/TR/html5/tokenization.html#end-tag-open-state
# and #13993
html = ('<br></label</p><br></div end tmAd-leaderBoard><br></<h4><br>'
'</li class="unit"><br></li\r\n\t\t\t\t\t\t</ul><br></><br>')
expected = [('starttag', 'br', []),
# < is part of the name, / is discarded, p is an attribute
('endtag', 'label<'),
('starttag', 'br', []),
# text and attributes are discarded
('endtag', 'div'),
('starttag', 'br', []),
# comment because the first char after </ is not a-zA-Z
('comment', '<h4'),
('starttag', 'br', []),
# attributes are discarded
('endtag', 'li'),
('starttag', 'br', []),
# everything till ul (included) is discarded
('endtag', 'li'),
('starttag', 'br', []),
# </> is ignored
('starttag', 'br', [])]
self._run_check(html, expected)
def test_broken_invalid_end_tag(self):
# This is technically wrong (the "> shouldn't be included in the 'data')
# but it is probably not worth fixing (besides handling all the cases from
# the previous test, it would require full attribute parsing).
# see #13993
html = '<b>This</b attr=">"> confuses the parser'
expected = [('starttag', 'b', []),
('data', 'This'),
('endtag', 'b'),
('data', '"> confuses the parser')]
self._run_check(html, expected)
def test_correct_detection_of_start_tags(self):
# see #13273
html = ('<div style="" ><b>The <a href="some_url">rain</a> '
'<br /> in <span>Spain</span></b></div>')
expected = [
('starttag', 'div', [('style', '')]),
('starttag', 'b', []),
('data', 'The '),
('starttag', 'a', [('href', 'some_url')]),
('data', 'rain'),
('endtag', 'a'),
('data', ' '),
('startendtag', 'br', []),
('data', ' in '),
('starttag', 'span', []),
('data', 'Spain'),
('endtag', 'span'),
('endtag', 'b'),
('endtag', 'div')
]
self._run_check(html, expected)
html = '<div style="", foo = "bar" ><b>The <a href="some_url">rain</a>'
expected = [
('starttag', 'div', [('style', ''), (',', None), ('foo', 'bar')]),
('starttag', 'b', []),
('data', 'The '),
('starttag', 'a', [('href', 'some_url')]),
('data', 'rain'),
('endtag', 'a'),
]
self._run_check(html, expected)
def test_EOF_in_charref(self):
# see #17802
# This test checks that the UnboundLocalError reported in the issue
# is not raised; however, I'm not sure the returned values are correct.
# Maybe HTMLParser should use self.unescape for these.
data = [
('a&', [('data', 'a&')]),
('a&b', [('data', 'ab')]),
('a&b ', [('data', 'a'), ('entityref', 'b'), ('data', ' ')]),
('a&b;', [('data', 'a'), ('entityref', 'b')]),
]
for html, expected in data:
self._run_check(html, expected)
def test_unescape_method(self):
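# the deprecated unescape() method must agree with html.unescape() for both
# valid and bogus character references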
from html import unescape
p = self.get_collector()
with self.assertWarns(DeprecationWarning):
s = '""""""&#bad;'
self.assertEqual(p.unescape(s), unescape(s))
def test_broken_comments(self):
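# '<!' not followed by '--', a proper declaration, or a marked section starts a
# bogus comment whose content runs up to the next '>'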
html = ('<! not really a comment >'
'<! not a comment either -->'
'<! -- close enough -->'
'<!><!<-- this was an empty comment>'
'<!!! another bogus comment !!!>')
expected = [
('comment', ' not really a comment '),
('comment', ' not a comment either --'),
('comment', ' -- close enough --'),
('comment', ''),
('comment', '<-- this was an empty comment'),
('comment', '!! another bogus comment !!!'),
]
self._run_check(html, expected)
def test_broken_condcoms(self):
# these condcoms are missing the '--' after '<!' and before the '>'
html = ('<![if !(IE)]>broken condcom<![endif]>'
'<![if ! IE]><link href="favicon.tiff"/><![endif]>'
'<![if !IE 6]><img src="firefox.png" /><![endif]>'
'<![if !ie 6]><b>foo</b><![endif]>'
'<![if (!IE)|(lt IE 9)]><img src="mammoth.bmp" /><![endif]>')
# According to the HTML5 specs sections "8.2.4.44 Bogus comment state"
# and "8.2.4.45 Markup declaration open state", comment tokens should
# be emitted instead of 'unknown decl', but calling unknown_decl
# provides more flexibility.
# See also Lib/_markupbase.py:parse_declaration
expected = [
('unknown decl', 'if !(IE)'),
('data', 'broken condcom'),
('unknown decl', 'endif'),
('unknown decl', 'if ! IE'),
('startendtag', 'link', [('href', 'favicon.tiff')]),
('unknown decl', 'endif'),
('unknown decl', 'if !IE 6'),
('startendtag', 'img', [('src', 'firefox.png')]),
('unknown decl', 'endif'),
('unknown decl', 'if !ie 6'),
('starttag', 'b', []),
('data', 'foo'),
('endtag', 'b'),
('unknown decl', 'endif'),
('unknown decl', 'if (!IE)|(lt IE 9)'),
('startendtag', 'img', [('src', 'mammoth.bmp')]),
('unknown decl', 'endif')
]
self._run_check(html, expected)
class AttributesStrictTestCase(TestCaseBase):
def get_collector(self):
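# instantiating with strict=True emits a DeprecationWarning, so silence it here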
with support.check_warnings(("", DeprecationWarning), quiet=False):
return EventCollector(strict=True, convert_charrefs=False)
def test_attr_syntax(self):
output = [
("starttag", "a", [("b", "v"), ("c", "v"), ("d", "v"), ("e", None)])
]
self._run_check("""<a b='v' c="v" d=v e>""", output)
self._run_check("""<a b = 'v' c = "v" d = v e>""", output)
self._run_check("""<a\nb\n=\n'v'\nc\n=\n"v"\nd\n=\nv\ne>""", output)
self._run_check("""<a\tb\t=\t'v'\tc\t=\t"v"\td\t=\tv\te>""", output)
def test_attr_values(self):
self._run_check("""<a b='xxx\n\txxx' c="yyy\t\nyyy" d='\txyz\n'>""",
[("starttag", "a", [("b", "xxx\n\txxx"),
("c", "yyy\t\nyyy"),
("d", "\txyz\n")])])
self._run_check("""<a b='' c="">""",
[("starttag", "a", [("b", ""), ("c", "")])])
# Regression test for SF patch #669683.
self._run_check("<e a=rgb(1,2,3)>",
[("starttag", "e", [("a", "rgb(1,2,3)")])])
# Regression test for SF bug #921657.
self._run_check(
"<a href=mailto:[email protected]>",
[("starttag", "a", [("href", "mailto:[email protected]")])])
def test_attr_nonascii(self):
# see issue 7311
self._run_check(
"<img src=/foo/bar.png alt=\u4e2d\u6587>",
[("starttag", "img", [("src", "/foo/bar.png"),
("alt", "\u4e2d\u6587")])])
self._run_check(
"<a title='\u30c6\u30b9\u30c8' href='\u30c6\u30b9\u30c8.html'>",
[("starttag", "a", [("title", "\u30c6\u30b9\u30c8"),
("href", "\u30c6\u30b9\u30c8.html")])])
self._run_check(
'<a title="\u30c6\u30b9\u30c8" href="\u30c6\u30b9\u30c8.html">',
[("starttag", "a", [("title", "\u30c6\u30b9\u30c8"),
("href", "\u30c6\u30b9\u30c8.html")])])
def test_attr_entity_replacement(self):
self._run_check(
"<a b='&><"''>",
[("starttag", "a", [("b", "&><\"'")])])
def test_attr_funky_names(self):
self._run_check(
"<a a.b='v' c:d=v e-f=v>",
[("starttag", "a", [("a.b", "v"), ("c:d", "v"), ("e-f", "v")])])
def test_entityrefs_in_attributes(self):
self._run_check(
"<html foo='€&aa&unsupported;'>",
[("starttag", "html", [("foo", "\u20AC&aa&unsupported;")])])
class AttributesTolerantTestCase(AttributesStrictTestCase):
def get_collector(self):
return EventCollector(convert_charrefs=False)
def test_attr_funky_names2(self):
self._run_check(
"<a $><b $=%><c \=/>",
[("starttag", "a", [("$", None)]),
("starttag", "b", [("$", "%")]),
("starttag", "c", [("\\", "/")])])
def test_entities_in_attribute_value(self):
# see #1200313
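# every spelling of '&' must decode to a bare '&' in double-quoted,
# single-quoted, and unquoted attribute values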
for entity in ['&', '&amp;', '&#38;', '&#x26;']:
self._run_check('<a href="%s">' % entity,
[("starttag", "a", [("href", "&")])])
self._run_check("<a href='%s'>" % entity,
[("starttag", "a", [("href", "&")])])
self._run_check("<a href=%s>" % entity,
[("starttag", "a", [("href", "&")])])
def test_malformed_attributes(self):
# see #13357
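# &nbsp; decodes to '\xa0', which is not attribute-splitting whitespace, so it
# stays inside the unquoted href value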
html = (
"<a href=test'style='color:red;bad1'>test - bad1</a>"
"<a href=test'+style='color:red;ba2'>test - bad2</a>"
"<a href=test' style='color:red;bad3'>test - bad3</a>"
"<a href = test' style='color:red;bad4' >test - bad4</a>"
)
expected = [
('starttag', 'a', [('href', "test'style='color:red;bad1'")]),
('data', 'test - bad1'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'+style='color:red;ba2'")]),
('data', 'test - bad2'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'\xa0style='color:red;bad3'")]),
('data', 'test - bad3'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'\xa0style='color:red;bad4'")]),
('data', 'test - bad4'), ('endtag', 'a')
]
self._run_check(html, expected)
def test_malformed_adjacent_attributes(self):
# see #12629
self._run_check('<x><y z=""o"" /></x>',
[('starttag', 'x', []),
('startendtag', 'y', [('z', ''), ('o""', None)]),
('endtag', 'x')])
self._run_check('<x><y z="""" /></x>',
[('starttag', 'x', []),
('startendtag', 'y', [('z', ''), ('""', None)]),
('endtag', 'x')])
# see #755670 for the following 3 tests
def test_adjacent_attributes(self):
self._run_check('<a width="100%"cellspacing=0>',
[("starttag", "a",
[("width", "100%"), ("cellspacing","0")])])
self._run_check('<a id="foo"class="bar">',
[("starttag", "a",
[("id", "foo"), ("class","bar")])])
def test_missing_attribute_value(self):
self._run_check('<a v=>',
[("starttag", "a", [("v", "")])])
def test_javascript_attribute_value(self):
self._run_check("<a href=javascript:popup('/popup/help.html')>",
[("starttag", "a",
[("href", "javascript:popup('/popup/help.html')")])])
def test_end_tag_in_attribute_value(self):
# see #1745761
self._run_check("<a href='http://www.example.org/\">;'>spam</a>",
[("starttag", "a",
[("href", "http://www.example.org/\">;")]),
("data", "spam"), ("endtag", "a")])
if __name__ == "__main__":
unittest.main()
|
the-stack_0_9523 | import os
import unittest
from openeo_pg_parser.translate import translate_process_graph
class GraphTester(unittest.TestCase):
""" Tests all functionalities of the class `Graph`. """
def setUp(self):
""" Setting up variables for one test. """
pg_dirpath = os.path.join(os.path.dirname(__file__), 'process_graphs')
self.max_ndvi_pg_filepath = os.path.join(pg_dirpath, "s2_max_ndvi.json")
def test_sort_process_graph(self):
""" Tests sorting of a process graph. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
assert list(graph.ids) == ["apply_0", "linear_scale_range_1", "load_collection_2", "reduce_bands_3", "red_4",
"nir_5", "ndvi_6", "reduce_time_7", "max_8", "save_9"]
sorted_graph = graph.sort(by='dependency')
assert list(sorted_graph.ids) == ["load_collection_2", "reduce_bands_3", "red_4", "nir_5", "ndvi_6",
"reduce_time_7", "max_8", "apply_0", "linear_scale_range_1", "save_9"]
def test_get_parent_process(self):
""" Tests to retrieve the parent process of an embedded process graph. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
lsr_node = graph['linear_scale_range_1']
apply_node = graph['apply_0']
assert lsr_node.parent_process == apply_node
def test_is_reducer(self):
""" Tests reducer identification. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
apply_node = graph['apply_0']
assert not apply_node.is_reducer
reduce_node = graph['reduce_time_7']
assert reduce_node.is_reducer
def test_get_dimension(self):
""" Tests dimension retrieval. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
apply_node = graph['apply_0']
assert apply_node.dimension is None
reduce_node = graph['reduce_time_7']
assert reduce_node.dimension == 't'
def test_get_node_by_id(self):
""" Tests node access in a graph by node id. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
apply_node = graph['apply_0']
assert apply_node.id == 'apply_0'
def test_get_node_by_name(self):
""" Tests node access in a graph by node name. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
apply_node = graph['apply']
assert apply_node.id == 'apply_0'
def test_has_descendant_process(self):
""" Tests if a node has a descendant process. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
dc_node = graph['load_collection_2']
assert dc_node.has_descendant_process(graph, 'save_result')
def test_to_igraph(self):
""" Tests conversion of internal graph to an iGraph object. """
graph = translate_process_graph(self.max_ndvi_pg_filepath)
graph.to_igraph(edge_name="process")
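# smoke test: conversion should complete without raising; the iGraph object itself is not inspected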
assert True
if __name__ == '__main__':
unittest.main() |