filename | text
---|---
the-stack_0_12448 | from lyrebird.mock import context
from flask import Response, stream_with_context
import json
class MockHandler:
"""
Look up the mock data that matches the currently configured data group's matching rules.
If no matching data is found, hand the request off to the next handler.
"""
def handle(self, handler_context):
data = context.application.data_manager.router.get_mock_data(handler_context.flow)
if data:
handler_context.response = self.data2response(data)
def data2response(self, data):
resp_info = json.loads(data.response.content)
code = resp_info['code']
headers = resp_info['headers']
headers['lyrebird'] = 'mock'
resp_data = data.response_data.content
if resp_data:
if isinstance(resp_data, str):
data_len = len(resp_data.encode())
else:
data_len = len(resp_data)
headers['Content-Length'] = data_len
def gen():
yield resp_data
return Response(stream_with_context(gen()), status=code, headers=headers)
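# Illustrative note (not part of the original handler): the stored mock entry is
# assumed to keep its status code and headers as JSON in `response.content`,
# roughly like
#   {"code": 200, "headers": {"Content-Type": "application/json"}}
# while the body itself comes from `response_data.content` and may be either
# str or bytes, which is why Content-Length is computed from the encoded length.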
|
the-stack_0_12449 | from flask import Blueprint
from flask_admin.base import MenuLink
from flask_admin.consts import ICON_TYPE_IMAGE
from ddui.dash_app import app as dash_view
from airflow.plugins_manager import AirflowPlugin
ml_repo_website = MenuLink(
category='DataDriver',
name='Git repository',
url='https://gitlab.octo.com/dd/ddui.git',
icon_type=ICON_TYPE_IMAGE,
icon_value='brian/git.png'
)
ml_doc = MenuLink(
category='DataDriver',
name='DataDriver API documentation',
url='http://datadriver-doc-ddapi.s3-website-eu-west-1.amazonaws.com/',
icon_type=ICON_TYPE_IMAGE,
icon_value='brian/sigle.png'
)
ml_version = MenuLink(
category='DataDriver',
name='Version',
url='/dash/version',
icon_type=ICON_TYPE_IMAGE,
icon_value='brian/sigle.png'
)
brian_bp = Blueprint(
"brian_web", __name__,
template_folder='templates',
static_folder='static/brian',
static_url_path='/static/brian',
)
class DataDriverUIPlugin(AirflowPlugin):
name = 'DataDriver UI Plugin'
operators = []
hooks = []
executors = []
macros = []
admin_views = [dash_view]
flask_blueprints = [brian_bp]
menu_links = [ml_doc, ml_repo_website, ml_version] |
the-stack_0_12451 | from threading import Thread, Event
from queue import Queue
import time
import numpy as np
import traceback
# This code originally used Process not Thread.
# Process is much slower to start (Process.start() is ~100 ms, Thread.start() is a few ms)
# The process-safe versions of Queue and Event are also significantly slower.
# On the other hand, CPU-bound Python threads can't run in parallel ("global interpreter lock").
# The overall problem is not CPU-bound - we should always be limited by tProc execution.
# In the worst case where the tProc is running fast, we should actually be waiting for IO a lot (due to the DMA).
# So we think it's safe to use threads.
# However, this is a complicated problem and we may ultimately need to mess around with sys.setswitchinterval() or go back to Process.
# To use Process instead of Thread, use the following import and change WORKERTYPE.
#from multiprocessing import Process, Queue, Event
class DataStreamer():
"""
Uses a separate thread to read data from the average buffers.
The class methods define the readout loop and initialization of the worker thread.
The QickSoc methods start_readout() and poll_data() are the external interface to the streamer.
We don't lock the QickSoc or the IPs. The user is responsible for not disrupting a readout in progress.
:param soc: The QickSoc object.
:type soc: QickSoc
"""
#WORKERTYPE = Process
WORKERTYPE = Thread
def __init__(self, soc):
self.soc = soc
self.start_worker()
def start_worker(self):
# Initialize flags and queues.
# Passes run commands from the main thread to the worker thread.
self.job_queue = Queue()
# Passes data from the worker thread to the main thread.
self.data_queue = Queue()
# Passes exceptions from the worker thread to the main thread.
self.error_queue = Queue()
# The main thread can use this flag to tell the worker thread to stop.
# The main thread clears the flag when starting readout.
self.stop_flag = Event()
# The worker thread uses this to tell the main thread when it's done.
# The main thread clears the flag when starting readout.
self.done_flag = Event()
self.done_flag.set()
# Process object for the streaming readout.
# daemon=True means the readout thread will be killed if the parent is killed
self.readout_worker = self.WORKERTYPE(target=self._run_readout, daemon=True)
self.readout_worker.start()
def stop_readout(self):
"""
Signal the readout loop to break.
"""
self.stop_flag.set()
def readout_running(self):
"""
Test if the readout loop is running.
:return: readout thread status
:rtype: bool
"""
return not self.done_flag.is_set()
def data_available(self):
"""
Test if data is available in the queue.
:return: data queue status
:rtype: bool
"""
return not self.data_queue.empty()
def _run_readout(self):
"""
Worker thread for the streaming readout
:param total_count: Number of data points expected
:type addr: int
:param counter_addr: Data memory address for the loop counter
:type counter_addr: int
:param ch_list: List of readout channels
:type addr: list
:param reads_per_count: Number of data points to expect per counter increment
:type reads_per_count: int
"""
while True:
try:
# wait for a job
total_count, counter_addr, ch_list, reads_per_count = self.job_queue.get(block=True)
#print("streamer loop: start", total_count)
count = 0
last_count = 0
# how many measurements to transfer at a time
stride = int(0.1 * self.soc.get_avg_max_length(0))
# bigger stride is more efficient, but the transfer size must never exceed AVG_MAX_LENGTH, so the stride should be set with some safety margin
# make sure count variable is reset to 0 before starting processor
self.soc.tproc.single_write(addr=counter_addr, data=0)
stats = []
t_start = time.time()
# if the tproc is configured for internal start, this will start the program
# for external start, the program will not start until a start pulse is received
self.soc.tproc.start()
# Keep streaming data until you get all of it
while last_count < total_count:
if self.stop_flag.is_set():
print("streamer loop: got stop flag")
break
count = self.soc.tproc.single_read(
addr=counter_addr)*reads_per_count
# wait until either you've gotten a full stride of measurements or you've finished (so you don't go crazy trying to download every measurement)
if count >= min(last_count+stride, total_count):
addr = last_count % self.soc.get_avg_max_length(0)
length = count-last_count
if length >= self.soc.get_avg_max_length(0):
raise RuntimeError("Overflowed the averages buffer (%d unread samples >= buffer size %d)."
% (length, self.soc.get_avg_max_length(0)) +
"\nYou need to slow down the tProc by increasing relax_delay." +
"\nIf the TQDM progress bar is enabled, disabling it may help.")
# transfers must be of even length; trim the length (instead of padding it)
# don't trim if this is the last read of the run
if count < total_count:
length -= length % 2
# buffer for each channel
d_buf = np.zeros((len(ch_list), 2, length))
# for each adc channel get the single shot data and add it to the buffer
for iCh, ch in enumerate(ch_list):
data = self.soc.get_accumulated(
ch=ch, address=addr, length=length)
d_buf[iCh] = data
last_count += length
stats = (time.time()-t_start, count, addr, length)
self.data_queue.put((length, (d_buf, stats)))
#if last_count==total_count: print("streamer loop: normal completion")
except Exception as e:
print("streamer loop: got exception")
traceback.print_exc()
# pass the exception to the main thread
self.error_queue.put(e)
# put dummy data in the data queue, to trigger a poll_data read
self.data_queue.put((0, (None, None)))
finally:
# we should set the done flag regardless of whether we completed readout, used the stop flag, or errored out
self.done_flag.set()
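# Minimal usage sketch (hedged; in QICK proper this wiring is done by
# QickSoc.start_readout() and poll_data(), and `soc` is assumed to be a QickSoc):
#
#   streamer = DataStreamer(soc)
#   streamer.stop_flag.clear()
#   streamer.done_flag.clear()
#   streamer.job_queue.put((total_count, counter_addr, ch_list, reads_per_count))
#   while streamer.readout_running() or streamer.data_available():
#       length, (d_buf, stats) = streamer.data_queue.get()
#       # accumulate d_buf chunks until total_count points have arrived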
|
the-stack_0_12452 | from django import template
register = template.Library()
@register.filter
def getcount(item, choice):
"""returns the number of times choice has been selected for item"""
return item.userchoices.filter(choice=choice).count()
@register.filter
def getuniqueitems(userchoices):
"""return a list of unique items given a bunch of userchoices"""
items = []
for userchoice in userchoices:
if userchoice.item not in items:
items.append(userchoice.item)
return items
@register.filter
def getzerochoiceitems(items, choice):
"""return a list of unique items where the given choice has been chosen zero times"""
returnitems = []
for item in items:
if item.userchoices.filter(choice=choice).count()==0:
if item not in returnitems:
returnitems.append(item)
return returnitems
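# Hedged usage sketch: once this templatetag module is loaded in a template,
# the filters above can be applied like so (variable names are illustrative):
#
#   {{ item|getcount:choice }}
#   {% for item in userchoices|getuniqueitems %} ... {% endfor %}
#   {% for item in items|getzerochoiceitems:choice %} ... {% endfor %}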
|
the-stack_0_12454 | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Partial implementations for resource processing.
Resources are processed according to type, by a series of methods that deal with the specifics for
each resource type. Each of these methods returns a struct, which always has a `files` field
containing resource tuples as described in processor.bzl. Optionally, the structs can also have an
`infoplists` field containing a list of plists that should be merged into the root Info.plist.
"""
load(
"@build_bazel_rules_apple//apple/bundling:file_actions.bzl",
"file_actions",
)
load(
"@build_bazel_rules_apple//apple/internal/partials/support:resources_support.bzl",
"resources_support",
)
load(
"@build_bazel_rules_apple//apple/internal:intermediates.bzl",
"intermediates",
)
load(
"@build_bazel_rules_apple//apple/internal:outputs.bzl",
"outputs",
)
load(
"@build_bazel_rules_apple//apple/internal:processor.bzl",
"processor",
)
load(
"@build_bazel_rules_apple//apple/internal:resource_actions.bzl",
"resource_actions",
)
load(
"@build_bazel_rules_apple//apple/internal:resources.bzl",
"NewAppleResourceInfo",
"resources",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"AppleBundleInfo",
)
load(
"@bazel_skylib//lib:new_sets.bzl",
"sets",
)
load(
"@bazel_skylib//lib:partial.bzl",
"partial",
)
def _merge_root_infoplists(ctx, infoplists, out_infoplist, **kwargs):
"""Registers the root Info.plist generation action.
Args:
ctx: The target's rule context.
infoplists: List of plists that should be merged into the root Info.plist.
out_infoplist: Reference to the output Info plist.
**kwargs: Extra parameters forwarded into the merge_root_infoplists action.
Returns:
A list of tuples as described in processor.bzl with the Info.plist file
reference and the PkgInfo file if required.
"""
# TODO(b/73349137): Remove this symlink. It's only used so that the file has the proper name
# when bundled.
plist_symlink = intermediates.file(
ctx.actions,
ctx.label.name,
"Info.plist",
)
files = [plist_symlink]
file_actions.symlink(ctx, out_infoplist, plist_symlink)
out_pkginfo = None
if ctx.attr._needs_pkginfo:
out_pkginfo = intermediates.file(
ctx.actions,
ctx.label.name,
"PkgInfo",
)
files.append(out_pkginfo)
resource_actions.merge_root_infoplists(
ctx,
infoplists,
out_infoplist,
out_pkginfo,
**kwargs
)
return [(processor.location.content, None, depset(direct = files))]
def _deduplicate(resources_provider, avoid_provider, field):
"""Deduplicates and returns resources between 2 providers for a given field.
Deduplication happens by comparing the target path of a file and the files
themselves. If there are 2 resources with the same target path but different
contents, the files will not be deduplicated.
This approach is naïve in the sense that it deduplicates resources too
aggressively. We also need to compare the target that references the
resources so that they are not deduplicated if they are referenced within
multiple binary-containing bundles.
Args:
resources_provider: The provider with the resources to be bundled.
avoid_provider: The provider with the resources to avoid bundling.
field: The field to deduplicate resources on.
Returns:
A list of tuples with the resources present in avoid_providers removed from
resources_providers.
"""
# Build a dictionary with the file paths under each key for the avoided resources.
avoid_dict = {}
if avoid_provider and hasattr(avoid_provider, field):
for parent_dir, swift_module, files in getattr(avoid_provider, field):
key = "%s_%s" % (parent_dir or "root", swift_module or "root")
avoid_dict[key] = {x.short_path: None for x in files.to_list()}
# Get the resources to keep, compare them to the avoid_dict under the same
# key, and remove the duplicated file references. Then recreate the original
# tuple with only the remaining files, if any.
deduped_tuples = []
for parent_dir, swift_module, files in getattr(resources_provider, field):
key = "%s_%s" % (parent_dir or "root", swift_module or "root")
# Dictionary used as a set to mark files as processed by short_path to deduplicate generated
# files that may appear more than once if multiple architectures are being built.
multi_architecture_deduplication_set = {}
deduped_files = depset([])
for to_bundle_file in files.to_list():
short_path = to_bundle_file.short_path
if short_path in multi_architecture_deduplication_set:
continue
multi_architecture_deduplication_set[short_path] = None
if key in avoid_dict and short_path in avoid_dict[key]:
# If the resource file is present in the provider of resources to avoid, we compare
# the owners of the resource through the owners dictionaries of the providers. If
# there are owners present in resources_provider which are not present in
# avoid_provider, it means that there is at least one target that declares usage of
# the resource which is not accounted for in avoid_provider. If this is the case, we
# add the resource to be bundled in the bundle represented by resource_provider.
deduped_owners = [
o
for o in resources_provider.owners[short_path]
if o not in avoid_provider.owners[short_path]
]
if deduped_owners:
deduped_files = depset(
direct = [to_bundle_file],
transitive = [deduped_files],
)
else:
deduped_files = depset(direct = [to_bundle_file], transitive = [deduped_files])
if deduped_files:
deduped_tuples.append((parent_dir, swift_module, deduped_files))
return deduped_tuples
def _locales_requested(ctx):
"""Determines which locales to include when resource actions.
If the user has specified "apple.locales_to_include" we use those. Otherwise we don't filter.
'Base' is included by default to any given list of locales to include.
Args:
ctx: The rule context.
Returns:
A set of locales to include or None if all should be included.
"""
requested_locales = ctx.var.get("apple.locales_to_include")
if requested_locales != None:
return sets.make(["Base"] + [x.strip() for x in requested_locales.split(",")])
else:
return None
def _locale_for_path(resource_path):
"""Returns the detected locale for the given resource path."""
if not resource_path:
return None
loc = resource_path.find(".lproj")
if loc == -1:
return None
# If there was more after '.lproj', then it has to be a directory, otherwise
# it was part of some other extension.
if (loc + 6) < len(resource_path) and resource_path[loc + 6] != "/":
return None
locale_start = resource_path.rfind("/", end = loc)
if locale_start < 0:
return resource_path[0:loc]
return resource_path[locale_start + 1:loc]
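# Illustrative behavior of _locale_for_path (examples assumed, not from tests):
#   "foo/Base.lproj"           -> "Base"
#   "foo/en.lproj/bar.strings" -> "en"
#   "foo/bar.png"              -> None   (no ".lproj" component)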
def _validate_processed_locales(locales_requested, locales_included, locales_dropped):
"""Prints a warning if locales were dropped and none of the requested ones were included."""
if sets.length(locales_dropped):
# Display a warning if a locale was dropped and there are unfulfilled locale requests; it
# could mean that the user made a mistake in defining the locales they want to keep.
if not sets.is_equal(locales_requested, locales_included):
unused_locales = sets.difference(locales_requested, locales_included)
print("Warning: Did not have resources that matched " + sets.str(unused_locales) +
" in locale filter. Please verify apple.locales_to_include is defined" +
" properly.")
def _resources_partial_impl(
ctx,
bundle_id,
bundle_verification_targets,
plist_attrs,
targets_to_avoid,
top_level_attrs,
version_keys_required):
"""Implementation for the resource processing partial."""
providers = []
if hasattr(ctx.attr, "deps"):
providers.extend([
x[NewAppleResourceInfo]
for x in ctx.attr.deps
if NewAppleResourceInfo in x
])
# TODO(kaipi): Bucket top_level_attrs directly instead of collecting and
# splitting.
files = resources.collect(ctx.attr, res_attrs = top_level_attrs)
if files:
providers.append(resources.bucketize(files, owner = str(ctx.label)))
if plist_attrs:
plists = resources.collect(ctx.attr, res_attrs = plist_attrs)
plist_provider = resources.bucketize_typed(
plists,
owner = str(ctx.label),
bucket_type = "infoplists",
)
providers.append(plist_provider)
avoid_providers = [
x[NewAppleResourceInfo]
for x in targets_to_avoid
if NewAppleResourceInfo in x
]
avoid_provider = None
if avoid_providers:
# Call merge_providers with validate_all_resources_owned set, to ensure that all the
# resources from dependency bundles have an owner.
avoid_provider = resources.merge_providers(
avoid_providers,
validate_all_resources_owned = True,
)
final_provider = resources.merge_providers(providers, default_owner = str(ctx.label))
# Map of resource provider fields to a tuple that contains the method to use to process those
# resources and a boolean indicating whether the Swift module is required for that processing.
provider_field_to_action = {
"asset_catalogs": (resources_support.asset_catalogs, False),
"datamodels": (resources_support.datamodels, True),
"infoplists": (resources_support.infoplists, False),
"plists": (resources_support.plists_and_strings, False),
"pngs": (resources_support.pngs, False),
# TODO(b/113252360): Remove this once we can correctly process Fileset files.
"resource_zips": (resources_support.resource_zips, False),
"storyboards": (resources_support.storyboards, True),
"strings": (resources_support.plists_and_strings, False),
"texture_atlases": (resources_support.texture_atlases, False),
"unprocessed": (resources_support.noop, False),
"xibs": (resources_support.xibs, True),
}
# List containing all the files that the processor will bundle in their
# configured location.
bundle_files = []
fields = resources.populated_resource_fields(final_provider)
infoplists = []
locales_requested = _locales_requested(ctx)
locales_included = sets.make(["Base"])
locales_dropped = sets.make()
for field in fields:
processing_func, requires_swift_module = provider_field_to_action[field]
deduplicated = _deduplicate(final_provider, avoid_provider, field)
for parent_dir, swift_module, files in deduplicated:
if locales_requested:
locale = _locale_for_path(parent_dir)
if sets.contains(locales_requested, locale):
sets.insert(locales_included, locale)
elif locale != None:
sets.insert(locales_dropped, locale)
continue
processing_args = {
"ctx": ctx,
"parent_dir": parent_dir,
"files": files,
}
# Only pass the Swift module name if the type of resource to process
# requires it.
if requires_swift_module:
processing_args["swift_module"] = swift_module
result = processing_func(**processing_args)
bundle_files.extend(result.files)
if hasattr(result, "infoplists"):
infoplists.extend(result.infoplists)
if locales_requested:
_validate_processed_locales(locales_requested, locales_included, locales_dropped)
if bundle_id:
# If no bundle ID was given, do not process the root Info.plist and do not validate embedded
# bundles.
bundle_verification_infoplists = [
b.target[AppleBundleInfo].infoplist
for b in bundle_verification_targets
]
bundle_verification_required_values = [
(
b.target[AppleBundleInfo].infoplist,
[[b.parent_bundle_id_reference, bundle_id]],
)
for b in bundle_verification_targets
if hasattr(b, "parent_bundle_id_reference")
]
out_infoplist = outputs.infoplist(ctx)
bundle_files.extend(
_merge_root_infoplists(
ctx,
infoplists,
out_infoplist,
bundle_id = bundle_id,
child_plists = bundle_verification_infoplists,
child_required_values = bundle_verification_required_values,
version_keys_required = version_keys_required,
),
)
return struct(bundle_files = bundle_files, providers = [final_provider])
def resources_partial(
bundle_id = None,
bundle_verification_targets = [],
plist_attrs = [],
targets_to_avoid = [],
top_level_attrs = [],
version_keys_required = True):
"""Constructor for the resources processing partial.
This partial collects and propagates all resources that should be bundled in the target being
processed.
Args:
bundle_id: Optional bundle ID to use when processing resources. If no bundle ID is given,
the bundle will not contain a root Info.plist and no embedded bundle verification will
occur.
bundle_verification_targets: List of structs that reference embedable targets that need to
be validated. The structs must have a `target` field with the target containing an
Info.plist file that will be validated. The structs may also have a
`parent_bundle_id_reference` field that contains the plist path, in list form, to the
plist entry that must contain this target's bundle ID.
plist_attrs: List of attributes that should be processed as Info plists that should be
merged and processed.
targets_to_avoid: List of targets containing resources that should be deduplicated from the
target being processed.
top_level_attrs: List of attributes containing resources that need to be processed from the
target being processed.
version_keys_required: Whether to validate that the Info.plist version keys are correctly
configured.
Returns:
A partial that returns the bundle location of the resources and the resources provider.
"""
return partial.make(
_resources_partial_impl,
bundle_id = bundle_id,
bundle_verification_targets = bundle_verification_targets,
plist_attrs = plist_attrs,
targets_to_avoid = targets_to_avoid,
top_level_attrs = top_level_attrs,
version_keys_required = version_keys_required,
)
|
the-stack_0_12455 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUNetconfSessionsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUNetconfManager(NURESTObject):
""" Represents a NetconfManager in the VSD
Notes:
Identifies a Netconf Manager communicating with VSD. This can only be created by the netconfmgr user.
"""
__rest_name__ = "netconfmanager"
__resource_name__ = "netconfmanagers"
## Constants
CONST_STATUS_CONNECTED = "CONNECTED"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_STATUS_JMS_DISCONNECTED = "JMS_DISCONNECTED"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_STATUS_DISCONNECTED = "DISCONNECTED"
CONST_STATUS_INIT = "INIT"
def __init__(self, **kwargs):
""" Initializes a NetconfManager instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> netconfmanager = NUNetconfManager(id=u'xxxx-xxx-xxx-xxx', name=u'NetconfManager')
>>> netconfmanager = NUNetconfManager(data=my_dict)
"""
super(NUNetconfManager, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._release = None
self._entity_scope = None
self._assoc_entity_type = None
self._status = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="release", remote_name="release", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="assoc_entity_type", remote_name="assocEntityType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="status", remote_name="status", attribute_type=str, is_required=False, is_unique=False, choices=[u'CONNECTED', u'DISCONNECTED', u'INIT', u'JMS_DISCONNECTED'])
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.netconf_sessions = NUNetconfSessionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
A unique name of the Netconf Manager entity.
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
A unique name of the Netconf Manager entity.
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def release(self):
""" Get release value.
Notes:
Netconf Manager RPM release version
"""
return self._release
@release.setter
def release(self, value):
""" Set release value.
Notes:
Netconf Manager RPM release version
"""
self._release = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def assoc_entity_type(self):
""" Get assoc_entity_type value.
Notes:
Type of parent entity
This attribute is named `assocEntityType` in VSD API.
"""
return self._assoc_entity_type
@assoc_entity_type.setter
def assoc_entity_type(self, value):
""" Set assoc_entity_type value.
Notes:
Type of parent entity
This attribute is named `assocEntityType` in VSD API.
"""
self._assoc_entity_type = value
@property
def status(self):
""" Get status value.
Notes:
VSD connection status with this Netconf Manager
"""
return self._status
@status.setter
def status(self, value):
""" Set status value.
Notes:
VSD connection status with this Netconf Manager
"""
self._status = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
|
the-stack_0_12457 |
import numpy as np
import pandas as pd
from abc import abstractmethod
from gym.spaces import Space, Box
from typing import Dict
from trades import Trade, TradeType
from exchanges import InstrumentExchange
from slippage import RandomSlippageModel
class SimulatedExchange(InstrumentExchange):
"""An instrument exchange, in which the price history is based off the supplied data frame and
trade execution is largely decided by the designated slippage model.
If the `data_frame` parameter is not supplied upon initialization, it must be set before
the exchange can be used within a trading environment.
"""
def __init__(self, data_frame: pd.DataFrame = None, **kwargs):
super().__init__(base_instrument=kwargs.get('base_instrument', 'USD'), dtype=kwargs.get('dtype', np.float16))
if data_frame is not None:
self._data_frame = data_frame.astype(self._dtype)
self._commission_percent = kwargs.get('commission_percent', 0.3)
self._base_precision = kwargs.get('base_precision', 2)
self._instrument_precision = kwargs.get('instrument_precision', 8)
self._initial_balance = kwargs.get('initial_balance', 1E4)
self._min_order_amount = kwargs.get('min_order_amount', 1E-3)
self._min_trade_price = kwargs.get('min_trade_price', 1E-6)
self._max_trade_price = kwargs.get('max_trade_price', 1E6)
self._min_trade_amount = kwargs.get('min_trade_amount', 1E-3)
self._max_trade_amount = kwargs.get('max_trade_amount', 1E6)
max_allowed_slippage_percent = kwargs.get('max_allowed_slippage_percent', 1.0)
SlippageModelClass = kwargs.get('slippage_model', RandomSlippageModel)
self._slippage_model = SlippageModelClass(max_allowed_slippage_percent)
@property
def data_frame(self) -> pd.DataFrame:
"""The underlying data model backing the price and volume simulation."""
return self._data_frame
@data_frame.setter
def data_frame(self, data_frame: pd.DataFrame):
self._data_frame = data_frame
@property
def initial_balance(self) -> float:
return self._initial_balance
@property
def balance(self) -> float:
return self._balance
@property
def portfolio(self) -> Dict[str, float]:
return self._portfolio
@property
def trades(self) -> pd.DataFrame:
return self._trades
@property
def performance(self) -> pd.DataFrame:
return self._performance
@property
def observation_space(self) -> Space:
low = np.array([self._min_trade_price, ] * 4 + [self._min_trade_amount, ])
high = np.array([self._max_trade_price, ] * 4 + [self._max_trade_amount, ])
return Box(low=low, high=high, dtype=self._dtype)
@property
def has_next_observation(self) -> bool:
return self._current_step < len(self._data_frame) - 1
def next_observation(self) -> pd.DataFrame:
obs = self._data_frame.iloc[self._current_step]
self._current_step += 1
return obs
def current_price(self, symbol: str) -> float:
if len(self._data_frame) == 0:
self.next_observation()
return float(self._data_frame['close'].values[self._current_step])
def _is_valid_trade(self, trade: Trade) -> bool:
if trade.trade_type is TradeType.MARKET_BUY or trade.trade_type is TradeType.LIMIT_BUY:
return trade.amount >= self._min_order_amount and self._balance >= trade.amount * trade.price
elif trade.trade_type is TradeType.MARKET_SELL or trade.trade_type is TradeType.LIMIT_SELL:
return trade.amount >= self._min_order_amount and self._portfolio.get(trade.symbol, 0) >= trade.amount
return True
def _update_account(self, trade: Trade):
if trade.amount > 0:
self._trades = self._trades.append({
'step': self._current_step,
'symbol': trade.symbol,
'type': trade.trade_type,
'amount': trade.amount,
'price': trade.price
}, ignore_index=True)
if trade.is_buy:
self._balance -= trade.amount * trade.price
self._portfolio[trade.symbol] = self._portfolio.get(trade.symbol, 0) + trade.amount
elif trade.is_sell:
self._balance += trade.amount * trade.price
self._portfolio[trade.symbol] = self._portfolio.get(trade.symbol, 0) - trade.amount
self._portfolio[self._base_instrument] = self._balance
self._performance = self._performance.append({
'balance': self.balance,
'net_worth': self.net_worth,
}, ignore_index=True)
def execute_trade(self, trade: Trade) -> Trade:
current_price = self.current_price(symbol=trade.symbol)
commission = self._commission_percent / 100
filled_trade = trade.copy()
if filled_trade.is_hold or not self._is_valid_trade(filled_trade):
filled_trade.amount = 0
elif filled_trade.is_buy:
price_adjustment = (1 + commission)
filled_trade.price = round(current_price * price_adjustment, self._base_precision)
filled_trade.amount = round(
(filled_trade.price * filled_trade.amount) / filled_trade.price, self._instrument_precision)
elif filled_trade.is_sell:
price_adjustment = (1 - commission)
filled_trade.price = round(current_price * price_adjustment, self._base_precision)
filled_trade.amount = round(filled_trade.amount, self._instrument_precision)
filled_trade = self._slippage_model.fill_order(filled_trade, current_price)
self._update_account(filled_trade)
return filled_trade
def reset(self):
self._balance = self._initial_balance
self._portfolio = {self._base_instrument: self._balance}
self._trades = pd.DataFrame([], columns=['step', 'symbol', 'type', 'amount', 'price'])
self._performance = pd.DataFrame([], columns=['balance', 'net_worth'])
self._current_step = 0
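# Hedged usage sketch: the column names and keyword values below are inferred
# from the code above (only 'close' is strictly required by current_price), not
# from a documented contract.
#
#   df = pd.DataFrame({'open': [...], 'high': [...], 'low': [...],
#                      'close': [...], 'volume': [...]})
#   exchange = SimulatedExchange(data_frame=df, commission_percent=0.3)
#   exchange.reset()
#   obs = exchange.next_observation()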
|
the-stack_0_12458 | #!/usr/bin/env python3
from omxplayer.player import OMXPlayer
from pathlib import Path
from time import sleep
import logging
logging.basicConfig(level=logging.INFO)
import socket
import pdb
noCommMode = False
if not noCommMode:
HOST = ''
PORT = 55555
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((HOST,PORT))
sock.listen(1)
conn,addr = sock.accept()
vidPath = "raspi.avi"
player_log = logging.getLogger("Player 1")
player = OMXPlayer(vidPath,
dbus_name='org.mpris.MediaPlayer2.omxplayer1')
player.playEvent += lambda _: player_log.info("Play")
player.pauseEvent += lambda _: player_log.info("Pause")
player.stopEvent += lambda _: player_log.info("Stop")
player.set_aspect_mode('stretch')
player.set_video_pos(0, 0, 700, int(512*2.14))
sleep(10)
if noCommMode:
# for debugging
player.set_position(120*60)
# player.play()
# sleep(1)
# player.pause()
sleep(20)
player.set_position(130*60)
# player.play()
sleep(20)
player.set_position(140*60)
sleep(20)
player.stop()
else:
while True:
data = conn.recv(1024).decode()  # decode bytes so the string checks below work on Python 3
print('received: '+str(data))
if data=='term':
break
if '_' in data:
cmd = data.split('_')[0]
arg = float(data.split('_')[1])
if cmd=='pause':
player.set_position(arg)
player.play()
sleep(10)
player.pause()
elif cmd=='play':
player.set_position(arg)
conn.close()
player.quit()
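# Hedged sketch of the controlling client for the socket protocol parsed above
# (the peer address is whatever machine runs this script; commands are
# '<cmd>_<seconds>' strings plus 'term' to end the loop):
#
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(('<raspi-ip>', 55555))
#   client.sendall(b'play_3600')    # seek to 60 min and keep playing
#   client.sendall(b'pause_7200')   # seek to 120 min, play ~10 s, then pause
#   client.sendall(b'term')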
|
the-stack_0_12459 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
import onnxruntime
import pytest
import shutil
from mxnet import gluon
from mxnet.test_utils import assert_almost_equal
@pytest.mark.skip(reason='Gluon no long support v1.x models since https://github.com/apache/incubator-mxnet/pull/20262')
def test_resnet50_v2(tmp_path):
try:
ctx = mx.cpu()
model = gluon.model_zoo.vision.resnet50_v2(pretrained=True, ctx=ctx)
BS = 1
inp = mx.random.uniform(0, 1, (1, 3, 224, 224))
model.hybridize(static_alloc=True)
out = model(inp)
prefix = "%s/resnet50" % tmp_path
model.export(prefix)
sym_file = "%s-symbol.json" % prefix
params_file = "%s-0000.params" % prefix
onnx_file = "%s.onnx" % prefix
dynamic_input_shapes = [('batch', 3, 224, 224)]
input_shapes = [(1, 3, 224, 224)]
input_types = [np.float32]
converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
input_types, onnx_file,
dynamic=True,
dynamic_input_shapes=dynamic_input_shapes)
ses_opt = onnxruntime.SessionOptions()
ses_opt.log_severity_level = 3
session = onnxruntime.InferenceSession(onnx_file, ses_opt)
BS = 10
inp = mx.random.uniform(0, 1, (BS, 3, 224, 224))
mx_out = model(inp)
onnx_inputs = [inp]
input_dict = dict((session.get_inputs()[i].name, onnx_inputs[i].asnumpy())
for i in range(len(onnx_inputs)))
on_out = session.run(None, input_dict)
assert_almost_equal(mx_out, on_out, rtol=0.001, atol=0.01)
finally:
shutil.rmtree(tmp_path)
|
the-stack_0_12461 | #!/usr/bin/python3
"""Resets the datastore
Deletes all sqlite files. This will not reset nifi, but it resets the lhipa and cl model states.
"""
__author__ = "Martin Eigenmann"
__license__ = "unlicence"
__version__ = "0.0.1"
__email__ = "[email protected]"
__status__ = "Prototpye"
import json
import sys
import os
import sqlite3
for f in os.listdir('/data'):
if len(f.split('-')) == 1:
db = sqlite3.connect(f'/data/{f}')
cursor = db.cursor()
cursor.execute('DROP TABLE IF EXISTS et')
db.commit()
cursor.close()
db.close()
os.remove(f'/data/{f}')
print(json.dumps({ "reset": True })) |
the-stack_0_12462 | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from torch import nn as nn
from torch.nn import functional as F
from mmseg.core import add_prefix
from ..builder import (SEGMENTORS, build_backbone, build_head, build_loss,
build_neck)
from .base import Base3DSegmentor
@SEGMENTORS.register_module()
class EncoderDecoder3D(Base3DSegmentor):
"""3D Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
Note that auxiliary_head is only used for deep supervision during training,
which can be discarded during inference.
"""
def __init__(self,
backbone,
decode_head,
neck=None,
auxiliary_head=None,
loss_regularization=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(EncoderDecoder3D, self).__init__(init_cfg=init_cfg)
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
self._init_decode_head(decode_head)
self._init_auxiliary_head(auxiliary_head)
self._init_loss_regularization(loss_regularization)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
assert self.with_decode_head, \
'3D EncoderDecoder Segmentor should have a decode_head'
def _init_decode_head(self, decode_head):
"""Initialize ``decode_head``"""
self.decode_head = build_head(decode_head)
self.num_classes = self.decode_head.num_classes
def _init_auxiliary_head(self, auxiliary_head):
"""Initialize ``auxiliary_head``"""
if auxiliary_head is not None:
if isinstance(auxiliary_head, list):
self.auxiliary_head = nn.ModuleList()
for head_cfg in auxiliary_head:
self.auxiliary_head.append(build_head(head_cfg))
else:
self.auxiliary_head = build_head(auxiliary_head)
def _init_loss_regularization(self, loss_regularization):
"""Initialize ``loss_regularization``"""
if loss_regularization is not None:
if isinstance(loss_regularization, list):
self.loss_regularization = nn.ModuleList()
for loss_cfg in loss_regularization:
self.loss_regularization.append(build_loss(loss_cfg))
else:
self.loss_regularization = build_loss(loss_regularization)
def extract_feat(self, points):
"""Extract features from points."""
x = self.backbone(points)
if self.with_neck:
x = self.neck(x)
return x
def encode_decode(self, points, img_metas):
"""Encode points with backbone and decode into a semantic segmentation
map of the same size as input.
Args:
points (torch.Tensor): Input points of shape [B, N, 3+C].
img_metas (list[dict]): Meta information of each sample.
Returns:
torch.Tensor: Segmentation logits of shape [B, num_classes, N].
"""
x = self.extract_feat(points)
out = self._decode_head_forward_test(x, img_metas)
return out
def _decode_head_forward_train(self, x, img_metas, pts_semantic_mask):
"""Run forward function and calculate loss for decode head in
training."""
losses = dict()
loss_decode = self.decode_head.forward_train(x, img_metas,
pts_semantic_mask,
self.train_cfg)
losses.update(add_prefix(loss_decode, 'decode'))
return losses
def _decode_head_forward_test(self, x, img_metas):
"""Run forward function and calculate loss for decode head in
inference."""
seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg)
return seg_logits
def _auxiliary_head_forward_train(self, x, img_metas, pts_semantic_mask):
"""Run forward function and calculate loss for auxiliary head in
training."""
losses = dict()
if isinstance(self.auxiliary_head, nn.ModuleList):
for idx, aux_head in enumerate(self.auxiliary_head):
loss_aux = aux_head.forward_train(x, img_metas,
pts_semantic_mask,
self.train_cfg)
losses.update(add_prefix(loss_aux, f'aux_{idx}'))
else:
loss_aux = self.auxiliary_head.forward_train(
x, img_metas, pts_semantic_mask, self.train_cfg)
losses.update(add_prefix(loss_aux, 'aux'))
return losses
def _loss_regularization_forward_train(self):
"""Calculate regularization loss for model weight in training."""
losses = dict()
if isinstance(self.loss_regularization, nn.ModuleList):
for idx, regularize_loss in enumerate(self.loss_regularization):
loss_regularize = dict(
loss_regularize=regularize_loss(self.modules()))
losses.update(add_prefix(loss_regularize, f'regularize_{idx}'))
else:
loss_regularize = dict(
loss_regularize=self.loss_regularization(self.modules()))
losses.update(add_prefix(loss_regularize, 'regularize'))
return losses
def forward_dummy(self, points):
"""Dummy forward function."""
seg_logit = self.encode_decode(points, None)
return seg_logit
def forward_train(self, points, img_metas, pts_semantic_mask):
"""Forward function for training.
Args:
points (list[torch.Tensor]): List of points of shape [N, C].
img_metas (list): Image metas.
pts_semantic_mask (list[torch.Tensor]): List of point-wise semantic
labels of shape [N].
Returns:
dict[str, Tensor]: Losses.
"""
points_cat = torch.stack(points)
pts_semantic_mask_cat = torch.stack(pts_semantic_mask)
# extract features using backbone
x = self.extract_feat(points_cat)
losses = dict()
loss_decode = self._decode_head_forward_train(x, img_metas,
pts_semantic_mask_cat)
losses.update(loss_decode)
if self.with_auxiliary_head:
loss_aux = self._auxiliary_head_forward_train(
x, img_metas, pts_semantic_mask_cat)
losses.update(loss_aux)
if self.with_regularization_loss:
loss_regularize = self._loss_regularization_forward_train()
losses.update(loss_regularize)
return losses
@staticmethod
def _input_generation(coords,
patch_center,
coord_max,
feats,
use_normalized_coord=False):
"""Generating model input.
Generate input by subtracting patch center and adding additional
features. Currently support colors and normalized xyz as features.
Args:
coords (torch.Tensor): Sampled 3D point coordinate of shape [S, 3].
patch_center (torch.Tensor): Center coordinate of the patch.
coord_max (torch.Tensor): Max coordinate of all 3D points.
feats (torch.Tensor): Features of sampled points of shape [S, C].
use_normalized_coord (bool, optional): Whether to use normalized
xyz as additional features. Defaults to False.
Returns:
torch.Tensor: The generated input data of shape [S, 3+C'].
"""
# subtract patch center, the z dimension is not centered
centered_coords = coords.clone()
centered_coords[:, 0] -= patch_center[0]
centered_coords[:, 1] -= patch_center[1]
# normalized coordinates as extra features
if use_normalized_coord:
normalized_coord = coords / coord_max
feats = torch.cat([feats, normalized_coord], dim=1)
points = torch.cat([centered_coords, feats], dim=1)
return points
def _sliding_patch_generation(self,
points,
num_points,
block_size,
sample_rate=0.5,
use_normalized_coord=False,
eps=1e-3):
"""Sampling points in a sliding window fashion.
First sample patches to cover all the input points.
Then sample points in each patch to batch points of a certain number.
Args:
points (torch.Tensor): Input points of shape [N, 3+C].
num_points (int): Number of points to be sampled in each patch.
block_size (float, optional): Size of a patch to sample.
sample_rate (float, optional): Stride used in sliding patch.
Defaults to 0.5.
use_normalized_coord (bool, optional): Whether to use normalized
xyz as additional features. Defaults to False.
eps (float, optional): A value added to patch boundary to guarantee
points coverage. Defaults to 1e-3.
Returns:
np.ndarray | np.ndarray:
- patch_points (torch.Tensor): Points of different patches of
shape [K, N, 3+C].
- patch_idxs (torch.Tensor): Index of each point in
`patch_points`, of shape [K, N].
"""
device = points.device
# we assume the first three dims are points' 3D coordinates
# and the rest dims are their per-point features
coords = points[:, :3]
feats = points[:, 3:]
coord_max = coords.max(0)[0]
coord_min = coords.min(0)[0]
stride = block_size * sample_rate
num_grid_x = int(
torch.ceil((coord_max[0] - coord_min[0] - block_size) /
stride).item() + 1)
num_grid_y = int(
torch.ceil((coord_max[1] - coord_min[1] - block_size) /
stride).item() + 1)
patch_points, patch_idxs = [], []
for idx_y in range(num_grid_y):
s_y = coord_min[1] + idx_y * stride
e_y = torch.min(s_y + block_size, coord_max[1])
s_y = e_y - block_size
for idx_x in range(num_grid_x):
s_x = coord_min[0] + idx_x * stride
e_x = torch.min(s_x + block_size, coord_max[0])
s_x = e_x - block_size
# extract points within this patch
cur_min = torch.tensor([s_x, s_y, coord_min[2]]).to(device)
cur_max = torch.tensor([e_x, e_y, coord_max[2]]).to(device)
cur_choice = ((coords >= cur_min - eps) &
(coords <= cur_max + eps)).all(dim=1)
if not cur_choice.any(): # no points in this patch
continue
# sample points in this patch to multiple batches
cur_center = cur_min + block_size / 2.0
point_idxs = torch.nonzero(cur_choice, as_tuple=True)[0]
num_batch = int(np.ceil(point_idxs.shape[0] / num_points))
point_size = int(num_batch * num_points)
replace = point_size > 2 * point_idxs.shape[0]
num_repeat = point_size - point_idxs.shape[0]
if replace: # duplicate
point_idxs_repeat = point_idxs[torch.randint(
0, point_idxs.shape[0],
size=(num_repeat, )).to(device)]
else:
point_idxs_repeat = point_idxs[torch.randperm(
point_idxs.shape[0])[:num_repeat]]
choices = torch.cat([point_idxs, point_idxs_repeat], dim=0)
choices = choices[torch.randperm(choices.shape[0])]
# construct model input
point_batches = self._input_generation(
coords[choices],
cur_center,
coord_max,
feats[choices],
use_normalized_coord=use_normalized_coord)
patch_points.append(point_batches)
patch_idxs.append(choices)
patch_points = torch.cat(patch_points, dim=0)
patch_idxs = torch.cat(patch_idxs, dim=0)
# make sure all points are sampled at least once
assert torch.unique(patch_idxs).shape[0] == points.shape[0], \
'some points are not sampled in sliding inference'
return patch_points, patch_idxs
def slide_inference(self, point, img_meta, rescale):
"""Inference by sliding-window with overlap.
Args:
point (torch.Tensor): Input points of shape [N, 3+C].
img_meta (dict): Meta information of input sample.
rescale (bool): Whether transform to original number of points.
Will be used for voxelization based segmentors.
Returns:
Tensor: The output segmentation map of shape [num_classes, N].
"""
num_points = self.test_cfg.num_points
block_size = self.test_cfg.block_size
sample_rate = self.test_cfg.sample_rate
use_normalized_coord = self.test_cfg.use_normalized_coord
batch_size = self.test_cfg.batch_size * num_points
# patch_points is of shape [K*N, 3+C], patch_idxs is of shape [K*N]
patch_points, patch_idxs = self._sliding_patch_generation(
point, num_points, block_size, sample_rate, use_normalized_coord)
feats_dim = patch_points.shape[1]
seg_logits = [] # save patch predictions
for batch_idx in range(0, patch_points.shape[0], batch_size):
batch_points = patch_points[batch_idx:batch_idx + batch_size]
batch_points = batch_points.view(-1, num_points, feats_dim)
# batch_seg_logit is of shape [B, num_classes, N]
batch_seg_logit = self.encode_decode(batch_points, img_meta)
batch_seg_logit = batch_seg_logit.transpose(1, 2).contiguous()
seg_logits.append(batch_seg_logit.view(-1, self.num_classes))
# aggregate per-point logits by indexing sum and dividing count
seg_logits = torch.cat(seg_logits, dim=0) # [K*N, num_classes]
expand_patch_idxs = patch_idxs.unsqueeze(1).repeat(1, self.num_classes)
preds = point.new_zeros((point.shape[0], self.num_classes)).\
scatter_add_(dim=0, index=expand_patch_idxs, src=seg_logits)
count_mat = torch.bincount(patch_idxs)
preds = preds / count_mat[:, None]
# TODO: if rescale and voxelization segmentor
return preds.transpose(0, 1) # to [num_classes, K*N]
def whole_inference(self, points, img_metas, rescale):
"""Inference with full scene (one forward pass without sliding)."""
seg_logit = self.encode_decode(points, img_metas)
# TODO: if rescale and voxelization segmentor
return seg_logit
def inference(self, points, img_metas, rescale):
"""Inference with slide/whole style.
Args:
points (torch.Tensor): Input points of shape [B, N, 3+C].
img_metas (list[dict]): Meta information of each sample.
rescale (bool): Whether transform to original number of points.
Will be used for voxelization based segmentors.
Returns:
Tensor: The output segmentation map.
"""
assert self.test_cfg.mode in ['slide', 'whole']
if self.test_cfg.mode == 'slide':
seg_logit = torch.stack([
self.slide_inference(point, img_meta, rescale)
for point, img_meta in zip(points, img_metas)
], 0)
else:
seg_logit = self.whole_inference(points, img_metas, rescale)
output = F.softmax(seg_logit, dim=1)
return output
def simple_test(self, points, img_metas, rescale=True):
"""Simple test with single scene.
Args:
points (list[torch.Tensor]): List of points of shape [N, 3+C].
img_metas (list[dict]): Meta information of each sample.
rescale (bool): Whether transform to original number of points.
Will be used for voxelization based segmentors.
Defaults to True.
Returns:
list[dict]: The output prediction result with following keys:
- semantic_mask (Tensor): Segmentation mask of shape [N].
"""
# 3D segmentation requires per-point prediction, so it's impossible
# to use down-sampling to get a batch of scenes with same num_points
# therefore, we only support testing one scene every time
seg_pred = []
for point, img_meta in zip(points, img_metas):
seg_prob = self.inference(point.unsqueeze(0), [img_meta],
rescale)[0]
seg_map = seg_prob.argmax(0) # [N]
# to cpu tensor for consistency with det3d
seg_map = seg_map.cpu()
seg_pred.append(seg_map)
# warp in dict
seg_pred = [dict(semantic_mask=seg_map) for seg_map in seg_pred]
return seg_pred
def aug_test(self, points, img_metas, rescale=True):
"""Test with augmentations.
Args:
points (list[torch.Tensor]): List of points of shape [B, N, 3+C].
img_metas (list[list[dict]]): Meta information of each sample.
Outer list are different samples while inner is different augs.
rescale (bool): Whether transform to original number of points.
Will be used for voxelization based segmentors.
Defaults to True.
Returns:
list[dict]: The output prediction result with following keys:
- semantic_mask (Tensor): Segmentation mask of shape [N].
"""
# in aug_test, one scene going through different augmentations could
# have the same number of points and are stacked as a batch
# to save memory, we get augmented seg logit inplace
seg_pred = []
for point, img_meta in zip(points, img_metas):
seg_prob = self.inference(point, img_meta, rescale)
seg_prob = seg_prob.mean(0) # [num_classes, N]
seg_map = seg_prob.argmax(0) # [N]
# to cpu tensor for consistency with det3d
seg_map = seg_map.cpu()
seg_pred.append(seg_map)
# warp in dict
seg_pred = [dict(semantic_mask=seg_map) for seg_map in seg_pred]
return seg_pred
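# Hedged configuration sketch: the test_cfg keys are the ones read by
# slide_inference() above; the numeric values and the backbone/decode_head dicts
# are placeholders that depend on the dataset and model config actually used.
#
#   test_cfg = dict(mode='slide', num_points=8192, block_size=1.5,
#                   sample_rate=0.5, use_normalized_coord=False, batch_size=24)
#   segmentor = EncoderDecoder3D(backbone=..., decode_head=..., test_cfg=test_cfg)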
|
the-stack_0_12463 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from trac.mimeview.tests import api, patch, pygments, rst, txtl
from trac.mimeview.tests.functional import functionalSuite
def test_suite():
suite = unittest.TestSuite()
suite.addTest(api.test_suite())
suite.addTest(patch.test_suite())
suite.addTest(pygments.test_suite())
suite.addTest(rst.test_suite())
suite.addTest(txtl.test_suite())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
the-stack_0_12465 | # encoding: utf-8
# For Facebook
FACEBOOK_APP_SECRET = ''
FACEBOOK_APP_ID = ''
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
MESSAGE_FORMAT = u'''Hi,
%(message)s
--
%(creator)s'''
# set email sender address
#default_email_sender = ''
# set default address to send messages
#default_email_to = ''
COMMIT_SCRIPT='' |
the-stack_0_12466 | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
import os
import re
import sys
from copy import deepcopy
from importlib.machinery import SourceFileLoader
from datetime import datetime
import pandas as pd
from .procedure import Procedure, UnknownProcedure
from .parameters import Parameter
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def unique_filename(directory, prefix='DATA', suffix='', ext='csv',
dated_folder=False, index=True, datetimeformat="%Y-%m-%d"):
""" Returns a unique filename based on the directory and prefix
"""
now = datetime.now()
directory = os.path.abspath(directory)
if dated_folder:
directory = os.path.join(directory, now.strftime('%Y-%m-%d'))
if not os.path.exists(directory):
os.makedirs(directory)
if index:
i = 1
basename = "%s%s" % (prefix, now.strftime(datetimeformat))
basepath = os.path.join(directory, basename)
filename = "%s_%d%s.%s" % (basepath, i, suffix, ext)
while os.path.exists(filename):
i += 1
filename = "%s_%d%s.%s" % (basepath, i, suffix, ext)
else:
basename = "%s%s%s.%s" % (prefix, now.strftime(datetimeformat), suffix, ext)
filename = os.path.join(directory, basename)
return filename
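# Hedged example of the naming scheme above (absolute path shortened; the date
# follows datetimeformat and the _N index increments until the name is unused):
#   unique_filename('data', prefix='IV')  ->  '.../data/IV2021-05-01_1.csv'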
class CSVFormatter(logging.Formatter):
""" Formatter of data results """
def __init__(self, columns, delimiter=','):
"""Creates a csv formatter for a given list of columns (=header).
:param columns: list of column names.
:type columns: list
:param delimiter: delimiter between columns.
:type delimiter: str
"""
super().__init__()
self.columns = columns
self.delimiter = delimiter
def format(self, record):
"""Formats a record as csv.
:param record: record to format.
:type record: dict
:return: a string
"""
return self.delimiter.join('{}'.format(record[x]) for x in self.columns)
def format_header(self):
return self.delimiter.join(self.columns)
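# Illustrative usage sketch (not part of the original module): the formatter emits
# one delimited row per record dict, e.g.
#     fmt = CSVFormatter(columns=['Voltage', 'Current'])
#     fmt.format_header()                             # -> 'Voltage,Current'
#     fmt.format({'Voltage': 1.0, 'Current': 2e-3})   # -> '1.0,0.002'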
class Results(object):
""" The Results class provides a convenient interface to reading and
writing data in connection with a :class:`.Procedure` object.
:cvar COMMENT: The character used to identify a comment (default: #)
:cvar DELIMITER: The character used to delimit the data (default: ,)
:cvar LINE_BREAK: The character used for line breaks (default \\n)
    :cvar CHUNK_SIZE: The length of the data chunk that is read
:param procedure: Procedure object
:param data_filename: The data filename where the data is or should be
stored
"""
COMMENT = '#'
DELIMITER = ','
LINE_BREAK = "\n"
CHUNK_SIZE = 1000
def __init__(self, procedure, data_filename):
if not isinstance(procedure, Procedure):
raise ValueError("Results require a Procedure object")
self.procedure = procedure
self.procedure_class = procedure.__class__
self.parameters = procedure.parameter_objects()
self._header_count = -1
self.formatter = CSVFormatter(columns=self.procedure.DATA_COLUMNS)
if isinstance(data_filename, (list, tuple)):
data_filenames, data_filename = data_filename, data_filename[0]
else:
data_filenames = [data_filename]
self.data_filename = data_filename
self.data_filenames = data_filenames
if os.path.exists(data_filename): # Assume header is already written
self.reload()
self.procedure.status = Procedure.FINISHED
# TODO: Correctly store and retrieve status
else:
for filename in self.data_filenames:
with open(filename, 'w') as f:
f.write(self.header())
f.write(self.labels())
self._data = None
def __getstate__(self):
# Get all information needed to reconstruct procedure
self._parameters = self.procedure.parameter_values()
self._class = self.procedure.__class__.__name__
module = sys.modules[self.procedure.__module__]
self._package = module.__package__
self._module = module.__name__
self._file = module.__file__
state = self.__dict__.copy()
del state['procedure']
del state['procedure_class']
return state
def __setstate__(self, state):
self.__dict__.update(state)
# Restore the procedure
module = SourceFileLoader(self._module, self._file).load_module()
cls = getattr(module, self._class)
self.procedure = cls()
self.procedure.set_parameters(self._parameters)
self.procedure.refresh_parameters()
self.procedure_class = cls
del self._parameters
del self._class
del self._package
del self._module
del self._file
def header(self):
""" Returns a text header to accompany a datafile so that the procedure
can be reconstructed
"""
h = []
procedure = re.search("'(?P<name>[^']+)'",
repr(self.procedure_class)).group("name")
h.append("Procedure: <%s>" % procedure)
h.append("Parameters:")
for name, parameter in self.parameters.items():
h.append("\t%s: %s" % (parameter.name, str(parameter).encode("unicode_escape").decode("utf-8")))
h.append("Data:")
self._header_count = len(h)
h = [Results.COMMENT + l for l in h] # Comment each line
return Results.LINE_BREAK.join(h) + Results.LINE_BREAK
def labels(self):
""" Returns the columns labels as a string to be written
to the file
"""
return self.formatter.format_header() + Results.LINE_BREAK
def format(self, data):
""" Returns a formatted string containing the data to be written
to a file
"""
return self.formatter.format(data)
def parse(self, line):
""" Returns a dictionary containing the data from the line """
data = {}
items = line.split(Results.DELIMITER)
for i, key in enumerate(self.procedure.DATA_COLUMNS):
data[key] = items[i]
return data
@staticmethod
def parse_header(header, procedure_class=None):
""" Returns a Procedure object with the parameters as defined in the
header text.
"""
if procedure_class is not None:
procedure = procedure_class()
else:
procedure = None
header = header.split(Results.LINE_BREAK)
procedure_module = None
parameters = {}
for line in header:
if line.startswith(Results.COMMENT):
line = line[1:] # Uncomment
else:
raise ValueError("Parsing a header which contains "
"uncommented sections")
if line.startswith("Procedure"):
regex = r"<(?:(?P<module>[^>]+)\.)?(?P<class>[^.>]+)>"
search = re.search(regex, line)
procedure_module = search.group("module")
procedure_class = search.group("class")
elif line.startswith("\t"):
separator = ": "
partitioned_line = line[1:].partition(separator)
if partitioned_line[1] != separator:
raise Exception("Error partitioning header line %s." % line)
else:
parameters[partitioned_line[0]] = partitioned_line[2]
if procedure is None:
if procedure_class is None:
raise ValueError("Header does not contain the Procedure class")
try:
from importlib import import_module
procedure_module = import_module(procedure_module)
procedure_class = getattr(procedure_module, procedure_class)
procedure = procedure_class()
except ImportError:
procedure = UnknownProcedure(parameters)
log.warning("Unknown Procedure being used")
except Exception as e:
raise e
# Fill the procedure with the parameters found
for name, parameter in procedure.parameter_objects().items():
if parameter.name in parameters:
value = parameters[parameter.name]
setattr(procedure, name, value)
else:
raise Exception("Missing '%s' parameter when loading '%s' class" % (
parameter.name, procedure_class))
procedure.refresh_parameters() # Enforce update of meta data
return procedure
@staticmethod
def load(data_filename, procedure_class=None):
""" Returns a Results object with the associated Procedure object and
data
"""
header = ""
header_read = False
header_count = 0
with open(data_filename, 'r') as f:
while not header_read:
line = f.readline()
if line.startswith(Results.COMMENT):
header += line.strip() + Results.LINE_BREAK
header_count += 1
else:
header_read = True
procedure = Results.parse_header(header[:-1], procedure_class)
results = Results(procedure, data_filename)
results._header_count = header_count
return results
@property
def data(self):
# Need to update header count for correct referencing
if self._header_count == -1:
self._header_count = len(
self.header()[-1].split(Results.LINE_BREAK))
if self._data is None or len(self._data) == 0:
# Data has not been read
try:
self.reload()
except Exception:
# Empty dataframe
self._data = pd.DataFrame(columns=self.procedure.DATA_COLUMNS)
else: # Concatenate additional data, if any, to already loaded data
skiprows = len(self._data) + self._header_count
chunks = pd.read_csv(
self.data_filename,
comment=Results.COMMENT,
header=0,
names=self._data.columns,
chunksize=Results.CHUNK_SIZE, skiprows=skiprows, iterator=True
)
try:
tmp_frame = pd.concat(chunks, ignore_index=True)
# only append new data if there is any
# if no new data, tmp_frame dtype is object, which override's
# self._data's original dtype - this can cause problems plotting
# (e.g. if trying to plot int data on a log axis)
if len(tmp_frame) > 0:
self._data = pd.concat([self._data, tmp_frame],
ignore_index=True)
except Exception:
pass # All data is up to date
return self._data
def reload(self):
""" Preforms a full reloading of the file data, neglecting
any changes in the comments
"""
chunks = pd.read_csv(
self.data_filename,
comment=Results.COMMENT,
chunksize=Results.CHUNK_SIZE,
iterator=True
)
try:
self._data = pd.concat(chunks, ignore_index=True)
except Exception:
self._data = chunks.read()
def __repr__(self):
return "<{}(filename='{}',procedure={},shape={})>".format(
self.__class__.__name__, self.data_filename,
self.procedure.__class__.__name__,
self.data.shape
)
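# Illustrative usage sketch (not part of the original module; MyProcedure stands in
# for a concrete Procedure subclass defined elsewhere):
#     procedure = MyProcedure()
#     results = Results(procedure, unique_filename('.', prefix='DATA'))
#     results.data   # pandas DataFrame of all rows written to the data file so far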
|
the-stack_0_12467 | ########################################
# CS/CNS/EE 155 2018
# Problem Set 6
#
# Author: Andrew Kang
# Description: Set 6 HMM helper
########################################
import re
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from matplotlib import animation
from matplotlib.animation import FuncAnimation
####################
# WORDCLOUD FUNCTIONS
####################
def mask():
# Parameters.
r = 128
d = 2 * r + 1
# Get points in a circle.
y, x = np.ogrid[-r:d-r, -r:d-r]
circle = (x**2 + y**2 <= r**2)
# Create mask.
mask = 255 * np.ones((d, d), dtype=np.uint8)
mask[circle] = 0
return mask
def text_to_wordcloud(text, max_words=50, title='', show=True):
plt.close('all')
# Generate a wordcloud image.
wordcloud = WordCloud(random_state=0,
max_words=max_words,
background_color='white',
mask=mask()).generate(text)
# Show the image.
if show:
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.title(title, fontsize=24)
plt.show()
return wordcloud
def states_to_wordclouds(hmm, obs_map, max_words=50, show=True):
# Initialize.
M = 100000
n_states = len(hmm.A)
obs_map_r = obs_map_reverser(obs_map)
wordclouds = []
# Generate a large emission.
emission, states = hmm.generate_emission(M)
# For each state, get a list of observations that have been emitted
# from that state.
obs_count = []
for i in range(n_states):
obs_lst = np.array(emission)[np.where(np.array(states) == i)[0]]
obs_count.append(obs_lst)
# For each state, convert it into a wordcloud.
for i in range(n_states):
obs_lst = obs_count[i]
sentence = [obs_map_r[j] for j in obs_lst]
sentence_str = ' '.join(sentence)
wordclouds.append(text_to_wordcloud(sentence_str, max_words=max_words, title='State %d' % i, show=show))
return wordclouds
####################
# HMM FUNCTIONS
####################
def parse_observations(text):
# Convert text to dataset.
lines = [line.split() for line in text.split('\n') if line.split()]
obs_counter = 0
obs = []
obs_map = {}
for line in lines:
obs_elem = []
for word in line:
word = re.sub(r'[^\w]', '', word).lower()
if word not in obs_map:
# Add unique words to the observations map.
obs_map[word] = obs_counter
obs_counter += 1
# Add the encoded word.
obs_elem.append(obs_map[word])
# Add the encoded sequence.
obs.append(obs_elem)
return obs, obs_map
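# Illustrative example (not part of the original helper): parse_observations maps
# each unique lower-cased word to an integer id, e.g.
#     obs, obs_map = parse_observations("To be or not to be")
#     # obs     -> [[0, 1, 2, 3, 0, 1]]
#     # obs_map -> {'to': 0, 'be': 1, 'or': 2, 'not': 3}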
def obs_map_reverser(obs_map):
obs_map_r = {}
for key in obs_map:
obs_map_r[obs_map[key]] = key
return obs_map_r
def sample_sentence(hmm, obs_map, n_words=100):
# Get reverse map.
obs_map_r = obs_map_reverser(obs_map)
# Sample and convert sentence.
emission, states = hmm.generate_emission(n_words)
sentence = [obs_map_r[i] for i in emission]
return ' '.join(sentence).capitalize() + '...'
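# Illustrative usage sketch (assumes an already-trained HMM object exposing
# generate_emission(), as used above):
#     obs, obs_map = parse_observations(text)
#     print(sample_sentence(hmm, obs_map, n_words=25))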
####################
# HMM VISUALIZATION FUNCTIONS
####################
def visualize_sparsities(hmm, O_max_cols=50, O_vmax=0.1):
plt.close('all')
plt.set_cmap('viridis')
# Visualize sparsity of A.
plt.imshow(hmm.A, vmax=1.0)
plt.colorbar()
plt.title('Sparsity of A matrix')
plt.show()
    # Visualize sparsity of O.
plt.imshow(np.array(hmm.O)[:, :O_max_cols], vmax=O_vmax, aspect='auto')
plt.colorbar()
plt.title('Sparsity of O matrix')
plt.show()
####################
# HMM ANIMATION FUNCTIONS
####################
def animate_emission(hmm, obs_map, M=8, height=12, width=12, delay=1):
# Parameters.
lim = 1200
text_x_offset = 40
text_y_offset = 80
x_offset = 580
y_offset = 520
R = 420
r = 100
arrow_size = 20
arrow_p1 = 0.03
arrow_p2 = 0.02
arrow_p3 = 0.06
# Initialize.
n_states = len(hmm.A)
obs_map_r = obs_map_reverser(obs_map)
wordclouds = states_to_wordclouds(hmm, obs_map, max_words=20, show=False)
# Initialize plot.
fig, ax = plt.subplots()
fig.set_figheight(height)
fig.set_figwidth(width)
    ax.grid(False)  # use a boolean; the string 'off' relied on deprecated matplotlib behaviour
plt.axis('off')
ax.set_xlim([0, lim])
ax.set_ylim([0, lim])
# Plot each wordcloud.
for i, wordcloud in enumerate(wordclouds):
x = x_offset + int(R * np.cos(np.pi * 2 * i / n_states))
y = y_offset + int(R * np.sin(np.pi * 2 * i / n_states))
ax.imshow(wordcloud.to_array(), extent=(x - r, x + r, y - r, y + r), aspect='auto', zorder=-1)
# Initialize text.
text = ax.text(text_x_offset, lim - text_y_offset, '', fontsize=24)
# Make the arrows.
zorder_mult = n_states ** 2 * 100
arrows = []
for i in range(n_states):
row = []
for j in range(n_states):
# Arrow coordinates.
x_i = x_offset + R * np.cos(np.pi * 2 * i / n_states)
y_i = y_offset + R * np.sin(np.pi * 2 * i / n_states)
x_j = x_offset + R * np.cos(np.pi * 2 * j / n_states)
y_j = y_offset + R * np.sin(np.pi * 2 * j / n_states)
dx = x_j - x_i
dy = y_j - y_i
d = np.sqrt(dx**2 + dy**2)
if i != j:
arrow = ax.arrow(x_i + (r/d + arrow_p1) * dx + arrow_p2 * dy,
y_i + (r/d + arrow_p1) * dy + arrow_p2 * dx,
(1 - 2 * r/d - arrow_p3) * dx,
(1 - 2 * r/d - arrow_p3) * dy,
color=(1 - hmm.A[i][j], ) * 3,
head_width=arrow_size, head_length=arrow_size,
zorder=int(hmm.A[i][j] * zorder_mult))
else:
arrow = ax.arrow(x_i, y_i, 0, 0,
color=(1 - hmm.A[i][j], ) * 3,
head_width=arrow_size, head_length=arrow_size,
zorder=int(hmm.A[i][j] * zorder_mult))
row.append(arrow)
arrows.append(row)
emission, states = hmm.generate_emission(M)
def animate(i):
if i >= delay:
i -= delay
if i == 0:
arrows[states[0]][states[0]].set_color('red')
elif i == 1:
arrows[states[0]][states[0]].set_color((1 - hmm.A[states[0]][states[0]], ) * 3)
arrows[states[i - 1]][states[i]].set_color('red')
else:
arrows[states[i - 2]][states[i - 1]].set_color((1 - hmm.A[states[i - 2]][states[i - 1]], ) * 3)
arrows[states[i - 1]][states[i]].set_color('red')
# Set text.
text.set_text(' '.join([obs_map_r[e] for e in emission][:i+1]).capitalize())
return arrows + [text]
# Animate!
print('\nAnimating...')
anim = FuncAnimation(fig, animate, frames=M+delay, interval=1000)
return anim
# Note: this animation routine is fairly ad hoc and could use a refactor;
# the layout constants above were tuned by hand.
|
the-stack_0_12468 | import concurrent.futures
import rasterio
from rasterio._example import compute
def main(infile, outfile, num_workers=4):
with rasterio.Env():
with rasterio.open(infile) as src:
profile = src.profile
profile.update(blockxsize=128, blockysize=128, tiled=True)
with rasterio.open(outfile, "w", **profile) as dst:
windows = [window for ij, window in dst.block_windows()]
                data_gen = (src.read(window=window) for window in windows)
with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
for window, result in zip(windows, executor.map(compute, data_gen)):
                        dst.write(result, window=window)
in_path = 'test.tif'
out_path = 'output.tif'
if __name__ == '__main__':
main(in_path, out_path)
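# Design note (added comment): a ThreadPoolExecutor is used rather than a process
# pool because the compute() helper from rasterio._example is expected to release
# the GIL (as in rasterio's concurrency documentation), so worker threads can
# overlap while sharing the already-open datasets; each window is read, processed
# and written back independently.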
|
the-stack_0_12470 | # python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running ValueDice on the OpenAI Gym."""
import functools
from absl import flags
import acme
from acme import specs
from acme.agents.jax import value_dice
from absl import app
import helpers
import jax
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_steps', 1000000,
'Number of env steps to run training for.')
flags.DEFINE_integer('eval_every', 10000, 'How often to run evaluation')
flags.DEFINE_string('env_name', 'MountainCarContinuous-v0',
'What environment to run')
flags.DEFINE_string('dataset_name', 'd4rl_mujoco_halfcheetah/v0-medium',
'What dataset to use. '
'See the TFDS catalog for possible values.')
flags.DEFINE_integer('num_sgd_steps_per_step', 64,
'Number of SGD steps per learner step().')
flags.DEFINE_integer('seed', 0, 'Random seed.')
def main(_):
# Create an environment, grab the spec, and use it to create networks.
environment = helpers.make_environment(task=FLAGS.env_name)
environment_spec = specs.make_environment_spec(environment)
agent_networks = value_dice.make_networks(environment_spec)
# Construct the agent.
config = value_dice.ValueDiceConfig(
num_sgd_steps_per_step=FLAGS.num_sgd_steps_per_step)
agent = value_dice.ValueDice(
environment_spec,
agent_networks,
config=config,
make_demonstrations=functools.partial(
helpers.make_demonstration_iterator, dataset_name=FLAGS.dataset_name),
seed=FLAGS.seed)
# Create the environment loop used for training.
train_loop = acme.EnvironmentLoop(environment, agent, label='train_loop')
# Create the evaluation actor and loop.
eval_actor = agent.builder.make_actor(
random_key=jax.random.PRNGKey(FLAGS.seed),
policy_network=value_dice.apply_policy_and_sample(
agent_networks, eval_mode=True),
variable_source=agent)
eval_env = helpers.make_environment(task=FLAGS.env_name)
eval_loop = acme.EnvironmentLoop(eval_env, eval_actor, label='eval_loop')
assert FLAGS.num_steps % FLAGS.eval_every == 0
for _ in range(FLAGS.num_steps // FLAGS.eval_every):
eval_loop.run(num_episodes=5)
train_loop.run(num_steps=FLAGS.eval_every)
eval_loop.run(num_episodes=5)
if __name__ == '__main__':
app.run(main)
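# Example invocation (illustrative; the script name depends on where this file is
# saved, and the flag values are arbitrary):
#     python value_dice_example.py --env_name=MountainCarContinuous-v0 \
#         --num_steps=100000 --eval_every=10000 --seed=0
# num_steps must be a multiple of eval_every because of the assert in main().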
|
the-stack_0_12472 | #!/usr/bin/env python
__all__ = ['zhanqi_download']
from ..common import *
import re
def zhanqi_download(url, output_dir = '.', merge = True, info_only = False):
html = get_content(url)
rtmp_base_patt = r'VideoUrl":"([^"]+)"'
rtmp_id_patt = r'VideoID":"([^"]+)"'
title_patt = r'<p class="title-name" title="[^"]+">([^<]+)</p>'
title_patt_backup = r'<title>([^<]{1,9999})</title>'
rtmp_base = match1(html, rtmp_base_patt).replace('\\/','/')
rtmp_id = match1(html, rtmp_id_patt).replace('\\/','/')
title = match1(html, title_patt) or match1(html, title_patt_backup)
title = unescape_html(title)
real_url = rtmp_base+'/'+rtmp_id
print_info(site_info, title, 'flv', float('inf'))
if not info_only:
download_rtmp_url(real_url, title, 'flv', {}, output_dir, merge = merge)
site_info = "zhanqi.tv"
download = zhanqi_download
download_playlist = playlist_not_supported('zhanqi')
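# Illustrative usage sketch (the URL is a placeholder, not a real channel):
#     zhanqi_download('https://www.zhanqi.tv/some_channel', output_dir='.', info_only=True)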
|
the-stack_0_12474 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...vss_client import VssClient
from . import models
class IdentityClient(VssClient):
"""Identity
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(IdentityClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '8a3d49b8-91f0-46ef-b33d-dda338c25db3'
def create_or_bind_with_claims(self, source_identity):
"""CreateOrBindWithClaims.
[Preview API]
:param :class:`<Identity> <identity.v4_0.models.Identity>` source_identity:
:rtype: :class:`<Identity> <identity.v4_0.models.Identity>`
"""
content = self._serialize.body(source_identity, 'Identity')
response = self._send(http_method='PUT',
location_id='90ddfe71-171c-446c-bf3b-b597cd562afd',
version='4.0-preview.1',
content=content)
return self._deserialize('Identity', response)
def get_descriptor_by_id(self, id, is_master_id=None):
"""GetDescriptorById.
[Preview API]
:param str id:
:param bool is_master_id:
:rtype: :class:`<str> <identity.v4_0.models.str>`
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if is_master_id is not None:
query_parameters['isMasterId'] = self._serialize.query('is_master_id', is_master_id, 'bool')
response = self._send(http_method='GET',
location_id='a230389a-94f2-496c-839f-c929787496dd',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('str', response)
def create_groups(self, container):
"""CreateGroups.
:param :class:`<object> <identity.v4_0.models.object>` container:
:rtype: [Identity]
"""
content = self._serialize.body(container, 'object')
response = self._send(http_method='POST',
location_id='5966283b-4196-4d57-9211-1b68f41ec1c2',
version='4.0',
content=content,
returns_collection=True)
return self._deserialize('[Identity]', response)
def delete_group(self, group_id):
"""DeleteGroup.
:param str group_id:
"""
route_values = {}
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
self._send(http_method='DELETE',
location_id='5966283b-4196-4d57-9211-1b68f41ec1c2',
version='4.0',
route_values=route_values)
def list_groups(self, scope_ids=None, recurse=None, deleted=None, properties=None):
"""ListGroups.
:param str scope_ids:
:param bool recurse:
:param bool deleted:
:param str properties:
:rtype: [Identity]
"""
query_parameters = {}
if scope_ids is not None:
query_parameters['scopeIds'] = self._serialize.query('scope_ids', scope_ids, 'str')
if recurse is not None:
query_parameters['recurse'] = self._serialize.query('recurse', recurse, 'bool')
if deleted is not None:
query_parameters['deleted'] = self._serialize.query('deleted', deleted, 'bool')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='5966283b-4196-4d57-9211-1b68f41ec1c2',
version='4.0',
query_parameters=query_parameters,
returns_collection=True)
return self._deserialize('[Identity]', response)
def get_identity_changes(self, identity_sequence_id, group_sequence_id, scope_id=None):
"""GetIdentityChanges.
:param int identity_sequence_id:
:param int group_sequence_id:
:param str scope_id:
:rtype: :class:`<ChangedIdentities> <identity.v4_0.models.ChangedIdentities>`
"""
query_parameters = {}
if identity_sequence_id is not None:
query_parameters['identitySequenceId'] = self._serialize.query('identity_sequence_id', identity_sequence_id, 'int')
if group_sequence_id is not None:
query_parameters['groupSequenceId'] = self._serialize.query('group_sequence_id', group_sequence_id, 'int')
if scope_id is not None:
query_parameters['scopeId'] = self._serialize.query('scope_id', scope_id, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
query_parameters=query_parameters)
return self._deserialize('ChangedIdentities', response)
def get_user_identity_ids_by_domain_id(self, domain_id):
"""GetUserIdentityIdsByDomainId.
:param str domain_id:
:rtype: [str]
"""
query_parameters = {}
if domain_id is not None:
query_parameters['domainId'] = self._serialize.query('domain_id', domain_id, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
query_parameters=query_parameters,
returns_collection=True)
return self._deserialize('[str]', response)
def read_identities(self, descriptors=None, identity_ids=None, search_filter=None, filter_value=None, query_membership=None, properties=None, include_restricted_visibility=None, options=None):
"""ReadIdentities.
:param str descriptors:
:param str identity_ids:
:param str search_filter:
:param str filter_value:
:param str query_membership:
:param str properties:
:param bool include_restricted_visibility:
:param str options:
:rtype: [Identity]
"""
query_parameters = {}
if descriptors is not None:
query_parameters['descriptors'] = self._serialize.query('descriptors', descriptors, 'str')
if identity_ids is not None:
query_parameters['identityIds'] = self._serialize.query('identity_ids', identity_ids, 'str')
if search_filter is not None:
query_parameters['searchFilter'] = self._serialize.query('search_filter', search_filter, 'str')
if filter_value is not None:
query_parameters['filterValue'] = self._serialize.query('filter_value', filter_value, 'str')
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
if include_restricted_visibility is not None:
query_parameters['includeRestrictedVisibility'] = self._serialize.query('include_restricted_visibility', include_restricted_visibility, 'bool')
if options is not None:
query_parameters['options'] = self._serialize.query('options', options, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
query_parameters=query_parameters,
returns_collection=True)
return self._deserialize('[Identity]', response)
def read_identities_by_scope(self, scope_id, query_membership=None, properties=None):
"""ReadIdentitiesByScope.
:param str scope_id:
:param str query_membership:
:param str properties:
:rtype: [Identity]
"""
query_parameters = {}
if scope_id is not None:
query_parameters['scopeId'] = self._serialize.query('scope_id', scope_id, 'str')
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
query_parameters=query_parameters,
returns_collection=True)
return self._deserialize('[Identity]', response)
def read_identity(self, identity_id, query_membership=None, properties=None):
"""ReadIdentity.
:param str identity_id:
:param str query_membership:
:param str properties:
:rtype: :class:`<Identity> <identity.v4_0.models.Identity>`
"""
route_values = {}
if identity_id is not None:
route_values['identityId'] = self._serialize.url('identity_id', identity_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Identity', response)
def update_identities(self, identities):
"""UpdateIdentities.
:param :class:`<VssJsonCollectionWrapper> <identity.v4_0.models.VssJsonCollectionWrapper>` identities:
:rtype: [IdentityUpdateData]
"""
content = self._serialize.body(identities, 'VssJsonCollectionWrapper')
response = self._send(http_method='PUT',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
content=content,
returns_collection=True)
return self._deserialize('[IdentityUpdateData]', response)
def update_identity(self, identity, identity_id):
"""UpdateIdentity.
:param :class:`<Identity> <identity.v4_0.models.Identity>` identity:
:param str identity_id:
"""
route_values = {}
if identity_id is not None:
route_values['identityId'] = self._serialize.url('identity_id', identity_id, 'str')
content = self._serialize.body(identity, 'Identity')
self._send(http_method='PUT',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
route_values=route_values,
content=content)
def create_identity(self, framework_identity_info):
"""CreateIdentity.
:param :class:`<FrameworkIdentityInfo> <identity.v4_0.models.FrameworkIdentityInfo>` framework_identity_info:
:rtype: :class:`<Identity> <identity.v4_0.models.Identity>`
"""
content = self._serialize.body(framework_identity_info, 'FrameworkIdentityInfo')
response = self._send(http_method='PUT',
location_id='dd55f0eb-6ea2-4fe4-9ebe-919e7dd1dfb4',
version='4.0',
content=content)
return self._deserialize('Identity', response)
def read_identity_batch(self, batch_info):
"""ReadIdentityBatch.
[Preview API]
:param :class:`<IdentityBatchInfo> <identity.v4_0.models.IdentityBatchInfo>` batch_info:
:rtype: [Identity]
"""
content = self._serialize.body(batch_info, 'IdentityBatchInfo')
response = self._send(http_method='POST',
location_id='299e50df-fe45-4d3a-8b5b-a5836fac74dc',
version='4.0-preview.1',
content=content,
returns_collection=True)
return self._deserialize('[Identity]', response)
def get_identity_snapshot(self, scope_id):
"""GetIdentitySnapshot.
[Preview API]
:param str scope_id:
:rtype: :class:`<IdentitySnapshot> <identity.v4_0.models.IdentitySnapshot>`
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
response = self._send(http_method='GET',
location_id='d56223df-8ccd-45c9-89b4-eddf692400d7',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('IdentitySnapshot', response)
def get_max_sequence_id(self):
"""GetMaxSequenceId.
Read the max sequence id of all the identities.
:rtype: long
"""
response = self._send(http_method='GET',
location_id='e4a70778-cb2c-4e85-b7cc-3f3c7ae2d408',
version='4.0')
return self._deserialize('long', response)
def get_self(self):
"""GetSelf.
Read identity of the home tenant request user.
:rtype: :class:`<IdentitySelf> <identity.v4_0.models.IdentitySelf>`
"""
response = self._send(http_method='GET',
location_id='4bb02b5b-c120-4be2-b68e-21f7c50a4b82',
version='4.0')
return self._deserialize('IdentitySelf', response)
def add_member(self, container_id, member_id):
"""AddMember.
[Preview API]
:param str container_id:
:param str member_id:
:rtype: bool
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
response = self._send(http_method='PUT',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('bool', response)
def read_member(self, container_id, member_id, query_membership=None):
"""ReadMember.
[Preview API]
:param str container_id:
:param str member_id:
:param str query_membership:
:rtype: :class:`<str> <identity.v4_0.models.str>`
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
response = self._send(http_method='GET',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('str', response)
def read_members(self, container_id, query_membership=None):
"""ReadMembers.
[Preview API]
:param str container_id:
:param str query_membership:
:rtype: [str]
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
response = self._send(http_method='GET',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
returns_collection=True)
return self._deserialize('[str]', response)
def remove_member(self, container_id, member_id):
"""RemoveMember.
[Preview API]
:param str container_id:
:param str member_id:
:rtype: bool
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
response = self._send(http_method='DELETE',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('bool', response)
def read_member_of(self, member_id, container_id, query_membership=None):
"""ReadMemberOf.
[Preview API]
:param str member_id:
:param str container_id:
:param str query_membership:
:rtype: :class:`<str> <identity.v4_0.models.str>`
"""
route_values = {}
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
response = self._send(http_method='GET',
location_id='22865b02-9e4a-479e-9e18-e35b8803b8a0',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('str', response)
def read_members_of(self, member_id, query_membership=None):
"""ReadMembersOf.
[Preview API]
:param str member_id:
:param str query_membership:
:rtype: [str]
"""
route_values = {}
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
response = self._send(http_method='GET',
location_id='22865b02-9e4a-479e-9e18-e35b8803b8a0',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
returns_collection=True)
return self._deserialize('[str]', response)
def create_scope(self, info, scope_id):
"""CreateScope.
[Preview API]
:param :class:`<CreateScopeInfo> <identity.v4_0.models.CreateScopeInfo>` info:
:param str scope_id:
:rtype: :class:`<IdentityScope> <identity.v4_0.models.IdentityScope>`
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
content = self._serialize.body(info, 'CreateScopeInfo')
response = self._send(http_method='PUT',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='4.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('IdentityScope', response)
def delete_scope(self, scope_id):
"""DeleteScope.
[Preview API]
:param str scope_id:
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
self._send(http_method='DELETE',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='4.0-preview.1',
route_values=route_values)
def get_scope_by_id(self, scope_id):
"""GetScopeById.
[Preview API]
:param str scope_id:
:rtype: :class:`<IdentityScope> <identity.v4_0.models.IdentityScope>`
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
response = self._send(http_method='GET',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('IdentityScope', response)
def get_scope_by_name(self, scope_name):
"""GetScopeByName.
[Preview API]
:param str scope_name:
:rtype: :class:`<IdentityScope> <identity.v4_0.models.IdentityScope>`
"""
query_parameters = {}
if scope_name is not None:
query_parameters['scopeName'] = self._serialize.query('scope_name', scope_name, 'str')
response = self._send(http_method='GET',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='4.0-preview.1',
query_parameters=query_parameters)
return self._deserialize('IdentityScope', response)
def rename_scope(self, rename_scope, scope_id):
"""RenameScope.
[Preview API]
:param :class:`<IdentityScope> <identity.v4_0.models.IdentityScope>` rename_scope:
:param str scope_id:
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
content = self._serialize.body(rename_scope, 'IdentityScope')
self._send(http_method='PATCH',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='4.0-preview.1',
route_values=route_values,
content=content)
def get_signed_in_token(self):
"""GetSignedInToken.
[Preview API]
:rtype: :class:`<AccessTokenResult> <identity.v4_0.models.AccessTokenResult>`
"""
response = self._send(http_method='GET',
location_id='6074ff18-aaad-4abb-a41e-5c75f6178057',
version='4.0-preview.1')
return self._deserialize('AccessTokenResult', response)
def get_signout_token(self):
"""GetSignoutToken.
[Preview API]
:rtype: :class:`<AccessTokenResult> <identity.v4_0.models.AccessTokenResult>`
"""
response = self._send(http_method='GET',
location_id='be39e83c-7529-45e9-9c67-0410885880da',
version='4.0-preview.1')
return self._deserialize('AccessTokenResult', response)
def get_tenant(self, tenant_id):
"""GetTenant.
[Preview API]
:param str tenant_id:
:rtype: :class:`<TenantInfo> <identity.v4_0.models.TenantInfo>`
"""
route_values = {}
if tenant_id is not None:
route_values['tenantId'] = self._serialize.url('tenant_id', tenant_id, 'str')
response = self._send(http_method='GET',
location_id='5f0a1723-2e2c-4c31-8cae-002d01bdd592',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('TenantInfo', response)
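# Illustrative usage sketch (not part of the generated client). The credentials
# object is whatever msrest-compatible authentication the surrounding SDK expects;
# BasicAuthentication with a personal access token is shown here as an assumption,
# and the base URL is a placeholder.
#     from msrest.authentication import BasicAuthentication
#     creds = BasicAuthentication('', personal_access_token)
#     client = IdentityClient(base_url='https://fabrikam.vssps.visualstudio.com', creds=creds)
#     me = client.get_self()
#     matches = client.read_identities(search_filter='General', filter_value='jsmith@example.com')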
|
the-stack_0_12476 |
import os
import zipfile
from datetime import datetime
from functools import reduce  # reduce is not a builtin in Python 3; needed by find()
import numpy as np
import spacy
import twokenize
from nlplingo.common.utils import IntPair
from nlplingo.sandbox.misc.train_test import generate_argument_data_feature
from nlplingo.sandbox.misc.train_test import generate_trigger_data_feature
from nlplingo.sandbox.misc.train_test import get_predicted_positive_triggers
from nlplingo.text.text_span import EntityMention
from nlplingo.text.text_theory import Document
# from pyspark import SparkContext, SparkConf
# from ctypes import *
global spacy_en
global tagger_blog
global tagger_tweet
global tagger_news
global tagger_dw
#sys.path.append('/nfs/mercury-04/u40/ychan/spark/ner/crfsuite')
#sc.addPyFile('/nfs/mercury-04/u40/ychan/spark/ner/crfsuite/crfsuite.py')
#cdll.LoadLibrary('/nfs/mercury-04/u40/ychan/spark/ner/crfsuite/libcrfsuite-0.12.so')
#import crfsuite
class Token(object):
"""An individual word token.
"""
# idx : starting char offset
def __init__(self, text, idx, pos_tag=None):
self.text = text
self.idx = idx
self.tag_ = pos_tag
class Decoder(object):
#sys.path.append('/nfs/mercury-04/u40/ychan/spark/ner/crfsuite')
#cdll.LoadLibrary('/nfs/mercury-04/u40/ychan/spark/ner/crfsuite/libcrfsuite-0.12.so')
#import crfsuite
# python_path: /nfs/mercury-04/u40/ychan/spark/ner/crfsuite
# libcrfsuite_so: /nfs/mercury-04/u40/ychan/spark/ner/crfsuite/libcrfsuite-0.12.so
# model_file: /nfs/mercury-04/u40/ychan/ner/model/twitter.cv1.model
def __init__(self, params):
#sys.path.append(python_path)
#for library in libcrfsuite_so_libs:
# cdll.LoadLibrary(library)
#import crfsuite as crfsuite
#self.crfsuite = crfsuite
import pycrfsuite as pycrfsuite
self.pycrfsuite = pycrfsuite
self.model_blog = params['crf_models']['blog']
self.model_tweet = params['crf_models']['tweet']
self.model_news = params['crf_models']['news']
self.model_dw = params['crf_models']['dw']
if 'resources.zip' in params:
if os.path.isfile(params['resources.zip']) and not os.path.isdir(params['crf_models']['dir']):
zip_ref = zipfile.ZipFile(params['resources.zip'], 'r')
zip_ref.extractall()
zip_ref.close()
def instances(self, fi):
xseq = []
for line in fi:
fields = line.split('\t')
item = {}
for field in fields[1:]:
sfield = field.encode('ascii', 'replace')
p = sfield.rfind(':')
if p == -1:
# Unweighted (weight=1) attribute.
item[sfield] = 1.0
elif (p+1) >= len(sfield):
item[sfield] = 1.0
else:
try:
weight = float(sfield[p+1:])
item[sfield[:p]] = weight
except ValueError:
item[sfield] = 1.0
xseq.append(item)
return self.pycrfsuite.ItemSequence(xseq)
#def instances(self, fi):
# xseq = self.crfsuite.ItemSequence()
#
# for line in fi:
# # Split the line with TAB characters.
# fields = line.split('\t')
# item = self.crfsuite.Item()
# for field in fields[1:]:
# #print('field %s' % (field))
# sfield = field.encode('ascii','replace')
# #print('sfield %s' % (sfield))
# p = sfield.rfind(':')
# if p == -1:
# # Unweighted (weight=1) attribute.
# #print('field:{} type(field):{}'.format(field, type(field)))
# #print(type(field))
# #field_string = field.encode('ascii','replace')
# #item.append(self.crfsuite.Attribute(field_string))
# item.append(self.crfsuite.Attribute(sfield))
# elif (p+1) >= len(sfield):
# item.append(self.crfsuite.Attribute(sfield))
# else:
# try:
# weight = float(sfield[p+1:])
# item.append(self.crfsuite.Attribute(sfield[:p], weight))
# except ValueError:
# item.append(self.crfsuite.Attribute(sfield))
# #print field
# # Weighted attribute
# #item.append(self.crfsuite.Attribute(sfield[:p], float(sfield[p+1:])))
# # Append the item to the item sequence.
# xseq.append(item)
#
# return xseq
# Blog , Conference , SocialMediaPosting
def get_content_tagger(self, xseq, content_type):
global tagger_blog
global tagger_tweet
global tagger_news
global tagger_dw
if content_type == 'Blog':
try:
tagger_blog.set(xseq)
except:
tagger_blog = self.pycrfsuite.Tagger()
tagger_blog.open(self.model_blog)
print('**** Loaded blog NER model %s' % (self.model_blog))
tagger_blog.set(xseq)
return tagger_blog
elif content_type == 'SocialMediaPosting':
try:
tagger_tweet.set(xseq)
except:
tagger_tweet = self.pycrfsuite.Tagger()
tagger_tweet.open(self.model_tweet)
print('**** Loaded tweet NER model %s' % (self.model_tweet))
tagger_tweet.set(xseq)
return tagger_tweet
elif content_type == 'NewsArticle':
try:
tagger_news.set(xseq)
except:
tagger_news = self.pycrfsuite.Tagger()
tagger_news.open(self.model_news)
print('**** Loaded news NER model %s' % (self.model_news))
tagger_news.set(xseq)
return tagger_news
elif content_type == 'Post':
try:
tagger_dw.set(xseq)
except:
tagger_dw = self.pycrfsuite.Tagger()
tagger_dw.open(self.model_dw)
print('**** Loaded dw NER model %s' % (self.model_dw))
tagger_dw.set(xseq)
return tagger_dw
def tag_seq(self, xseq, content_type):
tagger = self.get_content_tagger(xseq, content_type)
return tagger.tag()
def collect_predictions(content, predictions, char_offsets):
ret = []
i = 0
while i < len(predictions):
p = predictions[i]
if p.startswith('B-'):
label = p[2:]
(start, end) = char_offsets[i]
while (i+1) < len(predictions) and predictions[i+1] == 'I-'+label:
i += 1
end = char_offsets[i][1]
# these are when we mix in ACE and Blog annotations. ACE tags 'ORG', Blog tags 'ORGANIZATION'
if label == 'ORG':
label = 'ORGANIZATION'
if label == 'PER':
label = 'PERSON'
d = {}
d['start'] = start
d['end'] = end
d['label'] = label
d['text'] = content[start:end]
d['extractor'] = 'nlplingo.ner'
ret.append(d)
i += 1
return ret
# A line could be a paragraph consisting of multiple sentences.
# We will get the correct definition of sentences according to whether this is blog, tweet, etc.
def get_sentences(line, content_type):
global spacy_en
if content_type == 'SocialMediaPosting':
sentences = []
start_offset = 0
sent = []
for token in twokenize.tokenize(line[:-1]):
idx = line.index(token, start_offset)
sent.append(Token(token, idx))
start_offset = idx + len(token)
sentences.append(sent)
return sentences
elif content_type == 'Blog' or content_type == 'NewsArticle' or content_type == 'Post':
try:
spacy_doc = spacy_en(line)
except:
spacy_en = spacy.load('en')
print('**** Loaded spacy en')
spacy_doc = spacy_en(line)
return spacy_doc.sents
def decode_sentence(ner_fea, dec, content, sent, offset, content_type):
"""
:type ner_fea: ner.ner_feature.NerFeature
:type dec: ner.decoder.Decoder
:type content: str
:type offset: int
:type content_type: str
sent: spacy sentence
Returns:
list[dict()]
content_type: 'Blog' , 'SocialMediaPosting' , 'NewsArticle' (will decide which NER feature set to use)
"""
tokens = [t for t in sent if len(t.text) > 0]
# a list, 1 element for each word in line
# each element is a tab separate features, except the 1st element which is a dummy label
word_feas = line_to_features(ner_fea, tokens, content_type) # content_type decides which NER feature set to use
word_seq = dec.instances(word_feas) # of type pycrfsuite.ItemSequence
predictions = dec.tag_seq(word_seq, content_type) # content_type decides which NER model to load
char_offsets = []
for token in tokens:
start = token.idx + offset
end = start + len(token.text)
char_offsets.append((start, end))
assert (len(char_offsets) == len(predictions)), 'len(char_offsets) should match len(predictions)'
# returns a dict with keys: start, end, label, text, extractor
return collect_predictions(content, predictions, char_offsets)
def find(element, json):
x = reduce(lambda d, key: d.get(key, {}), element.split("."), json)
if any(x) is True:
return x
return None
# line : a json string
def line_to_predictions(ner_fea, dec, json_eg, attr, content_type, word_embeddings, trigger_generator, trigger_model, arg_generator, argument_model, event_domain):
"""
:type word_embeddings: embeddings.word_embeddings.WordEmbedding
:type trigger_generator: tasks.event_trigger.EventTriggerExampleGenerator
:type trigger_model: model.event_cnn.ExtractionModel
:type arg_generator: tasks.event_argument.EventArgumentExampleGenerator
    :type argument_model: model.event_cnn.ExtractionModel
"""
global spacy_en
content = find(attr, json_eg) # json_eg.get(attr)
print(content_type.encode('ascii', 'ignore'))
print(content.encode('ascii', 'ignore'))
offset = 0
all_predictions = []
if content is not None:
if type(content) is list:
content = '\n'.join(content)
for line in content.split('\n'):
#print(offset)
#print('[' + content_type.encode('ascii', 'ignore') + ']')
#print('[' + line.encode('ascii', 'ignore') + ']')
            d = {}  # initialize the record (was missing); collects the raw line alongside the predictions
            d['line'] = line
            all_predictions.append(d)
doc_ner_predictions = []
sentences = get_sentences(line, content_type)
if sentences is not None:
for sent in sentences:
sent_predictions = decode_sentence(ner_fea, dec, content, sent, offset, content_type)
doc_ner_predictions.extend(sent_predictions)
all_predictions.extend(sent_predictions)
if content_type == 'Blog':
print('*** content_type == Blog ***')
print(line.encode('ascii', 'ignore'))
doc = Document('dummy', line)
for i, p in enumerate(doc_ner_predictions):
id = 'em-{}'.format(i)
# we need to minus 'offset', because we are splitting the original 'content' into several 'line(s)'
# then we pass each 'line' to make a Document object. But p[start], p[end] are with respect to the
# 'content', so you need to minus 'offset' in order to make the 2 sets of offsets match
doc.add_entity_mention(EntityMention(id, IntPair(int(p['start'])-offset, int(p['end'])-offset), p['text'], p['label']))
doc.annotate_sentences(word_embeddings, spacy_en)
print('added {} NER'.format(len(doc_ner_predictions)))
(trigger_examples, trigger_data, trigger_data_list, trigger_label) = generate_trigger_data_feature(trigger_generator, [doc])
print('Generated {} trigger_examples, at {}'.format(len(trigger_examples), datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
if len(trigger_examples) > 0:
trigger_predictions = trigger_model.predict(trigger_data_list)
predicted_positive_triggers_map = get_predicted_positive_triggers(trigger_predictions, trigger_examples, event_domain.get_event_type_index('None'), event_domain)
# the above is organized by docid, let's now expand to get the actual eventtrigger examples
predicted_positive_triggers = []
""":type list[nlplingo.tasks.event_trigger.EventTriggerExample]"""
for docid in predicted_positive_triggers_map.keys():
predicted_positive_triggers.extend(predicted_positive_triggers_map[docid])
print('Predicted {} positive triggers, at {}'.format(len(predicted_positive_triggers), datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
for trigger_eg in predicted_positive_triggers:
print('trigger_eg %s (%s,%s) %s' % (trigger_eg.token.text, str(trigger_eg.token.start_char_offset()), str(trigger_eg.token.end_char_offset()), trigger_eg.event_type))
if len(predicted_positive_triggers) > 0:
if argument_model is None:
for eg in predicted_positive_triggers:
d = {}
d['docid'] = eg.sentence.docid
d['start'] = eg.anchor.start_char_offset()
d['end'] = eg.anchor.end_char_offset()
d['text'] = eg.anchor.text
all_predictions.append(d)
else:
# generate arguments with predicted triggers
(arg_examples_pt, arg_data_pt, arg_data_list_pt, arg_label_pt) = generate_argument_data_feature(arg_generator, [doc], params=None, predicted_triggers=predicted_positive_triggers_map)
#print('formed {} tasks eventargument examples'.format(len(arg_examples_pt)))
if len(arg_examples_pt) > 0:
# decode arguments with predicted triggers
argument_predictions_pt = argument_model.predict(arg_data_list_pt)
pred_arg_max = np.argmax(argument_predictions_pt, axis=1)
#predicted_events = defaultdict(list) # to collate by anchor
for i, predicted_label in enumerate(pred_arg_max):
if predicted_label != event_domain.get_event_role_index('None'):
eg = arg_examples_pt[i]
""":type: tasks.event_argument.EventArgumentExample"""
predicted_role = event_domain.get_event_role_from_index(predicted_label)
# print('{} || {} || {}'.format(predicted_role, eg.anchor.to_string(), eg.eventargument.to_string()))
#predicted_events[eg.anchor].append(EventArgument('dummy', eg.eventargument, predicted_role))
#print('argument_eg %s (%s,%s) %s' % (eg.eventargument.text, str(eg.eventargument.start_char_offset()), str(eg.eventargument.end_char_offset()), '{}.{}'.format(eg.anchor.label, predicted_role)))
d = {}
d['start'] = eg.argument.start_char_offset() + offset
d['end'] = eg.argument.end_char_offset() + offset
d['label'] = '{}.{}'.format(eg.anchor.label, predicted_role)
d['text'] = eg.argument.text
d['extractor'] = 'nlplingo.network'
all_predictions.append(d)
offset += len(line) + 1 # +1 to account for newline
# a list of dict, one for each predicted NE mention
if len(all_predictions) > 0:
if not "extractions" in json_eg:
json_eg["extractions"] = {}
json_eg['extractions'][attr] = all_predictions
return json_eg
# for each word in sent, return: label \tab (\tab separated list of features). If a feature is weighted, it will be like (.*):weight
def line_to_features(ner_fea, sent, content_type):
d = ('', '', '')
seq = [d, d]
for token in sent:
#start = token.idx
#print(token.text.encode('ascii', 'ignore'))
pos_tag = 'NN' if token.tag_ is None else token.tag_
seq.append((ner_fea.encode(token.text), pos_tag, 'DUMMY-tag'))
seq.append(d)
seq.append(d)
return ner_fea.extract_features(seq, content_type)
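# Illustrative end-to-end sketch (not part of the original module; `params` and
# `ner_fea` stand in for a real config dict and ner.ner_feature.NerFeature object):
#     dec = Decoder(params)                    # loads the per-genre CRF models lazily
#     for sent in get_sentences(line, 'Blog'):
#         preds = decode_sentence(ner_fea, dec, content, sent, offset, 'Blog')
#         # each prediction dict has keys: start, end, label, text, extractor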
|
the-stack_0_12477 | from typing import List, Optional
import attr
from casexml.apps.case.xform import extract_case_blocks
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.motech.value_source import CaseTriggerInfo
@attr.s
class RepeaterResponse:
"""
Ducktypes an HTTP response for Repeater.handle_response(),
RepeatRecord.handle_success() and RepeatRecord.handle_failure()
"""
status_code = attr.ib()
reason = attr.ib()
text = attr.ib(default="")
retry = attr.ib(default=True)
def get_relevant_case_updates_from_form_json(
domain: str,
form_json: dict,
case_types: list,
extra_fields: list,
form_question_values: Optional[dict] = None,
) -> List[CaseTriggerInfo]:
result = []
case_blocks = extract_case_blocks(form_json)
case_ids = [case_block['@case_id'] for case_block in case_blocks]
cases = CaseAccessors(domain).get_cases(case_ids, ordered=True)
db_case_dict = {case.case_id: case for case in cases}
for case_block in case_blocks:
case = db_case_dict[case_block['@case_id']]
if case_types and case.type not in case_types:
continue
case_create = case_block.get('create') or {}
case_update = case_block.get('update') or {}
result.append(CaseTriggerInfo(
domain=domain,
case_id=case_block['@case_id'],
type=case.type,
name=case.name,
owner_id=case.owner_id,
modified_by=case.modified_by,
updates={**case_create, **case_update},
created='create' in case_block,
closed='close' in case_block,
extra_fields={f: case.get_case_property(f) for f in extra_fields},
form_question_values=form_question_values or {},
))
return result
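# Illustrative usage sketch (form_json is a submitted form as a dict; the domain and
# case type values are placeholders):
#     infos = get_relevant_case_updates_from_form_json(
#         'my-domain', form_json, case_types=['patient'],
#         extra_fields=['external_id'])
#     # each CaseTriggerInfo carries the case id, the create/update properties,
#     # and the requested extra_fields values.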
|
the-stack_0_12480 | #!/usr/bin/env python
import unittest
import warnings
import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.groups import PointGroup, SpaceGroup, _get_symm_data
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "4/10/14"
class PointGroupTest(unittest.TestCase):
def test_order(self):
order = {"mmm": 8, "432": 24, "-6m2": 12}
for k, v in order.items():
pg = PointGroup(k)
self.assertEqual(order[k], len(pg.symmetry_ops))
def test_get_orbit(self):
pg = PointGroup("mmm")
self.assertEqual(len(pg.get_orbit([0.1, 0.1, 0.1])), 8)
self.assertEqual(len(pg.get_orbit([0, 0, 0.1])), 2)
self.assertEqual(len(pg.get_orbit([1.2, 1.2, 1])), 8)
def test_is_sub_super_group(self):
with warnings.catch_warnings() as w:
warnings.simplefilter("ignore")
pgmmm = PointGroup("mmm")
pgmm2 = PointGroup("mm2")
pg222 = PointGroup("222")
pg4 = PointGroup("4")
self.assertTrue(pgmmm.is_supergroup(pgmm2))
self.assertTrue(pgmm2.is_subgroup(pgmmm))
self.assertTrue(pgmmm.is_supergroup(pg222))
self.assertFalse(pgmmm.is_supergroup(pg4))
pgm3m = PointGroup("m-3m")
pg6mmm = PointGroup("6/mmm")
pg3m = PointGroup("-3m")
# TODO: Fix the test below.
# self.assertTrue(pg3m.is_subgroup(pgm3m))
self.assertTrue(pg3m.is_subgroup(pg6mmm))
self.assertFalse(pgm3m.is_supergroup(pg6mmm))
class SpaceGroupTest(unittest.TestCase):
def test_renamed_e_symbols(self):
sg = SpaceGroup.from_int_number(64)
assert sg.symbol == "Cmce"
for sym, num in (
("Aem2", 39),
("Aea2", 41),
("Cmce", 64),
("Cmme", 67),
("Ccce", 68),
):
assert SpaceGroup(sym).int_number == num
def test_abbrev_symbols(self):
sg = SpaceGroup("P2/c")
self.assertEqual(sg.int_number, 13)
sg = SpaceGroup("R-3mH")
self.assertEqual(sg.int_number, 166)
def test_attr(self):
sg = SpaceGroup("Fm-3m")
self.assertEqual(sg.full_symbol, "F4/m-32/m")
self.assertEqual(sg.point_group, "m-3m")
def test_point_group_is_set(self):
for i in range(1, 231):
sg = SpaceGroup.from_int_number(i)
self.assertTrue(hasattr(sg, "point_group"))
for symbol in _get_symm_data("space_group_encoding"):
sg = SpaceGroup(symbol)
self.assertTrue(hasattr(sg, "point_group"))
def test_full_symbols(self):
sg = SpaceGroup("P2/m2/m2/m")
self.assertEqual(sg.symbol, "Pmmm")
def test_order_symm_ops(self):
for name in SpaceGroup.SG_SYMBOLS:
sg = SpaceGroup(name)
self.assertEqual(len(sg.symmetry_ops), sg.order)
def test_get_settings(self):
self.assertEqual({"Fm-3m(a-1/4,b-1/4,c-1/4)", "Fm-3m"}, SpaceGroup.get_settings("Fm-3m"))
self.assertEqual(
{
"Pmmn",
"Pmnm:1",
"Pnmm:2",
"Pmnm:2",
"Pnmm",
"Pnmm:1",
"Pmmn:1",
"Pmnm",
"Pmmn:2",
},
SpaceGroup.get_settings("Pmmn"),
)
self.assertEqual(
{"Pnmb", "Pman", "Pncm", "Pmna", "Pcnm", "Pbmn"},
SpaceGroup.get_settings("Pmna"),
)
def test_crystal_system(self):
sg = SpaceGroup("R-3c")
self.assertEqual(sg.crystal_system, "trigonal")
sg = SpaceGroup("R-3cH")
self.assertEqual(sg.crystal_system, "trigonal")
def test_get_orbit(self):
sg = SpaceGroup("Fm-3m")
p = np.random.randint(0, 100 + 1, size=(3,)) / 100
self.assertLessEqual(len(sg.get_orbit(p)), sg.order)
def test_is_compatible(self):
cubic = Lattice.cubic(1)
hexagonal = Lattice.hexagonal(1, 2)
rhom = Lattice.rhombohedral(3, 80)
tet = Lattice.tetragonal(1, 2)
ortho = Lattice.orthorhombic(1, 2, 3)
sg = SpaceGroup("Fm-3m")
self.assertTrue(sg.is_compatible(cubic))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("R-3m:H")
self.assertFalse(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(hexagonal))
sg = SpaceGroup("R-3m:R")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("Pnma")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("P12/c1")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("P-1")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertTrue(sg.is_compatible(rhom))
self.assertTrue(sg.is_compatible(hexagonal))
sg = SpaceGroup("Pmmn:2")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup.from_int_number(165)
self.assertFalse(sg.is_compatible(cubic))
self.assertFalse(sg.is_compatible(tet))
self.assertFalse(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertTrue(sg.is_compatible(hexagonal))
def test_symmops(self):
sg = SpaceGroup("Pnma")
op = SymmOp.from_rotation_and_translation([[1, 0, 0], [0, -1, 0], [0, 0, -1]], [0.5, 0.5, 0.5])
self.assertIn(op, sg.symmetry_ops)
def test_other_settings(self):
sg = SpaceGroup("Pbnm")
self.assertEqual(sg.int_number, 62)
self.assertEqual(sg.order, 8)
self.assertRaises(ValueError, SpaceGroup, "hello")
def test_subgroup_supergroup(self):
with warnings.catch_warnings() as w:
warnings.simplefilter("ignore")
self.assertTrue(SpaceGroup("Pma2").is_subgroup(SpaceGroup("Pccm")))
self.assertFalse(SpaceGroup.from_int_number(229).is_subgroup(SpaceGroup.from_int_number(230)))
def test_hexagonal(self):
sgs = [146, 148, 155, 160, 161, 166, 167]
for sg in sgs:
s = SpaceGroup.from_int_number(sg, hexagonal=False)
self.assertFalse(s.symbol.endswith("H"))
def test_string(self):
sg = SpaceGroup("R-3c")
self.assertEqual(sg.to_latex_string(), r"R$\overline{3}$cH")
sg = SpaceGroup("P6/mmm")
self.assertEqual(sg.to_latex_string(), "P6/mmm")
sg = SpaceGroup("P4_1")
self.assertEqual(sg.to_unicode_string(), "P4₁")
if __name__ == "__main__":
unittest.main()
|
the-stack_0_12481 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ast
import pkg_resources
import threading
import argparse
import logging
import signal
import errno
import json
import stat
import os
import shutil
from contextlib import contextmanager
from uuid import uuid4
from yaml import safe_load
from ansible_runner import run
from ansible_runner import output
from ansible_runner.utils import dump_artifact, Bunch
from ansible_runner.runner import Runner
from ansible_runner.exceptions import AnsibleRunnerException
VERSION = pkg_resources.require("ansible_runner")[0].version
DEFAULT_ROLES_PATH = os.getenv('ANSIBLE_ROLES_PATH', None)
DEFAULT_RUNNER_BINARY = os.getenv('RUNNER_BINARY', None)
DEFAULT_RUNNER_PLAYBOOK = os.getenv('RUNNER_PLAYBOOK', None)
DEFAULT_RUNNER_ROLE = os.getenv('RUNNER_ROLE', None)
DEFAULT_RUNNER_MODULE = os.getenv('RUNNER_MODULE', None)
logger = logging.getLogger('ansible-runner')
@contextmanager
def role_manager(args):
if args.role:
role = {'name': args.role}
if args.role_vars:
role_vars = {}
for item in args.role_vars.split():
key, value = item.split('=')
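# interpret the value as a Python literal (int, bool, list, ...) when
# possible; otherwise keep it as a plain string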
try:
role_vars[key] = ast.literal_eval(value)
except Exception:
role_vars[key] = value
role['vars'] = role_vars
kwargs = Bunch(**args.__dict__)
kwargs.update(private_data_dir=args.private_data_dir,
json_mode=args.json,
ignore_logging=False,
rotate_artifacts=args.rotate_artifacts)
if args.artifact_dir:
kwargs.artifact_dir = args.artifact_dir
project_path = os.path.join(args.private_data_dir, 'project')
project_exists = os.path.exists(project_path)
env_path = os.path.join(args.private_data_dir, 'env')
env_exists = os.path.exists(env_path)
envvars_path = os.path.join(args.private_data_dir, 'env/envvars')
envvars_exists = os.path.exists(envvars_path)
if args.cmdline:
kwargs.cmdline = args.cmdline
playbook = None
tmpvars = None
play = [{'hosts': args.hosts if args.hosts is not None else "all",
'gather_facts': not args.role_skip_facts,
'roles': [role]}]
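# wrap the role in a throwaway one-play playbook so it can be executed
# like any other playbook run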
filename = str(uuid4().hex)
playbook = dump_artifact(json.dumps(play), project_path, filename)
kwargs.playbook = playbook
output.debug('using playbook file %s' % playbook)
if args.inventory:
inventory_file = os.path.join(args.private_data_dir, 'inventory', args.inventory)
if not os.path.exists(inventory_file):
raise AnsibleRunnerException('location specified by --inventory does not exist')
kwargs.inventory = inventory_file
output.debug('using inventory file %s' % inventory_file)
roles_path = args.roles_path or os.path.join(args.private_data_dir, 'roles')
roles_path = os.path.abspath(roles_path)
output.debug('setting ANSIBLE_ROLES_PATH to %s' % roles_path)
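# merge the computed roles path into any user-supplied env/envvars so the
# generated play can locate the role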
envvars = {}
if envvars_exists:
with open(envvars_path, 'rb') as f:
tmpvars = f.read()
new_envvars = safe_load(tmpvars)
if new_envvars:
envvars = new_envvars
envvars['ANSIBLE_ROLES_PATH'] = roles_path
kwargs.envvars = envvars
else:
kwargs = args
yield kwargs
if args.role:
if not project_exists and os.path.exists(project_path):
logger.debug('removing dynamically generated project folder')
shutil.rmtree(project_path)
elif playbook and os.path.isfile(playbook):
logger.debug('removing dynamically generated playbook')
os.remove(playbook)
# if a previous envvars existed in the private_data_dir,
# restore the original file contents
if tmpvars:
with open(envvars_path, 'wb') as f:
f.write(tmpvars)
elif not envvars_exists and os.path.exists(envvars_path):
logger.debug('removing dynamically generated envvars folder')
os.remove(envvars_path)
# since ansible-runner created the env folder, remove it
if not env_exists and os.path.exists(env_path):
logger.debug('removing dynamically generated env folder')
shutil.rmtree(env_path)
def main(sys_args=None):
parser = argparse.ArgumentParser(description='manage ansible execution')
parser.add_argument('--version', action='version', version=VERSION)
parser.add_argument('command', choices=['run', 'start', 'stop', 'is-alive'])
parser.add_argument('private_data_dir',
help='Base directory containing Runner metadata (project, inventory, etc.)')
group = parser.add_mutually_exclusive_group()
group.add_argument("-m", "--module", default=DEFAULT_RUNNER_MODULE,
help="Invoke an Ansible module directly without a playbook")
group.add_argument("-p", "--playbook", default=DEFAULT_RUNNER_PLAYBOOK,
help="The name of the playbook to execute")
group.add_argument("-r", "--role", default=DEFAULT_RUNNER_ROLE,
help="Invoke an Ansible role directly without a playbook")
parser.add_argument("-b", "--binary", default=DEFAULT_RUNNER_BINARY,
help="The full path to ansible[-playbook] binary")
parser.add_argument("--hosts",
help="Define the set of hosts to execute against")
parser.add_argument("-i", "--ident",
default=uuid4(),
help="An identifier that will be used when generating the"
"artifacts directory and can be used to uniquely identify a playbook run")
parser.add_argument("--rotate-artifacts",
default=0,
type=int,
help="Automatically clean up old artifact directories after a given number has been created, the default is 0 which disables rotation")
parser.add_argument("--roles-path", default=DEFAULT_ROLES_PATH,
help="Path to the Ansible roles directory")
parser.add_argument("--role-vars",
help="Variables to pass to the role at runtime")
parser.add_argument("--role-skip-facts", action="store_true", default=False,
help="Disable fact collection when executing a role directly")
parser.add_argument("--artifact-dir",
help="Optional Path for the artifact root directory, by default it is located inside the private data dir")
parser.add_argument("--inventory",
help="Override the default inventory location in private_data_dir")
parser.add_argument("-j", "--json", action="store_true",
help="Output the json event structure to stdout instead of Ansible output")
parser.add_argument("-v", action="count",
help="Increase the verbosity with multiple v's (up to 5) of the ansible-playbook output")
parser.add_argument("-q", "--quiet", action="store_true",
help="Disable all output")
parser.add_argument("--cmdline",
help="Command line options to pass to ansible-playbook at execution time")
parser.add_argument("--debug", action="store_true",
help="Enable Runner debug output logging")
parser.add_argument("--logfile",
help="Log output messages to a file")
parser.add_argument("-a", "--args", dest='module_args',
help="Module arguments")
parser.add_argument("--process-isolation", dest='process_isolation', action="store_true",
help="Limits what directories on the filesystem the playbook run has access to, defaults to /tmp")
parser.add_argument("--process-isolation-executable", dest='process_isolation_executable', default="bwrap",
help="Process isolation executable that will be used. Defaults to bwrap")
parser.add_argument("--process-isolation-path", dest='process_isolation_path', default="/tmp",
help="Path that an isolated playbook run will use for staging. Defaults to /tmp")
parser.add_argument("--process-isolation-hide-paths", dest='process_isolation_hide_paths',
help="List of paths on the system that should be hidden from the playbook run")
parser.add_argument("--process-isolation-show-paths", dest='process_isolation_show_paths',
help="List of paths on the system that should be exposed to the playbook run")
parser.add_argument("--process-isolation-ro-paths", dest='process_isolation_ro_paths',
help="List of paths on the system that should be exposed to the playbook run as read-only")
args = parser.parse_args(sys_args)
output.configure()
# enable or disable debug mode
output.set_debug('enable' if args.debug else 'disable')
# set the output logfile
if args.logfile:
output.set_logfile(args.logfile)
output.debug('starting debug logging')
# get the absolute path for start since it is a daemon
args.private_data_dir = os.path.abspath(args.private_data_dir)
pidfile = os.path.join(args.private_data_dir, 'pid')
try:
os.makedirs(args.private_data_dir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
pass
else:
raise
if args.command != 'run':
stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
if not os.path.exists(stderr_path):
os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
stderr = open(stderr_path, 'w+')
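# 'start' detaches into a daemon process; 'run' executes in the foreground,
# serialized by a simple lock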
if args.command in ('start', 'run'):
if args.command == 'start':
import daemon
from daemon.pidfile import TimeoutPIDLockFile
context = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pidfile),
stderr=stderr
)
else:
context = threading.Lock()
with context:
with role_manager(args) as args:
if args.inventory:
with open(args.inventory) as f:
inventory_data = f.read()
else:
inventory_data = None
run_options = dict(private_data_dir=args.private_data_dir,
ident=args.ident,
binary=args.binary,
playbook=args.playbook,
module=args.module,
module_args=args.module_args,
host_pattern=args.hosts,
verbosity=args.v,
quiet=args.quiet,
rotate_artifacts=args.rotate_artifacts,
ignore_logging=False,
json_mode=args.json,
inventory=inventory_data,
roles_path=[args.roles_path] if args.roles_path else None,
process_isolation=args.process_isolation,
process_isolation_executable=args.process_isolation_executable,
process_isolation_path=args.process_isolation_path,
process_isolation_hide_paths=args.process_isolation_hide_paths,
process_isolation_show_paths=args.process_isolation_show_paths,
process_isolation_ro_paths=args.process_isolation_ro_paths)
if args.cmdline:
run_options['cmdline'] = args.cmdline
res = run(**run_options)
return(res.rc)
try:
with open(pidfile, 'r') as f:
pid = int(f.readline())
except IOError:
return(1)
if args.command == 'stop':
Runner.handle_termination(pid)
return (0)
elif args.command == 'is-alive':
try:
os.kill(pid, signal.SIG_DFL)
return(0)
except OSError:
return(1)
|
the-stack_0_12482 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
#
import decimal
import json
import logging
import os
import pickle
import time
from datetime import date, datetime
from typing import TYPE_CHECKING, List, NamedTuple
import mock
import pytest
import pytz
import snowflake.connector
from snowflake.connector import (
DictCursor,
InterfaceError,
NotSupportedError,
ProgrammingError,
constants,
errorcode,
errors,
)
from snowflake.connector.compat import BASE_EXCEPTION_CLASS, IS_WINDOWS
from snowflake.connector.cursor import SnowflakeCursor
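# ResultMetadata may not be importable (e.g. on older connector versions);
# fall back to a local NamedTuple with the same fields so the schema
# assertions below still run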
try:
from snowflake.connector.cursor import ResultMetadata
except ImportError:
class ResultMetadata(NamedTuple):
name: str
type_code: int
display_size: int
internal_size: int
precision: int
scale: int
is_nullable: bool
from snowflake.connector.errorcode import (
ER_FAILED_TO_REWRITE_MULTI_ROW_INSERT,
ER_INVALID_VALUE,
ER_NOT_POSITIVE_SIZE,
)
from snowflake.connector.sqlstate import SQLSTATE_FEATURE_NOT_SUPPORTED
from snowflake.connector.telemetry import TelemetryField
from ..randomize import random_string
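# these Arrow-related names may be missing (e.g. on older connector
# versions); default them to None so the module still imports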
try:
from snowflake.connector.constants import (
PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT,
)
from snowflake.connector.errorcode import (
ER_NO_ARROW_RESULT,
ER_NO_PYARROW,
ER_NO_PYARROW_SNOWSQL,
)
from snowflake.connector.result_batch import ArrowResultBatch, JSONResultBatch
except ImportError:
PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT = None
ER_NO_ARROW_RESULT = None
ER_NO_PYARROW = None
ER_NO_PYARROW_SNOWSQL = None
ArrowResultBatch = JSONResultBatch = None
if TYPE_CHECKING: # pragma: no cover
from snowflake.connector.result_batch import ResultBatch
def _drop_warehouse(conn, db_parameters):
conn.cursor().execute(
"drop warehouse if exists {}".format(db_parameters["name_wh"])
)
@pytest.fixture()
def conn(request, conn_cnx, db_parameters):
def fin():
with conn_cnx() as cnx:
cnx.cursor().execute(
"use {db}.{schema}".format(
db=db_parameters["database"], schema=db_parameters["schema"]
)
)
cnx.cursor().execute("drop table {name}".format(name=db_parameters["name"]))
request.addfinalizer(fin)
with conn_cnx() as cnx:
cnx.cursor().execute(
"""
create table {name} (
aa int,
dt date,
tm time,
ts timestamp,
tsltz timestamp_ltz,
tsntz timestamp_ntz,
tstz timestamp_tz,
pct float,
ratio number(5,2),
b binary)
""".format(
name=db_parameters["name"]
)
)
return conn_cnx
def _check_results(cursor, results):
assert cursor.sfqid, "Snowflake query id is None"
assert cursor.rowcount == 3, "the number of records"
assert results[0] == 65432, "the first result was wrong"
assert results[1] == 98765, "the second result was wrong"
assert results[2] == 123456, "the third result was wrong"
def test_insert_select(conn, db_parameters):
"""Inserts and selects integer data."""
with conn() as cnx:
c = cnx.cursor()
try:
c.execute(
"insert into {name}(aa) values(123456),"
"(98765),(65432)".format(name=db_parameters["name"])
)
cnt = 0
for rec in c:
cnt += int(rec[0])
assert cnt == 3, "wrong number of records were inserted"
assert c.rowcount == 3, "wrong number of records were inserted"
finally:
c.close()
try:
c = cnx.cursor()
c.execute(
"select aa from {name} order by aa".format(name=db_parameters["name"])
)
results = []
for rec in c:
results.append(rec[0])
_check_results(c, results)
finally:
c.close()
with cnx.cursor(snowflake.connector.DictCursor) as c:
c.execute(
"select aa from {name} order by aa".format(name=db_parameters["name"])
)
results = []
for rec in c:
results.append(rec["AA"])
_check_results(c, results)
def test_insert_and_select_by_separate_connection(conn, db_parameters):
"""Inserts a record and select it by a separate connection."""
with conn() as cnx:
result = cnx.cursor().execute(
"insert into {name}(aa) values({value})".format(
name=db_parameters["name"], value="1234"
)
)
cnt = 0
for rec in result:
cnt += int(rec[0])
assert cnt == 1, "wrong number of records were inserted"
assert result.rowcount == 1, "wrong number of records were inserted"
cnx2 = snowflake.connector.connect(
user=db_parameters["user"],
password=db_parameters["password"],
host=db_parameters["host"],
port=db_parameters["port"],
account=db_parameters["account"],
database=db_parameters["database"],
schema=db_parameters["schema"],
protocol=db_parameters["protocol"],
timezone="UTC",
)
try:
c = cnx2.cursor()
c.execute("select aa from {name}".format(name=db_parameters["name"]))
results = []
for rec in c:
results.append(rec[0])
c.close()
assert results[0] == 1234, "the first result was wrong"
assert result.rowcount == 1, "wrong number of records were selected"
finally:
cnx2.close()
def _total_milliseconds_from_timedelta(td):
"""Returns the total number of milliseconds contained in the duration object."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) // 10 ** 3
def _total_seconds_from_timedelta(td):
"""Returns the total number of seconds contained in the duration object."""
return _total_milliseconds_from_timedelta(td) // 10 ** 3
def test_insert_timestamp_select(conn, db_parameters):
"""Inserts and gets timestamp, timestamp with tz, date, and time.
Notes:
Currently the session parameter TIMEZONE is ignored.
"""
PST_TZ = "America/Los_Angeles"
JST_TZ = "Asia/Tokyo"
current_timestamp = datetime.utcnow()
current_timestamp = current_timestamp.replace(tzinfo=pytz.timezone(PST_TZ))
current_date = current_timestamp.date()
current_time = current_timestamp.time()
other_timestamp = current_timestamp.replace(tzinfo=pytz.timezone(JST_TZ))
with conn() as cnx:
cnx.cursor().execute("alter session set TIMEZONE=%s", (PST_TZ,))
c = cnx.cursor()
try:
fmt = (
"insert into {name}(aa, tsltz, tstz, tsntz, dt, tm) "
"values(%(value)s,%(tsltz)s, %(tstz)s, %(tsntz)s, "
"%(dt)s, %(tm)s)"
)
c.execute(
fmt.format(name=db_parameters["name"]),
{
"value": 1234,
"tsltz": current_timestamp,
"tstz": other_timestamp,
"tsntz": current_timestamp,
"dt": current_date,
"tm": current_time,
},
)
cnt = 0
for rec in c:
cnt += int(rec[0])
assert cnt == 1, "wrong number of records were inserted"
assert c.rowcount == 1, "wrong number of records were selected"
finally:
c.close()
cnx2 = snowflake.connector.connect(
user=db_parameters["user"],
password=db_parameters["password"],
host=db_parameters["host"],
port=db_parameters["port"],
account=db_parameters["account"],
database=db_parameters["database"],
schema=db_parameters["schema"],
protocol=db_parameters["protocol"],
timezone="UTC",
)
try:
c = cnx2.cursor()
c.execute(
"select aa, tsltz, tstz, tsntz, dt, tm from {name}".format(
name=db_parameters["name"]
)
)
result_numeric_value = []
result_timestamp_value = []
result_other_timestamp_value = []
result_ntz_timestamp_value = []
result_date_value = []
result_time_value = []
for (aa, ts, tstz, tsntz, dt, tm) in c:
result_numeric_value.append(aa)
result_timestamp_value.append(ts)
result_other_timestamp_value.append(tstz)
result_ntz_timestamp_value.append(tsntz)
result_date_value.append(dt)
result_time_value.append(tm)
c.close()
assert result_numeric_value[0] == 1234, "the integer result was wrong"
td_diff = _total_milliseconds_from_timedelta(
current_timestamp - result_timestamp_value[0]
)
assert td_diff == 0, "the timestamp result was wrong"
td_diff = _total_milliseconds_from_timedelta(
other_timestamp - result_other_timestamp_value[0]
)
assert td_diff == 0, "the other timestamp result was wrong"
td_diff = _total_milliseconds_from_timedelta(
current_timestamp.replace(tzinfo=None) - result_ntz_timestamp_value[0]
)
assert td_diff == 0, "the other timestamp result was wrong"
assert current_date == result_date_value[0], "the date result was wrong"
assert current_time == result_time_value[0], "the time result was wrong"
desc = c.description
assert len(desc) == 6, "invalid number of column meta data"
assert desc[0][0].upper() == "AA", "invalid column name"
assert desc[1][0].upper() == "TSLTZ", "invalid column name"
assert desc[2][0].upper() == "TSTZ", "invalid column name"
assert desc[3][0].upper() == "TSNTZ", "invalid column name"
assert desc[4][0].upper() == "DT", "invalid column name"
assert desc[5][0].upper() == "TM", "invalid column name"
assert (
constants.FIELD_ID_TO_NAME[desc[0][1]] == "FIXED"
), "invalid column name: {}".format(constants.FIELD_ID_TO_NAME[desc[0][1]])
assert (
constants.FIELD_ID_TO_NAME[desc[1][1]] == "TIMESTAMP_LTZ"
), "invalid column name"
assert (
constants.FIELD_ID_TO_NAME[desc[2][1]] == "TIMESTAMP_TZ"
), "invalid column name"
assert (
constants.FIELD_ID_TO_NAME[desc[3][1]] == "TIMESTAMP_NTZ"
), "invalid column name"
assert constants.FIELD_ID_TO_NAME[desc[4][1]] == "DATE", "invalid column name"
assert constants.FIELD_ID_TO_NAME[desc[5][1]] == "TIME", "invalid column name"
finally:
cnx2.close()
def test_insert_timestamp_ltz(conn, db_parameters):
"""Inserts and retrieve timestamp ltz."""
tzstr = "America/New_York"
# sync with the session parameter
with conn() as cnx:
cnx.cursor().execute("alter session set timezone='{tzstr}'".format(tzstr=tzstr))
current_time = datetime.now()
current_time = current_time.replace(tzinfo=pytz.timezone(tzstr))
c = cnx.cursor()
try:
fmt = "insert into {name}(aa, tsltz) values(%(value)s,%(ts)s)"
c.execute(
fmt.format(name=db_parameters["name"]),
{
"value": 8765,
"ts": current_time,
},
)
cnt = 0
for rec in c:
cnt += int(rec[0])
assert cnt == 1, "wrong number of records were inserted"
finally:
c.close()
try:
c = cnx.cursor()
c.execute("select aa,tsltz from {name}".format(name=db_parameters["name"]))
result_numeric_value = []
result_timestamp_value = []
for (aa, ts) in c:
result_numeric_value.append(aa)
result_timestamp_value.append(ts)
td_diff = _total_milliseconds_from_timedelta(
current_time - result_timestamp_value[0]
)
assert td_diff == 0, "the first result was wrong"
finally:
c.close()
def test_struct_time(conn, db_parameters):
"""Binds struct_time object for updating timestamp."""
tzstr = "America/New_York"
os.environ["TZ"] = tzstr
if not IS_WINDOWS:
time.tzset()
test_time = time.strptime("30 Sep 01 11:20:30", "%d %b %y %H:%M:%S")
with conn() as cnx:
c = cnx.cursor()
try:
fmt = "insert into {name}(aa, tsltz) values(%(value)s,%(ts)s)"
c.execute(
fmt.format(name=db_parameters["name"]),
{
"value": 87654,
"ts": test_time,
},
)
cnt = 0
for rec in c:
cnt += int(rec[0])
finally:
c.close()
os.environ["TZ"] = "UTC"
if not IS_WINDOWS:
time.tzset()
assert cnt == 1, "wrong number of records were inserted"
try:
result = cnx.cursor().execute(
"select aa, tsltz from {name}".format(name=db_parameters["name"])
)
for (_, _tsltz) in result:
pass
_tsltz -= _tsltz.tzinfo.utcoffset(_tsltz)
assert test_time.tm_year == _tsltz.year, "Year didn't match"
assert test_time.tm_mon == _tsltz.month, "Month didn't match"
assert test_time.tm_mday == _tsltz.day, "Day didn't match"
assert test_time.tm_hour == _tsltz.hour, "Hour didn't match"
assert test_time.tm_min == _tsltz.minute, "Minute didn't match"
assert test_time.tm_sec == _tsltz.second, "Second didn't match"
finally:
os.environ["TZ"] = "UTC"
if not IS_WINDOWS:
time.tzset()
def test_insert_binary_select(conn, db_parameters):
"""Inserts and get a binary value."""
value = b"\x00\xFF\xA1\xB2\xC3"
with conn() as cnx:
c = cnx.cursor()
try:
fmt = "insert into {name}(b) values(%(b)s)"
c.execute(fmt.format(name=db_parameters["name"]), {"b": value})
count = sum(int(rec[0]) for rec in c)
assert count == 1, "wrong number of records were inserted"
assert c.rowcount == 1, "wrong number of records were selected"
finally:
c.close()
cnx2 = snowflake.connector.connect(
user=db_parameters["user"],
password=db_parameters["password"],
host=db_parameters["host"],
port=db_parameters["port"],
account=db_parameters["account"],
database=db_parameters["database"],
schema=db_parameters["schema"],
protocol=db_parameters["protocol"],
)
try:
c = cnx2.cursor()
c.execute("select b from {name}".format(name=db_parameters["name"]))
results = [b for (b,) in c]
assert value == results[0], "the binary result was wrong"
desc = c.description
assert len(desc) == 1, "invalid number of column meta data"
assert desc[0][0].upper() == "B", "invalid column name"
assert constants.FIELD_ID_TO_NAME[desc[0][1]] == "BINARY", "invalid column name"
finally:
cnx2.close()
def test_insert_binary_select_with_bytearray(conn, db_parameters):
"""Inserts and get a binary value using the bytearray type."""
value = bytearray(b"\x00\xFF\xA1\xB2\xC3")
with conn() as cnx:
c = cnx.cursor()
try:
fmt = "insert into {name}(b) values(%(b)s)"
c.execute(fmt.format(name=db_parameters["name"]), {"b": value})
count = sum(int(rec[0]) for rec in c)
assert count == 1, "wrong number of records were inserted"
assert c.rowcount == 1, "wrong number of records were selected"
finally:
c.close()
cnx2 = snowflake.connector.connect(
user=db_parameters["user"],
password=db_parameters["password"],
host=db_parameters["host"],
port=db_parameters["port"],
account=db_parameters["account"],
database=db_parameters["database"],
schema=db_parameters["schema"],
protocol=db_parameters["protocol"],
)
try:
c = cnx2.cursor()
c.execute("select b from {name}".format(name=db_parameters["name"]))
results = [b for (b,) in c]
assert bytes(value) == results[0], "the binary result was wrong"
desc = c.description
assert len(desc) == 1, "invalid number of column meta data"
assert desc[0][0].upper() == "B", "invalid column name"
assert constants.FIELD_ID_TO_NAME[desc[0][1]] == "BINARY", "invalid column name"
finally:
cnx2.close()
def test_variant(conn, db_parameters):
"""Variant including JSON object."""
name_variant = db_parameters["name"] + "_variant"
with conn() as cnx:
cnx.cursor().execute(
"""
create table {name} (
created_at timestamp, data variant)
""".format(
name=name_variant
)
)
try:
with conn() as cnx:
current_time = datetime.now()
c = cnx.cursor()
try:
fmt = (
"insert into {name}(created_at, data) "
"select column1, parse_json(column2) "
"from values(%(created_at)s, %(data)s)"
)
c.execute(
fmt.format(name=name_variant),
{
"created_at": current_time,
"data": (
'{"SESSION-PARAMETERS":{'
'"TIMEZONE":"UTC", "SPECIAL_FLAG":true}}'
),
},
)
cnt = 0
for rec in c:
cnt += int(rec[0])
assert cnt == 1, "wrong number of records were inserted"
assert c.rowcount == 1, "wrong number of records were inserted"
finally:
c.close()
result = cnx.cursor().execute(
"select created_at, data from {name}".format(name=name_variant)
)
_, data = result.fetchone()
data = json.loads(data)
assert data["SESSION-PARAMETERS"]["SPECIAL_FLAG"], (
"JSON data should be parsed properly. " "Invalid JSON data"
)
finally:
with conn() as cnx:
cnx.cursor().execute("drop table {name}".format(name=name_variant))
def test_callproc(conn_cnx):
"""Callproc test.
Notes:
It's a no-op as of now.
"""
with conn_cnx() as cnx:
with pytest.raises(errors.NotSupportedError):
cnx.cursor().callproc("whatever the stored procedure")
def test_invalid_bind_data_type(conn_cnx):
"""Invalid bind data type."""
with conn_cnx() as cnx:
with pytest.raises(errors.ProgrammingError):
cnx.cursor().execute("select 1 from dual where 1=%s", ([1, 2, 3],))
def test_timeout_query(conn_cnx):
with conn_cnx() as cnx:
cnx.cursor().execute("select 1")
c = cnx.cursor()
try:
c.execute(
"select seq8() as c1 " "from table(generator(timeLimit => 60))",
timeout=5,
)
raise Exception("Must be canceled")
except BASE_EXCEPTION_CLASS as err:
assert isinstance(
err, errors.ProgrammingError
), "Programming Error Exception"
assert err.errno == 604, "Invalid error code"
finally:
c.close()
def test_executemany(conn, db_parameters):
"""Executes many statements. Client binding is supported by either dict, or list data types.
Notes:
The binding data type is dict and tuple, respectively.
"""
with conn() as cnx:
c = cnx.cursor()
fmt = "insert into {name}(aa) values(%(value)s)".format(
name=db_parameters["name"]
)
c.executemany(
fmt,
[
{"value": "1234"},
{"value": "234"},
{"value": "34"},
{"value": "4"},
],
)
cnt = 0
for rec in c:
cnt += int(rec[0])
assert cnt == 4, "number of records"
assert c.rowcount == 4, "wrong number of records were inserted"
c.close()
c = cnx.cursor()
fmt = "insert into {name}(aa) values(%s)".format(name=db_parameters["name"])
c.executemany(
fmt,
[
(12345,),
(1234,),
(234,),
(34,),
(4,),
],
)
rec = c.fetchone()
assert rec[0] == 5, "number of records"
assert c.rowcount == 5, "wrong number of records were inserted"
c.close()
@pytest.mark.skipolddriver
def test_executemany_qmark_types(conn, db_parameters):
table_name = random_string(5, "date_test_")
with conn(paramstyle="qmark") as cnx:
with cnx.cursor() as cur:
cur.execute(f"create table {table_name} (birth_date date)")
insert_qy = f"INSERT INTO {table_name} (birth_date) values (?)"
date_1, date_2 = date(1969, 2, 7), date(1969, 1, 1)
try:
# insert two dates, one in tuple format which specifies
# the snowflake type similar to how we support it in this
# example:
# https://docs.snowflake.com/en/user-guide/python-connector-example.html#using-qmark-or-numeric-binding-with-datetime-objects
cur.executemany(
insert_qy,
[[date_1], [("DATE", date_2)]],
)
cur.execute(f"select * from {table_name}")
inserted_dates = [row[0] for row in cur.fetchall()]
assert date_1 in inserted_dates
assert date_2 in inserted_dates
finally:
cur.execute(f"drop table if exists {table_name}")
def test_closed_cursor(conn, db_parameters):
"""Attempts to use the closed cursor. It should raise errors.
Notes:
The binding data type is scalar.
"""
with conn() as cnx:
c = cnx.cursor()
fmt = "insert into {name}(aa) values(%s)".format(name=db_parameters["name"])
c.executemany(
fmt,
[
12345,
1234,
234,
34,
4,
],
)
rec = c.fetchone()
assert rec[0] == 5, "number of records"
assert c.rowcount == 5, "number of records"
c.close()
fmt = "select aa from {name}".format(name=db_parameters["name"])
try:
c.execute(fmt)
raise Exception("should fail as the cursor was closed.")
except snowflake.connector.Error as err:
assert err.errno == errorcode.ER_CURSOR_IS_CLOSED
def test_fetchmany(conn, db_parameters):
with conn() as cnx:
c = cnx.cursor()
fmt = "insert into {name}(aa) values(%(value)s)".format(
name=db_parameters["name"]
)
c.executemany(
fmt,
[
{"value": "3456789"},
{"value": "234567"},
{"value": "1234"},
{"value": "234"},
{"value": "34"},
{"value": "4"},
],
)
cnt = 0
for rec in c:
cnt += int(rec[0])
assert cnt == 6, "number of records"
assert c.rowcount == 6, "number of records"
c.close()
c = cnx.cursor()
fmt = "select aa from {name} order by aa desc".format(
name=db_parameters["name"]
)
c.execute(fmt)
rows = c.fetchmany(2)
assert len(rows) == 2, "The number of records"
assert rows[1][0] == 234567, "The second record"
rows = c.fetchmany(1)
assert len(rows) == 1, "The number of records"
assert rows[0][0] == 1234, "The first record"
rows = c.fetchmany(5)
assert len(rows) == 3, "The number of records"
assert rows[-1][0] == 4, "The last record"
rows = c.fetchmany(15)
assert len(rows) == 0, "The number of records"
c.close()
def test_process_params(conn, db_parameters):
"""Binds variables for insert and other queries."""
with conn() as cnx:
c = cnx.cursor()
fmt = "insert into {name}(aa) values(%(value)s)".format(
name=db_parameters["name"]
)
c.executemany(
fmt,
[
{"value": "3456789"},
{"value": "234567"},
{"value": "1234"},
{"value": "234"},
{"value": "34"},
{"value": "4"},
],
)
cnt = 0
for rec in c:
cnt += int(rec[0])
c.close()
assert cnt == 6, "number of records"
fmt = "select count(aa) from {name} where aa > %(value)s".format(
name=db_parameters["name"]
)
c = cnx.cursor()
c.execute(fmt, {"value": 1233})
for (_cnt,) in c:
pass
assert _cnt == 3, "the number of records"
c.close()
fmt = "select count(aa) from {name} where aa > %s".format(
name=db_parameters["name"]
)
c = cnx.cursor()
c.execute(fmt, (1234,))
for (_cnt,) in c:
pass
assert _cnt == 2, "the number of records"
c.close()
def test_real_decimal(conn, db_parameters):
with conn() as cnx:
c = cnx.cursor()
fmt = ("insert into {name}(aa, pct, ratio) " "values(%s,%s,%s)").format(
name=db_parameters["name"]
)
c.execute(fmt, (9876, 12.3, decimal.Decimal("23.4")))
for (_cnt,) in c:
pass
assert _cnt == 1, "the number of records"
c.close()
c = cnx.cursor()
fmt = "select aa, pct, ratio from {name}".format(name=db_parameters["name"])
c.execute(fmt)
for (_aa, _pct, _ratio) in c:
pass
assert _aa == 9876, "the integer value"
assert _pct == 12.3, "the float value"
assert _ratio == decimal.Decimal("23.4"), "the decimal value"
c.close()
with cnx.cursor(snowflake.connector.DictCursor) as c:
fmt = "select aa, pct, ratio from {name}".format(name=db_parameters["name"])
c.execute(fmt)
rec = c.fetchone()
assert rec["AA"] == 9876, "the integer value"
assert rec["PCT"] == 12.3, "the float value"
assert rec["RATIO"] == decimal.Decimal("23.4"), "the decimal value"
def test_none_errorhandler(conn_testaccount):
c = conn_testaccount.cursor()
with pytest.raises(errors.ProgrammingError):
c.errorhandler = None
def test_nope_errorhandler(conn_testaccount):
def user_errorhandler(connection, cursor, errorclass, errorvalue):
pass
c = conn_testaccount.cursor()
c.errorhandler = user_errorhandler
c.execute("select * foooooo never_exists_table")
c.execute("select * barrrrr never_exists_table")
c.execute("select * daaaaaa never_exists_table")
assert c.messages[0][0] == errors.ProgrammingError, "One error was recorded"
assert len(c.messages) == 1, "should be one error"
@pytest.mark.internal
def test_binding_negative(negative_conn_cnx, db_parameters):
with negative_conn_cnx() as cnx:
with pytest.raises(TypeError):
cnx.cursor().execute(
"INSERT INTO {name}(aa) VALUES(%s)".format(name=db_parameters["name"]),
(1, 2, 3),
)
with pytest.raises(errors.ProgrammingError):
cnx.cursor().execute(
"INSERT INTO {name}(aa) VALUES(%s)".format(name=db_parameters["name"]),
(),
)
with pytest.raises(errors.ProgrammingError):
cnx.cursor().execute(
"INSERT INTO {name}(aa) VALUES(%s)".format(name=db_parameters["name"]),
(["a"],),
)
def test_execute_after_close(conn_testaccount):
"""SNOW-13588: Raises an error if executing after the connection is closed."""
cursor = conn_testaccount.cursor()
conn_testaccount.close()
with pytest.raises(errors.Error):
cursor.execute("show tables")
def test_multi_table_insert(conn, db_parameters):
try:
with conn() as cnx:
cur = cnx.cursor()
cur.execute(
"""
INSERT INTO {name}(aa) VALUES(1234),(9876),(2345)
""".format(
name=db_parameters["name"]
)
)
assert cur.rowcount == 3, "the number of records"
cur.execute(
"""
CREATE OR REPLACE TABLE {name}_foo (aa_foo int)
""".format(
name=db_parameters["name"]
)
)
cur.execute(
"""
CREATE OR REPLACE TABLE {name}_bar (aa_bar int)
""".format(
name=db_parameters["name"]
)
)
cur.execute(
"""
INSERT ALL
INTO {name}_foo(aa_foo) VALUES(aa)
INTO {name}_bar(aa_bar) VALUES(aa)
SELECT aa FROM {name}
""".format(
name=db_parameters["name"]
)
)
assert cur.rowcount == 6
finally:
with conn() as cnx:
cnx.cursor().execute(
"""
DROP TABLE IF EXISTS {name}_foo
""".format(
name=db_parameters["name"]
)
)
cnx.cursor().execute(
"""
DROP TABLE IF EXISTS {name}_bar
""".format(
name=db_parameters["name"]
)
)
@pytest.mark.skipif(
True,
reason="""
Negative test case.
""",
)
def test_fetch_before_execute(conn_testaccount):
"""SNOW-13574: Fetch before execute."""
cursor = conn_testaccount.cursor()
with pytest.raises(errors.DataError):
cursor.fetchone()
def test_close_twice(conn_testaccount):
conn_testaccount.close()
conn_testaccount.close()
@pytest.mark.parametrize("result_format", ("arrow", "json"))
def test_fetch_out_of_range_timestamp_value(conn, result_format):
with conn() as cnx:
cur = cnx.cursor()
cur.execute(
f"alter session set python_connector_query_result_format='{result_format}'"
)
cur.execute("select '12345-01-02'::timestamp_ntz")
with pytest.raises(errors.InterfaceError):
cur.fetchone()
@pytest.mark.parametrize("sql", (None, ""), ids=["None", "empty"])
def test_empty_execution(conn, sql):
"""Checks whether executing an empty string, or nothing behaves as expected."""
with conn() as cnx:
with cnx.cursor() as cur:
if sql is not None:
cur.execute(sql)
assert cur._result is None
with pytest.raises(
TypeError, match="'NoneType' object is not( an)? itera(tor|ble)"
):
cur.fetchone()
with pytest.raises(
TypeError, match="'NoneType' object is not( an)? itera(tor|ble)"
):
cur.fetchall()
@pytest.mark.parametrize(
"reuse_results", (False, pytest.param(True, marks=pytest.mark.skipolddriver))
)
def test_reset_fetch(conn, reuse_results):
"""Tests behavior after resetting the cursor."""
with conn(reuse_results=reuse_results) as cnx:
with cnx.cursor() as cur:
cur.execute("select 1")
cur.reset()
if reuse_results:
assert cur.fetchone() == (1,)
else:
assert cur.fetchone() is None
assert len(cur.fetchall()) == 0
def test_rownumber(conn):
"""Checks whether rownumber is returned as expected."""
with conn() as cnx:
with cnx.cursor() as cur:
assert cur.execute("select * from values (1), (2)")
assert cur.rownumber is None
assert cur.fetchone() == (1,)
assert cur.rownumber == 0
assert cur.fetchone() == (2,)
assert cur.rownumber == 1
def test_values_set(conn):
"""Checks whether a bunch of properties start as Nones, but get set to something else when a query was executed."""
properties = [
"timestamp_output_format",
"timestamp_ltz_output_format",
"timestamp_tz_output_format",
"timestamp_ntz_output_format",
"date_output_format",
"timezone",
"time_output_format",
"binary_output_format",
]
with conn() as cnx:
with cnx.cursor() as cur:
for property in properties:
assert getattr(cur, property) is None
assert cur.execute("select 1").fetchone() == (1,)
# The default values might change in future, so let's just check that they aren't None anymore
for property in properties:
assert getattr(cur, property) is not None
def test_execute_helper_params_error(conn_testaccount):
"""Tests whether calling _execute_helper with a non-dict statement params is handled correctly."""
with conn_testaccount.cursor() as cur:
with pytest.raises(
ProgrammingError,
match=r"The data type of statement params is invalid. It must be dict.$",
):
cur._execute_helper("select %()s", statement_params="1")
def test_desc_rewrite(conn, caplog):
"""Tests whether describe queries are rewritten as expected and this action is logged."""
with conn() as cnx:
with cnx.cursor() as cur:
table_name = random_string(5, "test_desc_rewrite_")
try:
cur.execute("create or replace table {} (a int)".format(table_name))
caplog.set_level(logging.DEBUG, "snowflake.connector")
cur.execute("desc {}".format(table_name))
assert (
"snowflake.connector.cursor",
20,
"query was rewritten: org=desc {table_name}, new=describe table {table_name}".format(
table_name=table_name
),
) in caplog.record_tuples
finally:
cur.execute("drop table {}".format(table_name))
@pytest.mark.skipolddriver
@pytest.mark.parametrize("result_format", [False, None, "json"])
def test_execute_helper_cannot_use_arrow(conn_cnx, caplog, result_format):
"""Tests whether cannot use arrow is handled correctly inside of _execute_helper."""
with conn_cnx() as cnx:
with cnx.cursor() as cur:
with mock.patch(
"snowflake.connector.cursor.CAN_USE_ARROW_RESULT_FORMAT", False
):
if result_format is False:
result_format = None
else:
result_format = {
PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT: result_format
}
caplog.set_level(logging.DEBUG, "snowflake.connector")
cur.execute("select 1", _statement_params=result_format)
assert (
"snowflake.connector.cursor",
logging.DEBUG,
"Cannot use arrow result format, fallback to json format",
) in caplog.record_tuples
assert cur.fetchone() == (1,)
@pytest.mark.skipolddriver
def test_execute_helper_cannot_use_arrow_exception(conn_cnx):
"""Like test_execute_helper_cannot_use_arrow but when we are trying to force arrow an Exception should be raised."""
with conn_cnx() as cnx:
with cnx.cursor() as cur:
with mock.patch(
"snowflake.connector.cursor.CAN_USE_ARROW_RESULT_FORMAT", False
):
with pytest.raises(
ProgrammingError,
match="The result set in Apache Arrow format is not supported for the platform.",
):
cur.execute(
"select 1",
_statement_params={
PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT: "arrow"
},
)
@pytest.mark.skipolddriver
def test_check_can_use_arrow_resultset(conn_cnx, caplog):
"""Tests check_can_use_arrow_resultset has no effect when we can use arrow."""
with conn_cnx() as cnx:
with cnx.cursor() as cur:
with mock.patch(
"snowflake.connector.cursor.CAN_USE_ARROW_RESULT_FORMAT", True
):
caplog.set_level(logging.DEBUG, "snowflake.connector")
cur.check_can_use_arrow_resultset()
assert "Arrow" not in caplog.text
@pytest.mark.skipolddriver
@pytest.mark.parametrize("snowsql", [True, False])
def test_check_cannot_use_arrow_resultset(conn_cnx, caplog, snowsql):
"""Tests check_can_use_arrow_resultset expected outcomes."""
config = {}
if snowsql:
config["application"] = "SnowSQL"
with conn_cnx(**config) as cnx:
with cnx.cursor() as cur:
with mock.patch(
"snowflake.connector.cursor.CAN_USE_ARROW_RESULT_FORMAT", False
):
with pytest.raises(
ProgrammingError,
match="Currently SnowSQL doesn't support the result set in Apache Arrow format."
if snowsql
else "The result set in Apache Arrow format is not supported for the platform.",
) as pe:
cur.check_can_use_arrow_resultset()
assert pe.errno == (
ER_NO_PYARROW_SNOWSQL if snowsql else ER_NO_ARROW_RESULT
)
@pytest.mark.skipolddriver
def test_check_can_use_pandas(conn_cnx):
"""Tests check_can_use_arrow_resultset has no effect when we can import pandas."""
with conn_cnx() as cnx:
with cnx.cursor() as cur:
with mock.patch("snowflake.connector.cursor.installed_pandas", True):
cur.check_can_use_pandas()
@pytest.mark.skipolddriver
def test_check_cannot_use_pandas(conn_cnx):
"""Tests check_can_use_arrow_resultset has expected outcomes."""
with conn_cnx() as cnx:
with cnx.cursor() as cur:
with mock.patch("snowflake.connector.cursor.installed_pandas", False):
with pytest.raises(
ProgrammingError,
match=r"Optional dependency: 'pyarrow' is not installed, please see the "
"following link for install instructions: https:.*",
) as pe:
cur.check_can_use_pandas()
assert pe.errno == ER_NO_PYARROW
@pytest.mark.skipolddriver
def test_not_supported_pandas(conn_cnx):
"""Check that fetch_pandas functions return expected error when arrow results are not available."""
result_format = {PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT: "json"}
with conn_cnx() as cnx:
with cnx.cursor() as cur:
cur.execute("select 1", _statement_params=result_format)
with mock.patch("snowflake.connector.cursor.installed_pandas", True):
with pytest.raises(NotSupportedError):
cur.fetch_pandas_all()
with pytest.raises(NotSupportedError):
list(cur.fetch_pandas_batches())
def test_query_cancellation(conn_cnx):
"""Tests whether query_cancellation works."""
with conn_cnx() as cnx:
with cnx.cursor() as cur:
cur.execute(
"select max(seq8()) from table(generator(timeLimit=>30));",
_no_results=True,
)
sf_qid = cur.sfqid
cur.abort_query(sf_qid)
def test_executemany_error(conn_cnx):
"""Tests calling executemany without many things."""
with conn_cnx() as con:
with con.cursor() as cur:
with pytest.raises(
InterfaceError,
match="No parameters are specified for the command: select 1",
) as ie:
cur.executemany("select 1", [])
assert ie.errno == ER_INVALID_VALUE
def test_executemany_insert_rewrite(conn_cnx):
"""Tests calling executemany with a non rewritable pyformat insert query."""
with conn_cnx() as con:
with con.cursor() as cur:
with pytest.raises(
InterfaceError, match="Failed to rewrite multi-row insert"
) as ie:
cur.executemany("insert into numbers (select 1)", [1, 2])
assert ie.errno == ER_FAILED_TO_REWRITE_MULTI_ROW_INSERT
def test_executemany_bulk_insert_size_mismatch(conn_cnx):
"""Tests bulk insert error with variable length of arguments."""
with conn_cnx(paramstyle="qmark") as con:
with con.cursor() as cur:
with pytest.raises(
InterfaceError, match="Bulk data size don't match. expected: 1, got: 2"
) as ie:
cur.executemany("insert into numbers values (?,?)", [[1], [1, 2]])
assert ie.errno == ER_FAILED_TO_REWRITE_MULTI_ROW_INSERT
def test_fetchmany_size_error(conn_cnx):
"""Tests retrieving a negative number of results."""
with conn_cnx() as con:
with con.cursor() as cur:
cur.execute("select 1")
with pytest.raises(
ProgrammingError,
match="The number of rows is not zero or positive number: -1",
) as ie:
cur.fetchmany(-1)
assert ie.errno == ER_NOT_POSITIVE_SIZE
def test_nextset(conn_cnx, caplog):
"""Tests no op function nextset."""
caplog.set_level(logging.DEBUG, "snowflake.connector")
with conn_cnx() as con:
with con.cursor() as cur:
caplog.set_level(logging.DEBUG, "snowflake.connector")
assert cur.nextset() is None
assert ("snowflake.connector.cursor", logging.DEBUG, "nop") in caplog.record_tuples
def test_scroll(conn_cnx):
"""Tests if scroll returns a NotSupported exception."""
with conn_cnx() as con:
with con.cursor() as cur:
with pytest.raises(
NotSupportedError, match="scroll is not supported."
) as nse:
cur.scroll(2)
assert nse.errno == SQLSTATE_FEATURE_NOT_SUPPORTED
def test__log_telemetry_job_data(conn_cnx, caplog):
"""Tests whether we handle missing connection object correctly while logging a telemetry event."""
with conn_cnx() as con:
with con.cursor() as cur:
with mock.patch.object(cur, "_connection", None):
caplog.set_level(logging.DEBUG, "snowflake.connector")
cur._log_telemetry_job_data("test", True)
assert (
"snowflake.connector.cursor",
logging.WARNING,
"Cursor failed to log to telemetry. Connection object may be None.",
) in caplog.record_tuples
@pytest.mark.skipolddriver(reason="new feature in v2.5.0")
@pytest.mark.parametrize(
"result_format,expected_chunk_type",
(
("json", JSONResultBatch),
("arrow", ArrowResultBatch),
),
)
def test_resultbatch(
conn_cnx,
result_format,
expected_chunk_type,
capture_sf_telemetry,
):
"""This test checks the following things:
1. After executing a query can we pickle the result batches
2. When we get the batches, do we emit a telemetry log
3. Whether we can iterate through ResultBatches multiple times
4. Whether the results make sense
5. See whether getter functions are working
"""
rowcount = 100000
with conn_cnx(
session_parameters={
"python_connector_query_result_format": result_format,
}
) as con:
with capture_sf_telemetry.patch_connection(con) as telemetry_data:
with con.cursor() as cur:
cur.execute(
f"select seq4() from table(generator(rowcount => {rowcount}));"
)
assert cur._result_set.total_row_index() == rowcount
pre_pickle_partitions = cur.get_result_batches()
assert len(pre_pickle_partitions) > 1
assert pre_pickle_partitions is not None
assert all(
isinstance(p, expected_chunk_type) for p in pre_pickle_partitions
)
pickle_str = pickle.dumps(pre_pickle_partitions)
assert any(
t.message["type"] == TelemetryField.GET_PARTITIONS_USED
for t in telemetry_data.records
)
post_pickle_partitions: List["ResultBatch"] = pickle.loads(pickle_str)
total_rows = 0
# Make sure the batches can be iterated over individually
for i, partition in enumerate(post_pickle_partitions):
# Tests whether the getter functions are working
if i == 0:
assert partition.compressed_size is None
assert partition.uncompressed_size is None
else:
assert partition.compressed_size is not None
assert partition.uncompressed_size is not None
for row in partition:
col1 = row[0]
assert col1 == total_rows
total_rows += 1
assert total_rows == rowcount
total_rows = 0
# Make sure the batches can be iterated over again
for partition in post_pickle_partitions:
for row in partition:
col1 = row[0]
assert col1 == total_rows
total_rows += 1
assert total_rows == rowcount
@pytest.mark.skipolddriver(reason="new feature in v2.5.0")
@pytest.mark.parametrize(
"result_format,patch_path",
(
("json", "snowflake.connector.result_batch.JSONResultBatch.create_iter"),
("arrow", "snowflake.connector.result_batch.ArrowResultBatch.create_iter"),
),
)
def test_resultbatch_lazy_fetching_and_schemas(conn_cnx, result_format, patch_path):
"""Tests whether pre-fetching results chunks fetches the right amount of them."""
rowcount = 1000000 # We need at least 5 chunks for this test
with conn_cnx(
session_parameters={
"python_connector_query_result_format": result_format,
}
) as con:
with con.cursor() as cur:
# Dummy return values are necessary so that the first fetchone call does
# not iterate through every batch
downloads = [iter([(i,)]) for i in range(10)]
with mock.patch(
patch_path,
side_effect=downloads,
) as patched_download:
cur.execute(
f"select seq4() as c1, randstr(1,random()) as c2 "
f"from table(generator(rowcount => {rowcount}));"
)
result_batches = cur.get_result_batches()
batch_schemas = [batch.schema for batch in result_batches]
for schema in batch_schemas:
# all batches should have the same schema
assert schema == [
ResultMetadata("C1", 0, None, None, 10, 0, False),
ResultMetadata("C2", 2, None, 16777216, None, None, False),
]
assert patched_download.call_count == 0
assert len(result_batches) > 5
assert result_batches[0]._local # Sanity check first chunk being local
cur.fetchone() # Trigger pre-fetching
# While the first chunk is local we still call _download on it, which
# short circuits and just parses (for JSON batches) and then returns
# an iterator through that data, so we expect the call count to be 5.
# (0 local and 1, 2, 3, 4 pre-fetched) = 5 total
start_time = time.time()
while time.time() < start_time + 1:
if patched_download.call_count == 5:
break
else:
assert patched_download.call_count == 5
@pytest.mark.skipolddriver(reason="new feature in v2.5.0")
@pytest.mark.parametrize("result_format", ["json", "arrow"])
def test_resultbatch_schema_exists_when_zero_rows(conn_cnx, result_format):
with conn_cnx(
session_parameters={"python_connector_query_result_format": result_format}
) as con:
with con.cursor() as cur:
cur.execute(
"select seq4() as c1, randstr(1,random()) as c2 from table(generator(rowcount => 1)) where 1=0"
)
result_batches = cur.get_result_batches()
# verify there is 1 batch and 0 rows in that batch
assert len(result_batches) == 1
assert result_batches[0].rowcount == 0
# verify that the schema is correct
schema = result_batches[0].schema
assert schema == [
ResultMetadata("C1", 0, None, None, 10, 0, False),
ResultMetadata("C2", 2, None, 16777216, None, None, False),
]
def test_optional_telemetry(conn_cnx, capture_sf_telemetry):
"""Make sure that we do not fail when _first_chunk_time is not present in cursor."""
with conn_cnx() as con:
with con.cursor() as cur:
with capture_sf_telemetry.patch_connection(con, False) as telemetry:
cur.execute("select 1;")
cur._first_chunk_time = None
assert cur.fetchall() == [
(1,),
]
assert not any(
r.message.get("type", "") == TelemetryField.TIME_CONSUME_LAST_RESULT
for r in telemetry.records
)
@pytest.mark.parametrize("result_format", ("json", "arrow"))
@pytest.mark.parametrize("cursor_type", (SnowflakeCursor, DictCursor))
@pytest.mark.parametrize("fetch_method", ("__next__", "fetchone"))
def test_out_of_range_year(conn_cnx, result_format, cursor_type, fetch_method):
"""Tests whether the year 10000 is out of range exception is raised as expected."""
with conn_cnx(
session_parameters={
PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT: result_format
}
) as con:
with con.cursor(cursor_type) as cur:
cur.execute(
"select * from VALUES (1, TO_TIMESTAMP('9999-01-01 00:00:00')), (2, TO_TIMESTAMP('10000-01-01 00:00:00'))"
)
iterate_obj = cur if fetch_method == "fetchone" else iter(cur)
fetch_next_fn = getattr(iterate_obj, fetch_method)
# first fetch doesn't raise error
fetch_next_fn()
with pytest.raises(
InterfaceError,
match="date value out of range"
if IS_WINDOWS
else "year 10000 is out of range",
):
fetch_next_fn()
@pytest.mark.skipolddriver
def test_describe(conn_cnx):
with conn_cnx() as con:
with con.cursor() as cur:
table_name = random_string(5, "test_describe_")
# test select
description = cur.describe(
"select * from VALUES(1, 3.1415926, 'snow', TO_TIMESTAMP('2021-01-01 00:00:00'))"
)
assert description is not None
column_types = [column[1] for column in description]
assert constants.FIELD_ID_TO_NAME[column_types[0]] == "FIXED"
assert constants.FIELD_ID_TO_NAME[column_types[1]] == "FIXED"
assert constants.FIELD_ID_TO_NAME[column_types[2]] == "TEXT"
assert "TIMESTAMP" in constants.FIELD_ID_TO_NAME[column_types[3]]
assert len(cur.fetchall()) == 0
# test insert
cur.execute(f"create table {table_name} (aa int)")
try:
description = cur.describe(
"insert into {name}(aa) values({value})".format(
name=table_name, value="1234"
)
)
assert description[0][0] == "number of rows inserted"
assert cur.rowcount is None
finally:
cur.execute(f"drop table if exists {table_name}")
@pytest.mark.skipolddriver
def test_fetch_batches_with_sessions(conn_cnx):
rowcount = 250_000
with conn_cnx() as con:
with con.cursor() as cur:
cur.execute(
f"select seq4() as foo from table(generator(rowcount=>{rowcount}))"
)
num_batches = len(cur.get_result_batches())
with mock.patch(
"snowflake.connector.network.SnowflakeRestful._use_requests_session",
side_effect=con._rest._use_requests_session,
) as get_session_mock:
result = cur.fetchall()
# all but one batch is downloaded using a session
assert get_session_mock.call_count == num_batches - 1
assert len(result) == rowcount
|
the-stack_0_12484 | from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import os
import sqlite3
import random
from panel import Ui_quiz
# Questions Database
conn = sqlite3.connect('questions.db')
c = conn.cursor()
# User information Database
conn2 = sqlite3.connect('info_user.db')
c2 = conn2.cursor()
c2.execute('''CREATE TABLE IF NOT EXISTS level(
level text
)''')
conn2.commit()
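# the level table stores the user's current level; it is created on first
# run and adjusted as questions are answered or hints are bought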
# default time question
time = 8
# answer question variable
answer_question = 0
check_answer = True
# page status flag (True while a question page is being shown)
status_question = False
# level variable
level = 0
# one-shot flags for buying extra time / removing a wrong option
status_buy_time = True
status_buy_option = True
class Root(QMainWindow):
def __init__(self):
global level
QMainWindow.__init__(self)
self.ui = Ui_quiz()
self.ui.setupUi(self)
self.oldPos = []
self.show()
# set timer
self.timer = QTimer(self)
self.timer.timeout.connect(self.timer_func)
self.timer.start(1000)
# set user info
self.ui.username.setText(os.getlogin())
self.ui.profile.setText(str(os.getlogin())[0].lower())
self.ui.username2.setText(os.getlogin())
# Set level
try:
c2.execute('SELECT * FROM level')
level = c2.fetchone()[0]
self.ui.level.setText(level)
self.ui.level2.setText(level)
except Exception:
c2.execute('INSERT INTO level VALUES(1)')
conn2.commit()
# set navigation and category buttons
self.ui.letsgo.clicked.connect(lambda: self.ui.pages.setCurrentWidget(self.ui.select))
self.ui.tech.clicked.connect(self.tech)
self.ui.sport.clicked.connect(self.sport)
self.ui.info.clicked.connect(self.info)
self.ui.cinema.clicked.connect(self.cinema)
self.ui.math.clicked.connect(self.math)
self.ui.nature.clicked.connect(self.nature)
# set option
self.ui.one.clicked.connect(self.one)
self.ui.two.clicked.connect(self.two)
self.ui.three.clicked.connect(self.three)
self.ui.four.clicked.connect(self.four)
# buttons that end the current question and return to category selection
self.ui.end.clicked.connect(lambda: self.ui.pages.setCurrentWidget(self.ui.select))
self.ui.end.clicked.connect(self.end_question)
self.ui.end2.clicked.connect(lambda: self.ui.pages.setCurrentWidget(self.ui.select))
self.ui.end2.clicked.connect(self.end_question)
# help user
self.ui.buy_option.clicked.connect(self.wrong_option)
self.ui.buy_time.clicked.connect(self.buy_time)
def mousePressEvent(self, evt):
self.oldPos = evt.globalPos()
def mouseMoveEvent(self, evt):
delta = QPoint(evt.globalPos() - self.oldPos)
self.move(self.x() + delta.x(), self.y() + delta.y())
self.oldPos = evt.globalPos()
# Technology category
def tech(self):
global conn
global c
global time
global check_answer
global status_question
self.ui.next.clicked.connect(self.tech)
self.ui.next2.clicked.connect(self.tech)
self.ui.pages.setCurrentWidget(self.ui.question)
c.execute('SELECT * FROM tech')
questions = c.fetchall()
tedad = len(questions)
ran = random.randrange(0, tedad)
questions = questions[ran]
self.set_qu(questions[0], questions[1], questions[2], questions[3], questions[4], questions[5])
check_answer = True
status_question = True
time = 8
# Sports category
def sport(self):
global conn
global c
global time
global check_answer
global status_question
self.ui.next.clicked.connect(self.sport)
self.ui.next2.clicked.connect(self.sport)
self.ui.pages.setCurrentWidget(self.ui.question)
c.execute('SELECT * FROM Football')
questions = c.fetchall()
tedad = len(questions)
ran = random.randrange(0, tedad)
questions = questions[ran]
self.set_qu(questions[0], questions[1], questions[2], questions[3], questions[4], questions[5])
check_answer = True
status_question = True
time = 8
def info(self):
global conn
global c
global time
global check_answer
global status_question
self.ui.next.clicked.connect(self.info)
self.ui.next2.clicked.connect(self.info)
self.ui.pages.setCurrentWidget(self.ui.question)
c.execute('SELECT * FROM information')
questions = c.fetchall()
tedad = len(questions)
ran = random.randrange(0, tedad)
questions = questions[ran]
self.set_qu(questions[0], questions[1], questions[2], questions[3], questions[4], questions[5])
check_answer = True
status_question = True
time = 8
def cinema(self):
global conn
global c
global time
global check_answer
global status_question
self.ui.next.clicked.connect(self.cinema)
self.ui.next2.clicked.connect(self.cinema)
self.ui.pages.setCurrentWidget(self.ui.question)
c.execute('SELECT * FROM cinema')
questions = c.fetchall()
tedad = len(questions)
ran = random.randrange(0, tedad)
questions = questions[ran]
self.set_qu(questions[0], questions[1], questions[2], questions[3], questions[4], questions[5])
check_answer = True
status_question = True
time = 8
def math(self):
global conn
global c
global time
global check_answer
global status_question
self.ui.next.clicked.connect(self.math)
self.ui.next2.clicked.connect(self.math)
self.ui.pages.setCurrentWidget(self.ui.question)
c.execute('SELECT * FROM math')
questions = c.fetchall()
tedad = len(questions)
ran = random.randrange(0, tedad)
questions = questions[ran]
self.set_qu(questions[0], questions[1], questions[2], questions[3], questions[4], questions[5])
check_answer = True
status_question = True
time = 8
def nature(self):
global conn
global c
global time
global check_answer
global status_question
self.ui.next.clicked.connect(self.nature)
self.ui.next2.clicked.connect(self.nature)
self.ui.pages.setCurrentWidget(self.ui.question)
c.execute('SELECT * FROM nature')
questions = c.fetchall()
tedad = len(questions)
ran = random.randrange(0, tedad)
questions = questions[ran]
self.set_qu(questions[0], questions[1], questions[2], questions[3], questions[4], questions[5])
check_answer = True
status_question = True
time = 8
# Set option questions
def set_qu(self, question, one, two, three, four, answer):
global answer_question
global check_answer
global status_buy_option
global status_buy_time
# clear Ui
self.ui.quest.clear()
self.ui.quest_2.clear()
status_buy_time = True
status_buy_option = True
self.ui.line1.hide()
self.ui.line2.hide()
self.ui.line3.hide()
self.ui.line4.hide()
if len(question) <= 45:
self.ui.quest.setText(question)
self.ui.quest_2.clear()
else:
self.ui.quest.setText(question[:40])
self.ui.quest_2.setText(question[40:])
self.ui.quest_win.setText(question)
self.ui.quest_lost.setText(question)
self.ui.one.setText(one)
self.ui.two.setText(two)
self.ui.three.setText(three)
self.ui.four.setText(four)
answer_question = answer
if answer == 1:
self.ui.answer_win.setText(one)
self.ui.answer_lost.setText(one)
elif answer == 2:
self.ui.answer_win.setText(two)
self.ui.answer_lost.setText(two)
elif answer == 3:
self.ui.answer_win.setText(three)
self.ui.answer_lost.setText(three)
else:
self.ui.answer_win.setText(four)
self.ui.answer_lost.setText(four)
# One second timer
def timer_func(self):
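        """Per-second tick: while a question is active, count the timer down,
        update the countdown label, switch to the 'wrong answer' page when the
        time runs out, and refresh the level labels from the database."""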
global time
global status_question
global level
if status_question:
# timer
time -= 1
if len(str(time)) == 2:
self.ui.time.setText('00:'+str(time))
else:
self.ui.time.setText('00:0' + str(time))
if time == 0 and check_answer:
self.ui.pages.setCurrentWidget(self.ui.False_answer)
status_question = False
c2.execute('SELECT * FROM level')
level = c2.fetchone()[0]
            self.ui.level.setText(str(level))
            self.ui.level2.setText(str(level))
# Option one to four
def one(self):
self.check(1)
def two(self):
self.check(2)
def three(self):
self.check(3)
def four(self):
self.check(4)
# Check user answer
def check(self, user_answer):
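        """Compare the selected option with the stored answer: on a correct
        answer show the 'correct answer' page and increment the persisted
        level by one, otherwise show the 'wrong answer' page."""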
global check_answer
global answer_question
global level
if user_answer == answer_question:
check_answer = False
self.ui.pages.setCurrentWidget(self.ui.True_answer)
new_level = float(level) + 1
sql_update_query = f"""Update level set level = {new_level} where level = {level}"""
c2.execute(sql_update_query)
conn2.commit()
else:
self.ui.pages.setCurrentWidget(self.ui.False_answer)
# help user (show wrong option)
def wrong_option(self):
global answer_question
global level
global status_buy_option
if status_buy_option:
status_buy_option = False
if answer_question != 1:
self.ui.line1.show()
elif answer_question != 2:
self.ui.line2.show()
elif answer_question != 3:
self.ui.line3.show()
elif answer_question != 4:
self.ui.line4.show()
new_level = float(level) - 0.5
sql_update_query = f"""Update level set level = {new_level} where level = {level}"""
c2.execute(sql_update_query)
conn2.commit()
# buy time
@staticmethod
def buy_time():
global time
global level
global status_buy_time
if status_buy_time:
time += 5
status_buy_time = False
new_level = float(level) - 0.5
sql_update_query = f"""Update level set level = {new_level} where level = {level}"""
c2.execute(sql_update_query)
conn2.commit()
# end question
@staticmethod
def end_question():
global status_question
status_question = False
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
root = Root()
sys.exit(app.exec_())
|
the-stack_0_12485 | import cv2
import numpy as np
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def convert_2d_to_3d(u, v, z, K):
v0 = K[1][2]
u0 = K[0][2]
fy = K[1][1]
fx = K[0][0]
x = (u - u0) * z / fx
y = (v - v0) * z / fy
return (x, y, z)
def feature_match(img1, img2):
r''' Find features on both images and match them pairwise
'''
max_n_features = 1000
# max_n_features = 500
use_flann = False # better not use flann
detector = cv2.xfeatures2d.SIFT_create(max_n_features)
# find the keypoints and descriptors with SIFT
kp1, des1 = detector.detectAndCompute(img1, None)
kp2, des2 = detector.detectAndCompute(img2, None)
if (des1 is None) or (des2 is None):
return [], []
des1 = des1.astype(np.float32)
des2 = des2.astype(np.float32)
if use_flann:
# FLANN parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
else:
        matcher = cv2.DescriptorMatcher_create('BruteForce')
matches = matcher.knnMatch(des1, des2, k=2)
good = []
pts1 = []
pts2 = []
# ratio test as per Lowe's paper
for i, (m, n) in enumerate(matches):
if m.distance < 0.8 * n.distance:
good.append(m)
pts2.append(kp2[m.trainIdx].pt)
pts1.append(kp1[m.queryIdx].pt)
pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
return pts1, pts2
def get_pose_pnp(rgb_curr, rgb_near, depth_curr, K):
gray_curr = rgb2gray(rgb_curr).astype(np.uint8)
gray_near = rgb2gray(rgb_near).astype(np.uint8)
height, width = gray_curr.shape
pts2d_curr, pts2d_near = feature_match(gray_curr,
gray_near) # feature matching
# dilation of depth
kernel = np.ones((4, 4), np.uint8)
depth_curr_dilated = cv2.dilate(depth_curr, kernel)
# extract 3d pts
pts3d_curr = []
pts2d_near_filtered = [
] # keep only feature points with depth in the current frame
for i, pt2d in enumerate(pts2d_curr):
# print(pt2d)
u, v = pt2d[0], pt2d[1]
z = depth_curr_dilated[v, u]
if z > 0:
xyz_curr = convert_2d_to_3d(u, v, z, K)
pts3d_curr.append(xyz_curr)
pts2d_near_filtered.append(pts2d_near[i])
# the minimal number of points accepted by solvePnP is 4:
if len(pts3d_curr) >= 4 and len(pts2d_near_filtered) >= 4:
pts3d_curr = np.expand_dims(np.array(pts3d_curr).astype(np.float32),
axis=1)
pts2d_near_filtered = np.expand_dims(
np.array(pts2d_near_filtered).astype(np.float32), axis=1)
# ransac
ret = cv2.solvePnPRansac(pts3d_curr,
pts2d_near_filtered,
np.asarray(K),
distCoeffs=None)
success = ret[0]
rotation_vector = ret[1]
translation_vector = ret[2]
return (success, rotation_vector, translation_vector)
else:
return (0, None, None)
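# --- Illustrative usage sketch (added for exposition, not part of the original
# module). It feeds synthetic frames and a hypothetical pinhole intrinsic
# matrix K into get_pose_pnp; with random data the matcher may find too few
# correspondences, in which case (0, None, None) is returned.
if __name__ == '__main__':
    h, w = 240, 320
    rgb_curr = np.random.randint(0, 255, (h, w, 3), dtype=np.uint8)
    rgb_near = np.random.randint(0, 255, (h, w, 3), dtype=np.uint8)
    depth_curr = np.random.uniform(0.5, 5.0, (h, w)).astype(np.float32)
    K = [[300.0, 0.0, w / 2.0], [0.0, 300.0, h / 2.0], [0.0, 0.0, 1.0]]
    success, rvec, tvec = get_pose_pnp(rgb_curr, rgb_near, depth_curr, K)
    print('PnP success:', success)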
|
the-stack_0_12488 | '''
Various classes and functions for handling Regier and colleagues'
communicative cost model.
'''
import numpy as np
from scipy.spatial.distance import pdist, squareform
class Partition:
'''
A partition object represents a partition of an n-dimensional
space. To create a partition, pass a list like [[0,0,1,1],
[0,0,1,1]], where the structure of the lists represents the space
(here 2x4), and the numbers represent the categories (here
category 0 and 1). Passing a tuple like (2,4) creates a trivial
partition of given dimensionality. Various iteration methods are
available for traversing the partition.
'''
@property
def shape(self):
return self._partition.shape
@property
def size(self):
return self._partition.size
def __init__(self, partition):
if isinstance(partition, tuple):
self._partition = np.zeros(partition, dtype=int)
else:
self._partition = np.array(partition, dtype=int)
self._boolean_matrix = None
def __repr__(self):
'''
Provides textual description of the partition object.
'''
if len(self.shape) == 1:
return 'Partition[length=%i, n_categories=%i]' % (self.shape[0], self.__len__())
return 'Partition[shape=%s, n_categories=%i]' % ('x'.join(map(str, self.shape)), self.__len__())
def __str__(self):
'''
Provides printable representation of the partition object.
'''
return self._partition.__str__()
def __len__(self):
'''
The length of a partition is the number of categories it
contains.
'''
return np.unique(self._partition).size
def __getitem__(self, key):
'''
		Pass a tuple to get the category membership of a point. Pass
an integer to get a list of points that belong to a category.
'''
if isinstance(key, tuple):
return self._partition[key]
return list(map(tuple, np.transpose(np.where(self._partition==key))))
def __setitem__(self, key, value):
'''
Change the category membership of a particular point.
'''
if not isinstance(key, tuple):
raise ValueError('Index must be tuple. For 1D spaces, include a trailing comma in the index.')
self._boolean_matrix = None
self._partition[key] = value
def __iter__(self):
'''
Default iterator. Each iteration returns a point in the space
along with its associated category.
'''
for point, category in np.ndenumerate(self._partition):
yield point, category
def iter_categories(self):
'''
Iterate over categories in the partition. Each iteration
returns an integer.
'''
for category in np.unique(self._partition):
yield category
def iter_points(self):
'''
Iterate over points in the space. Each iteration returns a
tuple.
'''
for point in np.ndindex(self.shape):
yield point
def boolean_matrix(self):
'''
Returns a 2D Boolean matrix, where rows correspond to meanings
and columns correspond to categories. True indicates that the
ith meaning belongs to the jth category. This Boolean matrix
representation is used by the communicative_cost method in the
Space object for fast computation using a similarity matrix.
'''
		if self._boolean_matrix is not None:
			return self._boolean_matrix
self._boolean_matrix = convert_to_bool_matrix(self._partition)
return self._boolean_matrix
def spawn_speaker(self):
'''
Creates a Speaker with perfect speaker certainty.
'''
return Speaker(self.shape)
def spawn_listener(self, gamma, mu=2):
'''
Creates a Listener who represents the partition according to
the specified gamma and mu parameters. gamma may be set to
'uniform' to create a uniform listener.
'''
return Listener(self.shape, self.listener_distributions(gamma, mu))
def listener_distributions(self, gamma, mu=2):
'''
Returns a dictionary mapping categories to distributions
created under the specified gamma and mu parameters. gamma may
be set to 'uniform' to create uniform category distributions.
'''
if gamma == 'uniform':
return {category:self.uniform_distribution(category) for category in self.iter_categories()}
else:
return {category:self.gaussian_distribution(category, gamma, mu) for category in self.iter_categories()}
def uniform_distribution(self, category):
'''
Returns the uniform distribution for a particular category.
'''
category_members = self[category]
uniform_probability = 1.0 / len(category_members)
distribution = np.zeros(self.shape, dtype=float)
for point in category_members:
distribution[point] = uniform_probability
return Distribution(distribution, normalize=False)
def gaussian_distribution(self, category, gamma=1, mu=2):
'''
Returns the Gaussian distribution for a particular category
under the specified gamma and mu parameters.
'''
distribution = np.zeros(self.shape, dtype=float)
for point in self.iter_points():
distribution[point] = self._category_similarity(point, category, gamma, mu)
return Distribution(distribution, normalize=True)
def _category_similarity(self, point, category, gamma, mu):
'''
Returns the sum similarity between a point and all members of
a category under the specified gamma and mu parameters.
'''
return sum(self._similarity(point, member, gamma, mu) for member in self[category])
def _similarity(self, x, y, gamma, mu):
'''
Returns the similarity between two points under the specified
gamma and mu parameters.
'''
if not ((isinstance(gamma, int) or isinstance(gamma, float)) and gamma >= 0):
raise ValueError('Gamma parameter must be positive number.')
return np.exp(-gamma * self._distance(x, y, mu)**2)
def _distance(self, x, y, mu):
'''
Returns the Minkowski distance between two points for some mu.
mu = 1: Manhattan distance
mu = 2: Euclidean distance
'''
if not ((isinstance(mu, int) or isinstance(mu, float)) and mu > 0):
if mu == 'circle_euclidean':
return self._circle_euclidean(x, y)
raise ValueError('Mu parameter must be positive number.')
return sum(abs(x - y)**mu for x, y in zip(x, y))**(1.0/mu)
def _circle_euclidean(self, x, y):
'''
Returns the Euclidean distance between two points on a line
which wraps back around on itself (the shorter distance in
either direction is returned).
'''
sigma = 0.0
for dim in range(len(self.shape)):
d1 = abs(x[dim] - y[dim])
d2 = abs(d1 - self.shape[dim])
if d1 < d2:
sigma += d1**2
else:
sigma += d2**2
return sigma**0.5
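# Illustrative sketch (added for exposition, not part of the original module):
# build the 2x4 partition from the class docstring and inspect it.
def _example_partition():
	p = Partition([[0, 0, 1, 1], [0, 0, 1, 1]])
	assert len(p) == 2       # two categories
	assert p[(0, 0)] == 0    # point (0,0) belongs to category 0
	assert (0, 2) in p[1]    # category 1 contains point (0,2)
	return p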
########################################################################
class Distribution:
'''
A Distribution object represents a probability distribution. An
error is raised if the passed probabilities do not sum to 1; to
correct this, set normalize to True, which will automatically
normalize the distribution.
'''
@property
def shape(self):
return self.probabilities.shape
def __init__(self, distribution, normalize=False):
distribution = np.array(distribution, dtype=float)
if distribution.ndim == 0:
raise ValueError('Distribution must have at least one dimension')
if normalize is True:
self.probabilities = distribution / distribution.sum()
elif np.isclose(distribution.sum(), 1.0):
self.probabilities = distribution
else:
raise ValueError('Probabilities do not sum to 1: Use normalize=True')
def __repr__(self):
'''
Provides textual description of the distribution.
'''
dims = len(self.shape)
start = '['*dims + str(self.probabilities[(0,)*dims])
end = str(self.probabilities[(-1,)*dims]) + ']'*dims
return 'Distribution%s ... %s' % (start, end)
def __str__(self):
'''
Provides printable representation of the distribution.
'''
return self.probabilities.__str__()
def __getitem__(self, key):
'''
Pass an int (1D) or tuple (ND) to get the probability of that
point on the distribution.
'''
return self.probabilities[key]
def __iter__(self):
'''
Default iterator. Each iteration returns a point in the
distribution along with its associated probability.
'''
for point, probability in np.ndenumerate(self.probabilities):
yield point, probability
def __mul__(self, operand):
return self.probabilities * operand.probabilities
def smooth(self, alpha):
'''
Returns a smoothed copy of the Distribution using convex
combination smoothing. alpha=0: no smoothing; alpha=1: smooth
to a uniform distribution.
'''
if alpha:
			if not isinstance(alpha, (int, float)) or alpha < 0 or alpha > 1:
raise ValueError('Alpha must be number between 0 and 1.')
uniform = np.full(self.shape, 1.0 / np.product(self.shape), dtype=float)
return Distribution(uniform*alpha + self.probabilities*(1.0 - alpha), False)
return self
########################################################################
class Need(Distribution):
'''
A Need object represents the probability with which each point in
an n-dimensional space will need to be expressed. To create a Need
object, pass a list like [[2,2,4,5], [3,1,6,8]], where the
structure of the lists represents the space (here 2x4), and the
numbers represent the frequency or probability of each point.
Frequencies will automatically be converted to probabilities.
Passing a tuple like (2,4) creates a Need object of given
dimensionality with uniform need probabilities.
'''
def __init__(self, need_frequencies):
if isinstance(need_frequencies, tuple):
self.probabilities = np.full(need_frequencies, 1.0 / np.product(need_frequencies), dtype=float)
else:
need_frequencies = np.array(need_frequencies, dtype=float)
if need_frequencies.ndim == 0:
raise ValueError('Distribution must be at least one dimensional')
self.probabilities = need_frequencies / need_frequencies.sum()
########################################################################
class Speaker:
'''
Collection of distributions - one for each point in the space.
'''
@property
def shape(self):
return self._shape
def __init__(self, shape, speaker_distributions=None):
if not isinstance(shape, tuple):
raise ValueError('Shape must be tuple')
self._shape = shape
self._distributions = {}
if speaker_distributions:
if not isinstance(speaker_distributions, dict):
				raise ValueError('Speaker distributions should be passed as dictionary: point:distribution')
else:
points = list(np.ndindex(self._shape))
for point in points:
if point not in speaker_distributions:
raise ValueError('Speaker distributions must be provided for every point')
for point, speaker_distribution in speaker_distributions.items():
if point not in points:
raise ValueError('Invalid point contained in passed speaker distributions')
self[point] = speaker_distribution
else: # Assume speaker certainty and create point distributions
for point in np.ndindex(self._shape):
point_distribution = np.zeros(self._shape, dtype=float)
point_distribution[point] = 1.0
self._distributions[point] = Distribution(point_distribution)
def __getitem__(self, key):
'''
		Pass a point (tuple) to get the Distribution associated with
		that point.
'''
if key not in self._distributions:
raise ValueError('Invalid point.')
return self._distributions[key]
def __setitem__(self, key, value):
'''
		Set the Distribution associated with a particular point.
'''
if not self._valid_key(key):
raise ValueError('Invalid point.')
if not isinstance(value, Distribution):
value = Distribution(value)
if value.shape != self._shape:
raise ValueError('Distribution shape does not match the shape of the speaker.')
self._distributions[key] = value
def __iter__(self):
'''
		Default iterator. Each iteration returns a point in the space
		along with its associated Distribution.
'''
for point in np.ndindex(self._shape):
yield (point, self[point])
def _valid_key(self, key):
if not isinstance(key, tuple):
return False
if len(key) != len(self.shape):
return False
for dim in range(len(key)):
if key[dim] >= self._shape[dim]:
return False
return True
########################################################################
class Listener:
'''
Collection of distributions - one for each category
'''
@property
def shape(self):
return self._shape
def __init__(self, shape, listener_distributions):
if not isinstance(shape, tuple):
raise ValueError('Shape must be tuple')
if not isinstance(listener_distributions, dict):
			raise ValueError('Listener distributions should be passed as dictionary: category:Distribution')
self._shape = shape
self._distributions = {}
for category, listener_distribution in listener_distributions.items():
self[category] = listener_distribution
def __getitem__(self, key):
'''
Pass an integer to get the distribution for that category.
'''
if key not in self._distributions:
raise ValueError('Invalid category.')
return self._distributions[key]
def __setitem__(self, key, value):
'''
Change the distribution for a particular category
'''
if not isinstance(value, Distribution):
value = Distribution(value)
if value.shape != self._shape:
raise ValueError('Distribution shape does not match the shape of the listener.')
self._distributions[key] = value
def __iter__(self):
'''
		Default iterator. Each iteration returns a category along with
		its associated Distribution.
'''
for category in sorted(list(self._distributions.keys())):
yield (category, self[category])
def smooth(self, alpha):
if alpha:
smoothed_distributions = {}
for category, distribution in self._distributions.items():
smoothed_distributions[category] = distribution.smooth(alpha)
return Listener(self.shape, smoothed_distributions)
return self
########################################################################
class Space:
'''
A Space object represents an n-dimensional universe. To create a
space object of certain dimensionality, pass a tuple like (2,4).
	Optionally, you can pass a Need object, a gamma setting (default:
	1), and a mu setting (default: 2 = Euclidean, 1 = Manhattan). If no
	Need object is passed, a uniform need object will be created.
'''
@property
def shape(self):
return self._shape
def __init__(self, shape, need=None, gamma=1, mu=2):
if not isinstance(shape, tuple):
raise ValueError('The shape of the space must be a tuple.')
self._shape = shape
if need:
if not isinstance(need, Need):
raise ValueError('Invalid need object. Pass a need object or set to None for uniform need probabilities.')
self._need = need
else: # Need unspecified, so create a uniform need object
self._need = Need(self._shape)
if not ((isinstance(gamma, int) or isinstance(gamma, float)) and gamma >= 0):
raise ValueError('Gamma parameter must be positive number.')
self._gamma = gamma
if not ((isinstance(mu, int) or isinstance(mu, float)) and mu > 0):
raise ValueError('Mu parameter must be positive number.')
self._mu = mu
		pairwise_distances = pdist(list(np.ndindex(self._shape)), 'minkowski', p=self._mu)
distance_matrix = squareform(pairwise_distances)
self._similarity_matrix = np.exp(-self._gamma * distance_matrix**2)
def __repr__(self):
'''
Provides textual description of the space object.
'''
if len(self._shape) == 1:
return 'Space[length=%i, gamma=%i, mu=%s]' % (self._shape[0], self._gamma, self._mu)
return 'Space[dimensionality=%s, gamma=%i, mu=%s]' % ('x'.join(map(str, self._shape)), self._gamma, self._mu)
def communicative_cost(self, partition, need=None):
'''
Returns the communicative cost for a given partition and need
probabilities. If no need object is passed, the need
probabilities will be inherited from the space's own need
object.
'''
if not isinstance(partition, Partition):
raise ValueError('Invalid Partition object.')
if partition.shape != self._shape:
raise ValueError('Partition object does not match the dimensions of the space. Should be %s.' % 'x'.join(map(str, self._shape)))
if need:
if not isinstance(need, Need):
raise ValueError('Invalid Need object. Pass a Need object or set to None to inherit need probabilities from the space.')
if need.shape != self._shape:
raise ValueError('Need object does not match the dimensions of the space. Should be %s.' % 'x'.join(map(str, self._shape)))
else:
need = self._need
boolean_matrix = partition.boolean_matrix()
listener_distributions = np.dot(self._similarity_matrix, boolean_matrix)
norm_listener_distributions = listener_distributions * boolean_matrix / listener_distributions.sum(axis=0)
neg_log_listener_distributions = -np.log2(norm_listener_distributions.sum(axis=1))
return (need.probabilities * neg_log_listener_distributions.reshape(self._shape)).sum()
def cost(self, language_array):
'''
Returns the communicative cost of a language passed as a
simple numpy array under the assumption of uniform need
probabilities. Essentially does the same as the
communicative_cost method above without the need to first
convert the numpy array to a Partition object.
'''
if not isinstance(language_array, np.ndarray):
raise ValueError('language_array should be Numpy array')
if language_array.shape != self._shape:
raise ValueError('Partition object does not match the dimensions of the space. Should be %s.' % 'x'.join(map(str, self._shape)))
boolean_matrix = convert_to_bool_matrix(language_array)
listener_distributions = np.dot(self._similarity_matrix, boolean_matrix)
norm_listener_distributions = listener_distributions * boolean_matrix / listener_distributions.sum(axis=0)
neg_log_listener_distributions = -np.log2(norm_listener_distributions.sum(axis=1))
return (self._need.probabilities * neg_log_listener_distributions.reshape(self._shape)).sum()
########################################################################
def convert_to_bool_matrix(partition):
'''
Returns a 2D Boolean matrix, where rows correspond to meanings and
columns correspond to categories. True indicates that the ith
meaning belongs to the jth category. This Boolean matrix
representation is used by the communicative_cost method in the
Space object for fast computation using a similarity matrix.
'''
n_points = partition.size # determines number of rows
n_categories = len(np.unique(partition)) # determines number of columns
cat_to_col = {cat:col for col, cat in enumerate(np.unique(partition))} # maps categories to columns
boolean_matrix = np.zeros((n_points, n_categories), dtype=bool)
for row, point in enumerate(np.ndindex(partition.shape)):
column = cat_to_col[partition[point]]
boolean_matrix[row, column] = True
return boolean_matrix
########################################################################
def KL_divergence(s, l):
'''
Returns the KL divergence between a speaker and listener
distribution.
'''
if s.shape != l.shape:
raise ValueError('Speaker and listener distributions do not have the same shape')
D_KL = 0.0
for point in np.ndindex(s.shape):
if s[point] == 0:
continue
if l[point] == 0:
raise ValueError('Cannot compute KL divergence because l=0 where s>0 at point %s. Try smoothing.'%str(point))
D_KL += s[point] * np.log2(s[point] / (l[point]))
return D_KL
def cost(partition, need, speaker, listener, alpha=None):
'''
Returns the communicative cost given partition, need, speaker, and
listener objects.
'''
if not isinstance(partition, Partition):
raise ValueError('Invalid Partition object')
if not isinstance(need, Need) or partition.shape != need.shape:
raise ValueError('Invalid Need object')
if not isinstance(speaker, Speaker) or partition.shape != speaker.shape:
raise ValueError('Invalid Speaker object')
if not isinstance(listener, Listener) or partition.shape != listener.shape:
raise ValueError('Invalid Listener object')
if alpha:
listener = listener.smooth(alpha)
return sum(need[target] * KL_divergence(speaker[target], listener[category]) for target, category in partition)
########################################################################
def random_partition(shape, n_categories, convex=False, seeds=None):
'''
	Returns the category seed points and a randomly generated partition
	array with the specified shape, number of categories, and convexity.
'''
space = np.full(shape, -1, dtype=int)
n_items = np.product(shape)
points = list(np.ndindex(shape))
if seeds is None:
seeds = [points[p] for p in np.random.choice(n_items, n_categories, False)]
for category in range(n_categories):
space[seeds[category]] = category
for point in points:
if space[point] == -1:
if convex:
distances = [dist(point, seed, 2) for seed in seeds]
min_distance = min(distances)
category = np.random.choice([c for c in range(n_categories) if distances[c] == min_distance])
else:
category = np.random.choice(n_categories)
space[point] = category
return seeds, space
def iter_partitions(collection):
if len(collection) == 1:
yield [ collection ]
return
first = collection[0]
for smaller in iter_partitions(collection[1:]):
for n, subset in enumerate(smaller):
yield smaller[:n] + [[ first ] + subset] + smaller[n+1:]
yield [ [ first ] ] + smaller
def all_partitions(shape):
'''
Returns all partitions of a space
'''
space = np.zeros(shape, dtype=int)
for partition in iter_partitions(list(np.ndindex(shape))):
for category, points in enumerate(partition):
for point in points:
space[point] = category
yield Partition(space)
def dist(x, y, mu):
return sum(abs(x - y)**mu for x, y in zip(x, y))**(1.0/mu)
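# Illustrative sketch (added for exposition, not part of the original module):
# score a random convex partition of a 2x4 space under uniform need.
if __name__ == '__main__':
	seeds, array = random_partition((2, 4), n_categories=2, convex=True)
	partition = Partition(array)
	space = Space((2, 4), gamma=1, mu=2)
	print('Partition:')
	print(partition)
	print('Communicative cost:', space.communicative_cost(partition))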
|
the-stack_0_12489 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import logging
def conv3x3(in_channels, out_channels, stride=1):
return nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=stride, padding=1, bias=False)
class ResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, apply_activation=False):
super(ResidualBlock, self).__init__()
self.conv1 = conv3x3(in_channels, out_channels, stride)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = conv3x3(out_channels, out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.conv3 = conv3x3(out_channels, out_channels)
self.bn3 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.apply_activation = apply_activation
def forward(self, x):
"""Output size is same as input size"""
residual = x
out = self.conv1(x)
out = self.bn1(out)
out += residual
residual = out
out = self.conv2(out)
out = self.bn2(out)
out += residual
residual = out
out = self.conv3(out)
out = self.bn3(out)
out += residual
if self.apply_activation: out = self.relu(out)
return out
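# Illustrative usage sketch (added for exposition, not part of the original
# module): channel counts match so that the residual additions are valid.
if __name__ == '__main__':
    block = ResidualBlock(16, 16, stride=1, apply_activation=True)
    x = torch.randn(2, 16, 32, 32)
    y = block(x)
    print(y.shape)  # expected: torch.Size([2, 16, 32, 32])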
|
the-stack_0_12491 | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the pairs function below.
def pairs(k, arr):
res = 0
memo = dict()
for el in arr:
if el-k in memo:
res += 1
if el+k in memo:
res += 1
memo[el] = True
return res
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nk = input().split()
n = int(nk[0])
k = int(nk[1])
arr = list(map(int, input().rstrip().split()))
result = pairs(k, arr)
fptr.write(str(result) + '\n')
fptr.close()
|
the-stack_0_12494 | import torch
from torch import nn
def normalize(x, axis=-1):
"""Normalizing to unit length along the specified dimension.
Args:
x: pytorch Variable
Returns:
x: pytorch Variable, same shape as input
"""
x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
return x
def euclidean_dist(x, y):
"""
Args:
x: pytorch Variable, with shape [m, d]
y: pytorch Variable, with shape [n, d]
Returns:
dist: pytorch Variable, with shape [m, n]
"""
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
    dist.addmm_(x, y.t(), beta=1, alpha=-2)  # dist = dist - 2 * x @ y.t()
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
return dist
def hard_example_mining(dist_mat, labels, return_inds=False):
"""For each anchor, find the hardest positive and negative sample.
Args:
dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]
labels: pytorch LongTensor, with shape [N]
return_inds: whether to return the indices. Save time if `False`(?)
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
p_inds: pytorch LongTensor, with shape [N];
indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
n_inds: pytorch LongTensor, with shape [N];
indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
NOTE: Only consider the case in which all labels have same num of samples,
thus we can cope with all anchors in parallel.
"""
assert len(dist_mat.size()) == 2
assert dist_mat.size(0) == dist_mat.size(1)
N = dist_mat.size(0)
# shape [N, N]
is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())
is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())
ap_mat = dist_mat.mul(is_pos.float())
an_mat = dist_mat.mul(is_neg.float())
dist_ap, relative_p_inds = torch.max(ap_mat, 1, keepdim=True)
max_dist_an, _ = torch.max(an_mat, 1, keepdim=True)
max_an_mat = dist_mat + max_dist_an * (is_pos.float())
dist_an, relative_n_inds = torch.min(max_an_mat, 1, keepdim=True)
dist_ap = dist_ap.squeeze(1)
dist_an = dist_an.squeeze(1)
if return_inds:
# shape [N, N]
ind = (labels.new().resize_as_(labels)
.copy_(torch.arange(0, N).long())
.unsqueeze(0).expand(N, N))
# shape [N, 1]
p_inds = torch.gather(
ind, 1, relative_p_inds.data)
n_inds = torch.gather(
ind, 1, relative_n_inds.data)
# shape [N]
p_inds = p_inds.squeeze(1)
n_inds = n_inds.squeeze(1)
return dist_ap, dist_an, p_inds, n_inds
return dist_ap, dist_an
class TripletLoss(object):
"""Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).
Related Triplet Loss theory can be found in paper 'In Defense of the Triplet
Loss for Person Re-Identification'."""
def __init__(self, margin=None):
self.margin = margin
if margin is not None:
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
else:
self.ranking_loss = nn.SoftMarginLoss()
def __call__(self, global_feat, labels, normalize_feature=False):
if normalize_feature:
global_feat = normalize(global_feat, axis=-1)
dist_mat = euclidean_dist(global_feat, global_feat)
dist_ap, dist_an = hard_example_mining(
dist_mat, labels)
y = dist_an.new().resize_as_(dist_an).fill_(1)
if self.margin is not None:
loss = self.ranking_loss(dist_an, dist_ap, y)
else:
loss = self.ranking_loss(dist_an - dist_ap, y)
return loss, dist_ap, dist_an
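# Illustrative sketch (added for exposition, not part of the original module):
# batch-hard triplet loss on random CPU embeddings with 2 samples per identity.
def _example_triplet_loss():
    feats = torch.randn(8, 128)
    labels = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])
    criterion = TripletLoss(margin=0.3)
    loss, dist_ap, dist_an = criterion(feats, labels, normalize_feature=True)
    return loss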
class CrossEntropyLabelSmooth(nn.Module):
"""Cross entropy loss with label smoothing regularizer.
Reference:
Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
Equation: y = (1 - epsilon) * y + epsilon / K.
Args:
num_classes (int): number of classes.
epsilon (float): weight.
"""
def __init__(self, num_classes, epsilon=0.1, use_gpu=True):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.use_gpu = use_gpu
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
"""
Args:
inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
targets: ground truth labels with shape (num_classes)
"""
log_probs = self.logsoftmax(inputs)
targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)
if self.use_gpu: targets = targets.cuda()
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (- targets * log_probs).mean(0).sum()
return loss |
the-stack_0_12495 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Miscellaneous generic utility functions and classes."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import filecmp
import inspect
import os
import re
import sys
import six
from six.moves import range
from six.moves import cStringIO as StringIO
from .lang import classproperty
def get_new_uuid():
"""
Return a new UUID (typically to be used for new nodes).
It uses the UUID version specified in
aiida.backends.settings.AIIDANODES_UUID_VERSION
"""
import uuid
return six.text_type(uuid.uuid4())
# To speed up the process (os.path.abspath calls are slow)
_repository_folder_cache = {} # pylint: disable=invalid-name
def get_repository_folder(subfolder=None):
"""
Return the top folder of the local repository.
"""
try:
return _repository_folder_cache[subfolder]
except KeyError:
from aiida.manage.configuration import get_profile
repository_path = get_profile().repository_path
if not os.path.isdir(repository_path):
raise ImportError
if subfolder is None:
retval = os.path.abspath(repository_path)
elif subfolder == "sandbox":
retval = os.path.abspath(os.path.join(repository_path, 'sandbox'))
elif subfolder == "repository":
retval = os.path.abspath(os.path.join(repository_path, 'repository'))
else:
raise ValueError("Invalid 'subfolder' passed to get_repository_folder: {}".format(subfolder))
_repository_folder_cache[subfolder] = retval
return retval
def validate_list_of_string_tuples(val, tuple_length):
"""
Check that:
1. ``val`` is a list or tuple
2. each element of the list:
a. is a list or tuple
b. is of length equal to the parameter tuple_length
c. each of the two elements is a string
Return if valid, raise ValidationError if invalid
"""
from aiida.common.exceptions import ValidationError
err_msg = ("the value must be a list (or tuple) "
"of length-N list (or tuples), whose elements are strings; "
"N={}".format(tuple_length))
if not isinstance(val, (list, tuple)):
raise ValidationError(err_msg)
for element in val:
if (not isinstance(element, (list, tuple)) or (len(element) != tuple_length) or
not all(isinstance(s, six.string_types) for s in element)):
raise ValidationError(err_msg)
return True
def get_unique_filename(filename, list_of_filenames):
"""
Return a unique filename that can be added to the list_of_filenames.
If filename is not in list_of_filenames, it simply returns the filename
    string itself. Otherwise, it appends an integer number to the filename
(before the extension) until it finds a unique filename.
:param filename: the filename to add
:param list_of_filenames: the list of filenames to which filename
should be added, without name duplicates
:returns: Either filename or its modification, with a number appended
between the name and the extension.
"""
if filename not in list_of_filenames:
return filename
basename, ext = os.path.splitext(filename)
# Not optimized, but for the moment this should be fast enough
append_int = 1
while True:
new_filename = "{:s}-{:d}{:s}".format(basename, append_int, ext)
if new_filename not in list_of_filenames:
break
append_int += 1
return new_filename
def str_timedelta(dt, max_num_fields=3, short=False, negative_to_zero=False): # pylint: disable=invalid-name
"""
Given a dt in seconds, return it in a HH:MM:SS format.
:param dt: a TimeDelta object
:param max_num_fields: maximum number of non-zero fields to show
(for instance if the number of days is non-zero, shows only
days, hours and minutes, but not seconds)
:param short: if False, print always ``max_num_fields`` fields, even
if they are zero. If True, do not print the first fields, if they
are zero.
:param negative_to_zero: if True, set dt = 0 if dt < 0.
"""
if max_num_fields <= 0:
raise ValueError("max_num_fields must be > 0")
s_tot = dt.total_seconds() # Important to get more than 1 day, and for
# negative values. dt.seconds would give
# wrong results in these cases, see
# http://docs.python.org/2/library/datetime.html
s_tot = int(s_tot)
if negative_to_zero:
if s_tot < 0:
s_tot = 0
negative = (s_tot < 0)
s_tot = abs(s_tot)
negative_string = " in the future" if negative else " ago"
# For the moment stay away from months and years, difficult to get
days, remainder = divmod(s_tot, 3600 * 24)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
all_fields = [(days, 'D'), (hours, 'h'), (minutes, 'm'), (seconds, 's')]
fields = []
start_insert = False
counter = 0
for idx, field in enumerate(all_fields):
if field[0] != 0:
start_insert = True
if (len(all_fields) - idx) <= max_num_fields:
start_insert = True
if start_insert:
if counter >= max_num_fields:
break
fields.append(field)
counter += 1
if short:
while len(fields) > 1: # at least one element has to remain
if fields[0][0] != 0:
break
fields.pop(0) # remove first element
# Join the fields
raw_string = ":".join(["{:02d}{}".format(*f) for f in fields])
if raw_string.startswith('0'):
raw_string = raw_string[1:]
# Return the resulting string, appending a suitable string if the time
# is negative
return "{}{}".format(raw_string, negative_string)
def get_class_string(obj):
"""
Return the string identifying the class of the object (module + object name,
joined by dots).
It works both for classes and for class instances.
"""
if inspect.isclass(obj):
return "{}.{}".format(obj.__module__, obj.__name__)
return "{}.{}".format(obj.__module__, obj.__class__.__name__)
def get_object_from_string(class_string):
"""
Given a string identifying an object (as returned by the get_class_string
method) load and return the actual object.
"""
import importlib
the_module, _, the_name = class_string.rpartition('.')
return getattr(importlib.import_module(the_module), the_name)
def export_shard_uuid(uuid):
"""
Sharding of the UUID for the import/export
"""
return os.path.join(uuid[:2], uuid[2:4], uuid[4:])
def grouper(n, iterable): # pylint: disable=invalid-name
"""
Given an iterable, returns an iterable that returns tuples of groups of
elements from iterable of length n, except the last one that has the
    required length to exhaust iterable (i.e., there is no filling applied).
:param n: length of each tuple (except the last one,that will have length
<= n
:param iterable: the iterable to divide in groups
"""
import itertools
iterator = iter(iterable)
while True:
chunk = tuple(itertools.islice(iterator, n))
if not chunk:
return
yield chunk
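# Illustrative example (added for exposition, not part of the original module):
#   list(grouper(2, range(5))) == [(0, 1), (2, 3), (4,)]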
class ArrayCounter(object): # pylint: disable=useless-object-inheritance
"""
A counter & a method that increments it and returns its value.
It is used in various tests.
"""
seq = None
def __init__(self):
self.seq = -1
def array_counter(self):
self.seq += 1
return self.seq
def are_dir_trees_equal(dir1, dir2):
"""
Compare two directories recursively. Files in each directory are
assumed to be equal if their names and contents are equal.
@param dir1: First directory path
@param dir2: Second directory path
@return: True if the directory trees are the same and
there were no errors while accessing the directories or files,
False otherwise.
"""
# Directory comparison
dirs_cmp = filecmp.dircmp(dir1, dir2)
if dirs_cmp.left_only or dirs_cmp.right_only or dirs_cmp.funny_files:
return (False, "Left directory: {}, right directory: {}, files only "
"in left directory: {}, files only in right directory: "
"{}, not comparable files: {}".format(dir1, dir2, dirs_cmp.left_only, dirs_cmp.right_only,
dirs_cmp.funny_files))
# If the directories contain the same files, compare the common files
(_, mismatch, errors) = filecmp.cmpfiles(dir1, dir2, dirs_cmp.common_files, shallow=False)
if mismatch:
return (False, "The following files in the directories {} and {} "
"don't match: {}".format(dir1, dir2, mismatch))
if errors:
return (False, "The following files in the directories {} and {} "
"aren't regular: {}".format(dir1, dir2, errors))
for common_dir in dirs_cmp.common_dirs:
new_dir1 = os.path.join(dir1, common_dir)
new_dir2 = os.path.join(dir2, common_dir)
res, msg = are_dir_trees_equal(new_dir1, new_dir2)
if not res:
return False, msg
return True, "The given directories ({} and {}) are equal".format(dir1, dir2)
class Prettifier(object): # pylint: disable=useless-object-inheritance
"""
Class to manage prettifiers (typically for labels of kpoints
in band plots)
"""
@classmethod
def _prettify_label_pass(cls, label):
"""
No-op prettifier, simply returns the same label
:param label: a string to prettify
"""
return label
@classmethod
def _prettify_label_agr(cls, label):
"""
Prettifier for XMGrace
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', r'\xG\f{}')
.replace('DELTA', r'\xD\f{}')
.replace('LAMBDA', r'\xL\f{}')
.replace('SIGMA', r'\xS\f{}')
) # yapf:disable
return re.sub(r'_(.?)', r'\\s\1\\N', label)
@classmethod
def _prettify_label_agr_simple(cls, label):
"""
Prettifier for XMGrace (for old label names)
:param label: a string to prettify
"""
if label == 'G':
return r'\xG'
return re.sub(r'(\d+)', r'\\s\1\\N', label)
@classmethod
def _prettify_label_gnuplot(cls, label):
"""
Prettifier for Gnuplot
:note: uses unicode, returns unicode strings (potentially, if needed)
:param label: a string to prettify
"""
label = (
label
.replace(u'GAMMA', u'Γ')
.replace(u'DELTA', u'Δ')
.replace(u'LAMBDA', u'Λ')
.replace(u'SIGMA', u'Σ')
) # yapf:disable
return re.sub(r'_(.?)', r'_{\1}', label)
@classmethod
def _prettify_label_gnuplot_simple(cls, label):
"""
Prettifier for Gnuplot (for old label names)
:note: uses unicode, returns unicode strings (potentially, if needed)
:param label: a string to prettify
"""
if label == 'G':
return u'Γ'
return re.sub(r'(\d+)', r'_{\1}', label)
@classmethod
def _prettify_label_latex(cls, label):
"""
Prettifier for matplotlib, using LaTeX syntax
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', r'$\Gamma$')
.replace('DELTA', r'$\Delta$')
.replace('LAMBDA', r'$\Lambda$')
.replace('SIGMA', r'$\Sigma$')
) # yapf:disable
label = re.sub(r'_(.?)', r'$_{\1}$', label)
# label += r"$_{\vphantom{0}}$"
return label
@classmethod
def _prettify_label_latex_simple(cls, label):
"""
Prettifier for matplotlib, using LaTeX syntax (for old label names)
:param label: a string to prettify
"""
if label == 'G':
return r'$\Gamma$'
return re.sub(r'(\d+)', r'$_{\1}$', label)
@classproperty
def prettifiers(cls): # pylint: disable=no-self-argument
"""
Property that returns a dictionary that for each string associates
the function to prettify a label
:return: a dictionary where keys are strings and values are functions
"""
return {
'agr_seekpath': cls._prettify_label_agr,
'agr_simple': cls._prettify_label_agr_simple,
'latex_simple': cls._prettify_label_latex_simple,
'latex_seekpath': cls._prettify_label_latex,
'gnuplot_simple': cls._prettify_label_gnuplot_simple,
'gnuplot_seekpath': cls._prettify_label_gnuplot,
'pass': cls._prettify_label_pass,
}
@classmethod
def get_prettifiers(cls):
"""
Return a list of valid prettifier strings
:return: a list of strings
"""
return sorted(cls.prettifiers.keys()) # pylint: disable=no-member
def __init__(self, format): # pylint: disable=redefined-builtin
"""
Create a class to pretttify strings of a given format
:param format: a string with the format to use to prettify.
Valid formats are obtained from self.prettifiers
"""
if format is None:
format = 'pass'
try:
self._prettifier_f = self.prettifiers[format] # pylint: disable=unsubscriptable-object
except KeyError:
raise ValueError("Unknown prettifier format {}; valid formats: {}".format(
format, ", ".join(self.get_prettifiers())))
def prettify(self, label):
"""
Prettify a label using the format passed in the initializer
:param label: the string to prettify
:return: a prettified string
"""
return self._prettifier_f(label)
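# Illustrative examples (added for exposition, not part of the original module):
#   Prettifier('latex_simple').prettify('G')           -> '$\Gamma$'
#   Prettifier('latex_seekpath').prettify('GAMMA')     -> '$\Gamma$'
#   Prettifier('gnuplot_seekpath').prettify('SIGMA_0') -> 'Σ_{0}'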
def prettify_labels(labels, format=None): # pylint: disable=redefined-builtin
"""
Prettify label for typesetting in various formats
:param labels: a list of length-2 tuples, in the format(position, label)
:param format: a string with the format for the prettifier (e.g. 'agr',
'matplotlib', ...)
:return: the same list as labels, but with the second value possibly replaced
with a prettified version that typesets nicely in the selected format
"""
prettifier = Prettifier(format)
return [(pos, prettifier.prettify(label)) for pos, label in labels]
def join_labels(labels, join_symbol="|", threshold=1.e-6):
"""
Join labels with a joining symbol when they are very close
:param labels: a list of length-2 tuples, in the format(position, label)
:param join_symbol: the string to use to join different paths. By default, a pipe
:param threshold: the threshold to decide if two float values are the same and should
be joined
:return: the same list as labels, but with the second value possibly replaced
with strings joined when close enough
"""
if labels:
new_labels = [list(labels[0])]
# modify labels when in overlapping position
j = 0
for i in range(1, len(labels)):
if abs(labels[i][0] - labels[i - 1][0]) < threshold:
new_labels[j][1] += join_symbol + labels[i][1]
else:
new_labels.append(list(labels[i]))
j += 1
else:
new_labels = []
return new_labels
def strip_prefix(full_string, prefix):
"""
Strip the prefix from the given string and return it. If the prefix is not present
the original string will be returned unaltered
:param full_string: the string from which to remove the prefix
:param prefix: the prefix to remove
:return: the string with prefix removed
"""
if full_string.startswith(prefix):
return full_string.rsplit(prefix)[1]
return full_string
class Capturing(object): # pylint: disable=useless-object-inheritance
"""
This class captures stdout and returns it
(as a list, split by lines).
Note: if you raise a SystemExit, you have to catch it outside.
E.g., in our tests, this works::
import sys
with self.assertRaises(SystemExit):
with Capturing() as output:
sys.exit()
But out of the testing environment, the code instead just exits.
To use it, access the obj.stdout_lines, or just iterate over the object
:param capture_stderr: if True, also captures sys.stderr. To access the
lines, use obj.stderr_lines. If False, obj.stderr_lines is None.
"""
# pylint: disable=attribute-defined-outside-init
def __init__(self, capture_stderr=False):
self.stdout_lines = list()
super(Capturing, self).__init__()
self._capture_stderr = capture_stderr
if self._capture_stderr:
self.stderr_lines = list()
else:
self.stderr_lines = None
def __enter__(self):
"""Enter the context where all output is captured."""
self._stdout = sys.stdout
self._stringioout = StringIO()
sys.stdout = self._stringioout
if self._capture_stderr:
self._stderr = sys.stderr
self._stringioerr = StringIO()
sys.stderr = self._stringioerr
return self
def __exit__(self, *args):
"""Exit the context where all output is captured."""
self.stdout_lines.extend(self._stringioout.getvalue().splitlines())
sys.stdout = self._stdout
del self._stringioout # free up some memory
if self._capture_stderr:
self.stderr_lines.extend(self._stringioerr.getvalue().splitlines())
sys.stderr = self._stderr
del self._stringioerr # free up some memory
def __str__(self):
return str(self.stdout_lines)
def __iter__(self):
return iter(self.stdout_lines)
class ErrorAccumulator(object): # pylint: disable=useless-object-inheritance
"""
Allows to run a number of functions and collect all the errors they raise
This allows to validate multiple things and tell the user about all the
errors encountered at once. Works best if the individual functions do not depend on each other.
Does not allow to trace the stack of each error, therefore do not use for debugging, but for
semantical checking with user friendly error messages.
"""
def __init__(self, *error_cls):
self.error_cls = error_cls
self.errors = {k: [] for k in self.error_cls}
def run(self, function, *args, **kwargs):
try:
function(*args, **kwargs)
except self.error_cls as err:
self.errors[err.__class__].append(err)
def success(self):
return bool(not any(self.errors.values()))
def result(self, raise_error=Exception):
if raise_error:
self.raise_errors(raise_error)
return self.success(), self.errors
def raise_errors(self, raise_cls):
if not self.success():
raise raise_cls('The following errors were encountered: {}'.format(self.errors))
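# Illustrative usage sketch (added for exposition, not part of the original
# module):
#   acc = ErrorAccumulator(ValueError, TypeError)
#   acc.run(int, 'not-a-number')                # the ValueError is collected
#   ok, errors = acc.result(raise_error=False)  # ok is False, errors lists it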
|
the-stack_0_12502 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import datetime
import decimal
import itertools
import math
import re
import hypothesis as h
import numpy as np
import pytz
import pytest
from pyarrow.pandas_compat import _pandas_api # noqa
import pyarrow as pa
import pyarrow.tests.strategies as past
int_type_pairs = [
(np.int8, pa.int8()),
(np.int16, pa.int16()),
(np.int32, pa.int32()),
(np.int64, pa.int64()),
(np.uint8, pa.uint8()),
(np.uint16, pa.uint16()),
(np.uint32, pa.uint32()),
(np.uint64, pa.uint64())]
np_int_types, pa_int_types = zip(*int_type_pairs)
class StrangeIterable:
def __init__(self, lst):
self.lst = lst
def __iter__(self):
return self.lst.__iter__()
class MyInt:
def __init__(self, value):
self.value = value
def __int__(self):
return self.value
class MyBrokenInt:
def __int__(self):
1/0 # MARKER
def check_struct_type(ty, expected):
"""
Check a struct type is as expected, but not taking order into account.
"""
assert pa.types.is_struct(ty)
assert set(ty) == set(expected)
def test_iterable_types():
arr1 = pa.array(StrangeIterable([0, 1, 2, 3]))
arr2 = pa.array((0, 1, 2, 3))
assert arr1.equals(arr2)
def test_empty_iterable():
arr = pa.array(StrangeIterable([]))
assert len(arr) == 0
assert arr.null_count == 0
assert arr.type == pa.null()
assert arr.to_pylist() == []
def test_limited_iterator_types():
arr1 = pa.array(iter(range(3)), type=pa.int64(), size=3)
arr2 = pa.array((0, 1, 2))
assert arr1.equals(arr2)
def test_limited_iterator_size_overflow():
arr1 = pa.array(iter(range(3)), type=pa.int64(), size=2)
arr2 = pa.array((0, 1))
assert arr1.equals(arr2)
def test_limited_iterator_size_underflow():
arr1 = pa.array(iter(range(3)), type=pa.int64(), size=10)
arr2 = pa.array((0, 1, 2))
assert arr1.equals(arr2)
def test_iterator_without_size():
expected = pa.array((0, 1, 2))
arr1 = pa.array(iter(range(3)))
assert arr1.equals(expected)
# Same with explicit type
arr1 = pa.array(iter(range(3)), type=pa.int64())
assert arr1.equals(expected)
def test_infinite_iterator():
expected = pa.array((0, 1, 2))
arr1 = pa.array(itertools.count(0), size=3)
assert arr1.equals(expected)
# Same with explicit type
arr1 = pa.array(itertools.count(0), type=pa.int64(), size=3)
assert arr1.equals(expected)
def _as_list(xs):
return xs
def _as_tuple(xs):
return tuple(xs)
def _as_deque(xs):
# deque is a sequence while neither tuple nor list
return collections.deque(xs)
def _as_dict_values(xs):
# a dict values object is not a sequence, just a regular iterable
dct = {k: v for k, v in enumerate(xs)}
return dct.values()
def _as_numpy_array(xs):
arr = np.empty(len(xs), dtype=object)
arr[:] = xs
return arr
parametrize_with_iterable_types = pytest.mark.parametrize(
"seq", [_as_list, _as_tuple, _as_deque, _as_dict_values, _as_numpy_array])
@parametrize_with_iterable_types
def test_sequence_types(seq):
arr1 = pa.array(seq([1, 2, 3]))
arr2 = pa.array([1, 2, 3])
assert arr1.equals(arr2)
@parametrize_with_iterable_types
def test_sequence_boolean(seq):
expected = [True, None, False, None]
arr = pa.array(seq(expected))
assert len(arr) == 4
assert arr.null_count == 2
assert arr.type == pa.bool_()
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_sequence_numpy_boolean(seq):
expected = [np.bool_(True), None, np.bool_(False), None]
arr = pa.array(seq(expected))
assert arr.type == pa.bool_()
assert arr.to_pylist() == [True, None, False, None]
@parametrize_with_iterable_types
def test_sequence_mixed_numpy_python_bools(seq):
values = np.array([True, False])
arr = pa.array(seq([values[0], None, values[1], True, False]))
assert arr.type == pa.bool_()
assert arr.to_pylist() == [True, None, False, True, False]
@parametrize_with_iterable_types
def test_empty_list(seq):
arr = pa.array(seq([]))
assert len(arr) == 0
assert arr.null_count == 0
assert arr.type == pa.null()
assert arr.to_pylist() == []
@parametrize_with_iterable_types
def test_nested_lists(seq):
data = [[], [1, 2], None]
arr = pa.array(seq(data))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.list_(pa.int64())
assert arr.to_pylist() == data
# With explicit type
arr = pa.array(seq(data), type=pa.list_(pa.int32()))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.list_(pa.int32())
assert arr.to_pylist() == data
@parametrize_with_iterable_types
def test_nested_large_lists(seq):
data = [[], [1, 2], None]
arr = pa.array(seq(data), type=pa.large_list(pa.int16()))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.large_list(pa.int16())
assert arr.to_pylist() == data
@parametrize_with_iterable_types
def test_list_with_non_list(seq):
# List types don't accept non-sequences
with pytest.raises(TypeError):
pa.array(seq([[], [1, 2], 3]), type=pa.list_(pa.int64()))
with pytest.raises(TypeError):
pa.array(seq([[], [1, 2], 3]), type=pa.large_list(pa.int64()))
@parametrize_with_iterable_types
def test_nested_arrays(seq):
arr = pa.array(seq([np.array([], dtype=np.int64),
np.array([1, 2], dtype=np.int64), None]))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.list_(pa.int64())
assert arr.to_pylist() == [[], [1, 2], None]
@parametrize_with_iterable_types
def test_nested_fixed_size_list(seq):
# sequence of lists
data = [[1, 2], [3, None], None]
arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.list_(pa.int64(), 2)
assert arr.to_pylist() == data
# sequence of numpy arrays
data = [np.array([1, 2], dtype='int64'), np.array([3, 4], dtype='int64'),
None]
arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.list_(pa.int64(), 2)
assert arr.to_pylist() == [[1, 2], [3, 4], None]
# incorrect length of the lists or arrays
data = [[1, 2, 4], [3, None], None]
for data in [[[1, 2, 3]], [np.array([1, 2, 4], dtype='int64')]]:
with pytest.raises(
ValueError, match="Length of item not correct: expected 2"):
pa.array(seq(data), type=pa.list_(pa.int64(), 2))
# with list size of 0
data = [[], [], None]
arr = pa.array(seq(data), type=pa.list_(pa.int64(), 0))
assert len(arr) == 3
assert arr.null_count == 1
assert arr.type == pa.list_(pa.int64(), 0)
assert arr.to_pylist() == [[], [], None]
@parametrize_with_iterable_types
def test_sequence_all_none(seq):
arr = pa.array(seq([None, None]))
assert len(arr) == 2
assert arr.null_count == 2
assert arr.type == pa.null()
assert arr.to_pylist() == [None, None]
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer(seq, np_scalar_pa_type):
np_scalar, pa_type = np_scalar_pa_type
expected = [1, None, 3, None,
np.iinfo(np_scalar).min, np.iinfo(np_scalar).max]
arr = pa.array(seq(expected), type=pa_type)
assert len(arr) == 6
assert arr.null_count == 2
assert arr.type == pa_type
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_np_nan(seq, np_scalar_pa_type):
# ARROW-2806: numpy.nan is a double value and thus should produce
# a double array.
_, pa_type = np_scalar_pa_type
with pytest.raises(ValueError):
pa.array(seq([np.nan]), type=pa_type, from_pandas=False)
arr = pa.array(seq([np.nan]), type=pa_type, from_pandas=True)
expected = [None]
assert len(arr) == 1
assert arr.null_count == 1
assert arr.type == pa_type
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_integer_nested_np_nan(seq, np_scalar_pa_type):
# ARROW-2806: numpy.nan is a double value and thus should produce
# a double array.
_, pa_type = np_scalar_pa_type
with pytest.raises(ValueError):
pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=False)
arr = pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=True)
expected = [[None]]
assert len(arr) == 1
assert arr.null_count == 0
assert arr.type == pa.list_(pa_type)
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_sequence_integer_inferred(seq):
expected = [1, None, 3, None]
arr = pa.array(seq(expected))
assert len(arr) == 4
assert arr.null_count == 2
assert arr.type == pa.int64()
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer(seq, np_scalar_pa_type):
np_scalar, pa_type = np_scalar_pa_type
expected = [np_scalar(1), None, np_scalar(3), None,
np_scalar(np.iinfo(np_scalar).min),
np_scalar(np.iinfo(np_scalar).max)]
arr = pa.array(seq(expected), type=pa_type)
assert len(arr) == 6
assert arr.null_count == 2
assert arr.type == pa_type
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs)
def test_sequence_numpy_integer_inferred(seq, np_scalar_pa_type):
np_scalar, pa_type = np_scalar_pa_type
expected = [np_scalar(1), None, np_scalar(3), None]
expected += [np_scalar(np.iinfo(np_scalar).min),
np_scalar(np.iinfo(np_scalar).max)]
arr = pa.array(seq(expected))
assert len(arr) == 6
assert arr.null_count == 2
assert arr.type == pa_type
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_sequence_custom_integers(seq):
expected = [0, 42, 2**33 + 1, -2**63]
data = list(map(MyInt, expected))
arr = pa.array(seq(data), type=pa.int64())
assert arr.to_pylist() == expected
@parametrize_with_iterable_types
def test_broken_integers(seq):
data = [MyBrokenInt()]
with pytest.raises(pa.ArrowInvalid, match="tried to convert to int"):
pa.array(seq(data), type=pa.int64())
def test_numpy_scalars_mixed_type():
# ARROW-4324
data = [np.int32(10), np.float32(0.5)]
arr = pa.array(data)
expected = pa.array([10, 0.5], type="float64")
assert arr.equals(expected)
# ARROW-9490
data = [np.int8(10), np.float32(0.5)]
arr = pa.array(data)
expected = pa.array([10, 0.5], type="float32")
assert arr.equals(expected)
@pytest.mark.xfail(reason="Type inference for uint64 not implemented",
raises=OverflowError)
def test_uint64_max_convert():
data = [0, np.iinfo(np.uint64).max]
arr = pa.array(data, type=pa.uint64())
expected = pa.array(np.array(data, dtype='uint64'))
assert arr.equals(expected)
arr_inferred = pa.array(data)
assert arr_inferred.equals(expected)
@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_signed_integer_overflow(bits):
ty = getattr(pa, "int%d" % bits)()
# XXX ideally would always raise OverflowError
with pytest.raises((OverflowError, pa.ArrowInvalid)):
pa.array([2 ** (bits - 1)], ty)
with pytest.raises((OverflowError, pa.ArrowInvalid)):
pa.array([-2 ** (bits - 1) - 1], ty)
@pytest.mark.parametrize("bits", [8, 16, 32, 64])
def test_unsigned_integer_overflow(bits):
ty = getattr(pa, "uint%d" % bits)()
# XXX ideally would always raise OverflowError
with pytest.raises((OverflowError, pa.ArrowInvalid)):
pa.array([2 ** bits], ty)
with pytest.raises((OverflowError, pa.ArrowInvalid)):
pa.array([-1], ty)
@parametrize_with_iterable_types
@pytest.mark.parametrize("typ", pa_int_types)
def test_integer_from_string_error(seq, typ):
# ARROW-9451: pa.array(['1'], type=pa.uint32()) should not succeed
with pytest.raises(pa.ArrowInvalid):
pa.array(seq(['1']), type=typ)
def test_convert_with_mask():
data = [1, 2, 3, 4, 5]
mask = np.array([False, True, False, False, True])
result = pa.array(data, mask=mask)
expected = pa.array([1, None, 3, 4, None])
assert result.equals(expected)
# Mask wrong length
with pytest.raises(ValueError):
pa.array(data, mask=mask[1:])
def test_garbage_collection():
import gc
# Force the cyclic garbage collector to run
gc.collect()
bytes_before = pa.total_allocated_bytes()
pa.array([1, None, 3, None])
gc.collect()
assert pa.total_allocated_bytes() == bytes_before
def test_sequence_double():
data = [1.5, 1., None, 2.5, None, None]
arr = pa.array(data)
assert len(arr) == 6
assert arr.null_count == 3
assert arr.type == pa.float64()
assert arr.to_pylist() == data
def test_double_auto_coerce_from_integer():
# Done as part of ARROW-2814
data = [1.5, 1., None, 2.5, None, None]
arr = pa.array(data)
data2 = [1.5, 1, None, 2.5, None, None]
arr2 = pa.array(data2)
assert arr.equals(arr2)
data3 = [1, 1.5, None, 2.5, None, None]
arr3 = pa.array(data3)
data4 = [1., 1.5, None, 2.5, None, None]
arr4 = pa.array(data4)
assert arr3.equals(arr4)
def test_double_integer_coerce_representable_range():
valid_values = [1.5, 1, 2, None, 1 << 53, -(1 << 53)]
invalid_values = [1.5, 1, 2, None, (1 << 53) + 1]
invalid_values2 = [1.5, 1, 2, None, -((1 << 53) + 1)]
# it works
pa.array(valid_values)
# it fails
with pytest.raises(ValueError):
pa.array(invalid_values)
with pytest.raises(ValueError):
pa.array(invalid_values2)
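# Editor's note: 2**53 is the boundary above because a float64 has a 53-bit
# significand, so every integer of magnitude <= 2**53 is exactly representable
# while 2**53 + 1 is not.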
def test_float32_integer_coerce_representable_range():
f32 = np.float32
valid_values = [f32(1.5), 1 << 24, -(1 << 24)]
invalid_values = [f32(1.5), (1 << 24) + 1]
invalid_values2 = [f32(1.5), -((1 << 24) + 1)]
# it works
pa.array(valid_values, type=pa.float32())
# it fails
with pytest.raises(ValueError):
pa.array(invalid_values, type=pa.float32())
with pytest.raises(ValueError):
pa.array(invalid_values2, type=pa.float32())
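# Editor's note: analogously, float32 has a 24-bit significand, so 2**24 is
# the largest magnitude at which all integers are exactly representable.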
def test_mixed_sequence_errors():
with pytest.raises(ValueError, match="tried to convert to boolean"):
pa.array([True, 'foo'], type=pa.bool_())
with pytest.raises(ValueError, match="tried to convert to float32"):
pa.array([1.5, 'foo'], type=pa.float32())
with pytest.raises(ValueError, match="tried to convert to double"):
pa.array([1.5, 'foo'])
@parametrize_with_iterable_types
@pytest.mark.parametrize("np_scalar,pa_type", [
(np.float16, pa.float16()),
(np.float32, pa.float32()),
(np.float64, pa.float64())
])
@pytest.mark.parametrize("from_pandas", [True, False])
def test_sequence_numpy_double(seq, np_scalar, pa_type, from_pandas):
data = [np_scalar(1.5), np_scalar(1), None, np_scalar(2.5), None, np.nan]
arr = pa.array(seq(data), from_pandas=from_pandas)
assert len(arr) == 6
if from_pandas:
assert arr.null_count == 3
else:
assert arr.null_count == 2
if from_pandas:
# The NaN is skipped in type inference, otherwise it forces a
# float64 promotion
assert arr.type == pa_type
else:
assert arr.type == pa.float64()
assert arr.to_pylist()[:4] == data[:4]
if from_pandas:
assert arr.to_pylist()[5] is None
else:
assert np.isnan(arr.to_pylist()[5])
@pytest.mark.parametrize("from_pandas", [True, False])
@pytest.mark.parametrize("inner_seq", [np.array, list])
def test_ndarray_nested_numpy_double(from_pandas, inner_seq):
# ARROW-2806
data = np.array([
inner_seq([1., 2.]),
inner_seq([1., 2., 3.]),
inner_seq([np.nan]),
None
], dtype=object)
arr = pa.array(data, from_pandas=from_pandas)
assert len(arr) == 4
assert arr.null_count == 1
assert arr.type == pa.list_(pa.float64())
if from_pandas:
assert arr.to_pylist() == [[1.0, 2.0], [1.0, 2.0, 3.0], [None], None]
else:
np.testing.assert_equal(arr.to_pylist(),
[[1., 2.], [1., 2., 3.], [np.nan], None])
def test_nested_ndarray_in_object_array():
# ARROW-4350
arr = np.empty(2, dtype=object)
arr[:] = [np.array([1, 2], dtype=np.int64),
np.array([2, 3], dtype=np.int64)]
arr2 = np.empty(2, dtype=object)
arr2[0] = [3, 4]
arr2[1] = [5, 6]
expected_type = pa.list_(pa.list_(pa.int64()))
assert pa.infer_type([arr]) == expected_type
result = pa.array([arr, arr2])
expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]],
type=expected_type)
assert result.equals(expected)
# test case for len-1 arrays to ensure they are interpreted as
# sublists and not scalars
arr = np.empty(2, dtype=object)
arr[:] = [np.array([1]), np.array([2])]
result = pa.array([arr, arr])
assert result.to_pylist() == [[[1], [2]], [[1], [2]]]
@pytest.mark.xfail(reason=("Type inference for multidimensional ndarray "
"not yet implemented"),
raises=AssertionError)
def test_multidimensional_ndarray_as_nested_list():
# TODO(wesm): see ARROW-5645
arr = np.array([[1, 2], [2, 3]], dtype=np.int64)
arr2 = np.array([[3, 4], [5, 6]], dtype=np.int64)
expected_type = pa.list_(pa.list_(pa.int64()))
assert pa.infer_type([arr]) == expected_type
result = pa.array([arr, arr2])
expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]],
type=expected_type)
assert result.equals(expected)
@pytest.mark.parametrize(('data', 'value_type'), [
([True, False], pa.bool_()),
([None, None], pa.null()),
([1, 2, None], pa.int8()),
([1, 2., 3., None], pa.float32()),
([datetime.date.today(), None], pa.date32()),
([None, datetime.date.today()], pa.date64()),
([datetime.time(1, 1, 1), None], pa.time32('s')),
([None, datetime.time(2, 2, 2)], pa.time64('us')),
([datetime.datetime.now(), None], pa.timestamp('us')),
([datetime.timedelta(seconds=10)], pa.duration('s')),
([b"a", b"b"], pa.binary()),
([b"aaa", b"bbb", b"ccc"], pa.binary(3)),
([b"a", b"b", b"c"], pa.large_binary()),
(["a", "b", "c"], pa.string()),
(["a", "b", "c"], pa.large_string()),
(
[{"a": 1, "b": 2}, None, {"a": 5, "b": None}],
pa.struct([('a', pa.int8()), ('b', pa.int16())])
)
])
def test_list_array_from_object_ndarray(data, value_type):
ty = pa.list_(value_type)
ndarray = np.array(data, dtype=object)
arr = pa.array([ndarray], type=ty)
assert arr.type.equals(ty)
assert arr.to_pylist() == [data]
@pytest.mark.parametrize(('data', 'value_type'), [
([[1, 2], [3]], pa.list_(pa.int64())),
([[1, 2], [3, 4]], pa.list_(pa.int64(), 2)),
([[1], [2, 3]], pa.large_list(pa.int64()))
])
def test_nested_list_array_from_object_ndarray(data, value_type):
ndarray = np.empty(len(data), dtype=object)
ndarray[:] = [np.array(item, dtype=object) for item in data]
ty = pa.list_(value_type)
arr = pa.array([ndarray], type=ty)
assert arr.type.equals(ty)
assert arr.to_pylist() == [data]
def test_array_ignore_nan_from_pandas():
# See ARROW-4324, this reverts logic that was introduced in
# ARROW-2240
with pytest.raises(ValueError):
pa.array([np.nan, 'str'])
arr = pa.array([np.nan, 'str'], from_pandas=True)
expected = pa.array([None, 'str'])
assert arr.equals(expected)
def test_nested_ndarray_different_dtypes():
data = [
np.array([1, 2, 3], dtype='int64'),
None,
np.array([4, 5, 6], dtype='uint32')
]
arr = pa.array(data)
expected = pa.array([[1, 2, 3], None, [4, 5, 6]],
type=pa.list_(pa.int64()))
assert arr.equals(expected)
t2 = pa.list_(pa.uint32())
arr2 = pa.array(data, type=t2)
expected2 = expected.cast(t2)
assert arr2.equals(expected2)
def test_sequence_unicode():
data = ['foo', 'bar', None, 'mañana']
arr = pa.array(data)
assert len(arr) == 4
assert arr.null_count == 1
assert arr.type == pa.string()
assert arr.to_pylist() == data
def check_array_mixed_unicode_bytes(binary_type, string_type):
values = ['qux', b'foo', bytearray(b'barz')]
b_values = [b'qux', b'foo', b'barz']
u_values = ['qux', 'foo', 'barz']
arr = pa.array(values)
expected = pa.array(b_values, type=pa.binary())
assert arr.type == pa.binary()
assert arr.equals(expected)
arr = pa.array(values, type=binary_type)
expected = pa.array(b_values, type=binary_type)
assert arr.type == binary_type
assert arr.equals(expected)
arr = pa.array(values, type=string_type)
expected = pa.array(u_values, type=string_type)
assert arr.type == string_type
assert arr.equals(expected)
def test_array_mixed_unicode_bytes():
check_array_mixed_unicode_bytes(pa.binary(), pa.string())
check_array_mixed_unicode_bytes(pa.large_binary(), pa.large_string())
@pytest.mark.large_memory
@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()])
def test_large_binary_array(ty):
# Construct a large binary array with more than 4GB of data
s = b"0123456789abcdefghijklmnopqrstuvwxyz" * 10
nrepeats = math.ceil((2**32 + 5) / len(s))
data = [s] * nrepeats
arr = pa.array(data, type=ty)
assert isinstance(arr, pa.Array)
assert arr.type == ty
assert len(arr) == nrepeats
@pytest.mark.slow
@pytest.mark.large_memory
@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()])
def test_large_binary_value(ty):
# Construct a large binary array with a single value larger than 4GB
s = b"0123456789abcdefghijklmnopqrstuvwxyz"
nrepeats = math.ceil((2**32 + 5) / len(s))
arr = pa.array([b"foo", s * nrepeats, None, b"bar"], type=ty)
assert isinstance(arr, pa.Array)
assert arr.type == ty
assert len(arr) == 4
buf = arr[1].as_buffer()
assert len(buf) == len(s) * nrepeats
@pytest.mark.large_memory
@pytest.mark.parametrize("ty", [pa.binary(), pa.string()])
def test_string_too_large(ty):
# Construct a binary array with a single value larger than 4GB
s = b"0123456789abcdefghijklmnopqrstuvwxyz"
nrepeats = math.ceil((2**32 + 5) / len(s))
with pytest.raises(pa.ArrowCapacityError):
pa.array([b"foo", s * nrepeats, None, b"bar"], type=ty)
def test_sequence_bytes():
u1 = b'ma\xc3\xb1ana'
data = [b'foo',
memoryview(b'dada'),
memoryview(b'd-a-t-a')[::2], # non-contiguous is made contiguous
u1.decode('utf-8'), # unicode gets encoded,
bytearray(b'bar'),
None]
for ty in [None, pa.binary(), pa.large_binary()]:
arr = pa.array(data, type=ty)
assert len(arr) == 6
assert arr.null_count == 1
        assert arr.type == (ty or pa.binary())
assert arr.to_pylist() == [b'foo', b'dada', b'data', u1, b'bar', None]
@pytest.mark.parametrize("ty", [pa.string(), pa.large_string()])
def test_sequence_utf8_to_unicode(ty):
# ARROW-1225
data = [b'foo', None, b'bar']
arr = pa.array(data, type=ty)
assert arr.type == ty
assert arr[0].as_py() == 'foo'
# test a non-utf8 unicode string
val = ('mañana').encode('utf-16-le')
with pytest.raises(pa.ArrowInvalid):
pa.array([val], type=ty)
def test_sequence_fixed_size_bytes():
data = [b'foof', None, bytearray(b'barb'), b'2346']
arr = pa.array(data, type=pa.binary(4))
assert len(arr) == 4
assert arr.null_count == 1
assert arr.type == pa.binary(4)
assert arr.to_pylist() == [b'foof', None, b'barb', b'2346']
def test_fixed_size_bytes_does_not_accept_varying_lengths():
data = [b'foo', None, b'barb', b'2346']
with pytest.raises(pa.ArrowInvalid):
pa.array(data, type=pa.binary(4))
def test_fixed_size_binary_length_check():
# ARROW-10193
data = [b'\x19h\r\x9e\x00\x00\x00\x00\x01\x9b\x9fA']
assert len(data[0]) == 12
ty = pa.binary(12)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == data
def test_sequence_date():
data = [datetime.date(2000, 1, 1), None, datetime.date(1970, 1, 1),
datetime.date(2040, 2, 26)]
arr = pa.array(data)
assert len(arr) == 4
assert arr.type == pa.date32()
assert arr.null_count == 1
assert arr[0].as_py() == datetime.date(2000, 1, 1)
assert arr[1].as_py() is None
assert arr[2].as_py() == datetime.date(1970, 1, 1)
assert arr[3].as_py() == datetime.date(2040, 2, 26)
@pytest.mark.parametrize('input',
[(pa.date32(), [10957, None]),
(pa.date64(), [10957 * 86400000, None])])
def test_sequence_explicit_types(input):
t, ex_values = input
data = [datetime.date(2000, 1, 1), None]
arr = pa.array(data, type=t)
arr2 = pa.array(ex_values, type=t)
for x in [arr, arr2]:
assert len(x) == 2
assert x.type == t
assert x.null_count == 1
assert x[0].as_py() == datetime.date(2000, 1, 1)
assert x[1].as_py() is None
def test_date32_overflow():
# Overflow
data3 = [2**32, None]
with pytest.raises((OverflowError, pa.ArrowException)):
pa.array(data3, type=pa.date32())
@pytest.mark.parametrize(('time_type', 'unit', 'int_type'), [
(pa.time32, 's', 'int32'),
(pa.time32, 'ms', 'int32'),
(pa.time64, 'us', 'int64'),
(pa.time64, 'ns', 'int64'),
])
def test_sequence_time_with_timezone(time_type, unit, int_type):
def expected_integer_value(t):
# only use with utc time object because it doesn't adjust with the
# offset
units = ['s', 'ms', 'us', 'ns']
multiplier = 10**(units.index(unit) * 3)
if t is None:
return None
seconds = (
t.hour * 3600 +
t.minute * 60 +
t.second +
t.microsecond * 10**-6
)
return int(seconds * multiplier)
def expected_time_value(t):
# only use with utc time object because it doesn't adjust with the
# time objects tzdata
if unit == 's':
return t.replace(microsecond=0)
elif unit == 'ms':
return t.replace(microsecond=(t.microsecond // 1000) * 1000)
else:
return t
# only timezone naive times are supported in arrow
data = [
datetime.time(8, 23, 34, 123456),
datetime.time(5, 0, 0, 1000),
None,
datetime.time(1, 11, 56, 432539),
datetime.time(23, 10, 0, 437699)
]
ty = time_type(unit)
arr = pa.array(data, type=ty)
assert len(arr) == 5
assert arr.type == ty
assert arr.null_count == 1
# test that the underlying integers are UTC values
values = arr.cast(int_type)
expected = list(map(expected_integer_value, data))
assert values.to_pylist() == expected
# test that the scalars are datetime.time objects with UTC timezone
assert arr[0].as_py() == expected_time_value(data[0])
assert arr[1].as_py() == expected_time_value(data[1])
assert arr[2].as_py() is None
assert arr[3].as_py() == expected_time_value(data[3])
assert arr[4].as_py() == expected_time_value(data[4])
def tz(hours, minutes=0):
offset = datetime.timedelta(hours=hours, minutes=minutes)
return datetime.timezone(offset)
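# Editor's illustrative note: tz(-5) builds a fixed UTC-05:00 tzinfo and
# tz(5, 30) builds UTC+05:30.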
def test_sequence_timestamp():
data = [
datetime.datetime(2007, 7, 13, 1, 23, 34, 123456),
None,
datetime.datetime(2006, 1, 13, 12, 34, 56, 432539),
datetime.datetime(2010, 8, 13, 5, 46, 57, 437699)
]
arr = pa.array(data)
assert len(arr) == 4
assert arr.type == pa.timestamp('us')
assert arr.null_count == 1
assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123456)
assert arr[1].as_py() is None
assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
34, 56, 432539)
assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
46, 57, 437699)
@pytest.mark.parametrize('timezone', [
None,
'UTC',
'Etc/GMT-1',
'Europe/Budapest',
])
@pytest.mark.parametrize('unit', [
's',
'ms',
'us',
'ns'
])
def test_sequence_timestamp_with_timezone(timezone, unit):
def expected_integer_value(dt):
units = ['s', 'ms', 'us', 'ns']
multiplier = 10**(units.index(unit) * 3)
if dt is None:
return None
else:
# avoid float precision issues
ts = decimal.Decimal(str(dt.timestamp()))
return int(ts * multiplier)
def expected_datetime_value(dt):
if dt is None:
return None
if unit == 's':
dt = dt.replace(microsecond=0)
elif unit == 'ms':
dt = dt.replace(microsecond=(dt.microsecond // 1000) * 1000)
# adjust the timezone
if timezone is None:
# make datetime timezone unaware
return dt.replace(tzinfo=None)
else:
# convert to the expected timezone
return dt.astimezone(pytz.timezone(timezone))
data = [
datetime.datetime(2007, 7, 13, 8, 23, 34, 123456), # naive
pytz.utc.localize(
datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
),
None,
pytz.timezone('US/Eastern').localize(
datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)
),
pytz.timezone('Europe/Moscow').localize(
datetime.datetime(2010, 8, 13, 5, 0, 0, 437699)
),
]
utcdata = [
pytz.utc.localize(data[0]),
data[1],
None,
data[3].astimezone(pytz.utc),
data[4].astimezone(pytz.utc),
]
ty = pa.timestamp(unit, tz=timezone)
arr = pa.array(data, type=ty)
assert len(arr) == 5
assert arr.type == ty
assert arr.null_count == 1
# test that the underlying integers are UTC values
values = arr.cast('int64')
expected = list(map(expected_integer_value, utcdata))
assert values.to_pylist() == expected
# test that the scalars are datetimes with the correct timezone
for i in range(len(arr)):
assert arr[i].as_py() == expected_datetime_value(utcdata[i])
@pytest.mark.parametrize('timezone', [
None,
'UTC',
'Etc/GMT-1',
'Europe/Budapest',
])
def test_pyarrow_ignore_timezone_environment_variable(monkeypatch, timezone):
# note that any non-empty value will evaluate to true
monkeypatch.setenv("PYARROW_IGNORE_TIMEZONE", "1")
data = [
datetime.datetime(2007, 7, 13, 8, 23, 34, 123456), # naive
pytz.utc.localize(
datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
),
pytz.timezone('US/Eastern').localize(
datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)
),
pytz.timezone('Europe/Moscow').localize(
datetime.datetime(2010, 8, 13, 5, 0, 0, 437699)
),
]
expected = [dt.replace(tzinfo=None) for dt in data]
if timezone is not None:
tzinfo = pytz.timezone(timezone)
expected = [tzinfo.fromutc(dt) for dt in expected]
ty = pa.timestamp('us', tz=timezone)
arr = pa.array(data, type=ty)
assert arr.to_pylist() == expected
def test_sequence_timestamp_with_timezone_inference():
data = [
datetime.datetime(2007, 7, 13, 8, 23, 34, 123456), # naive
pytz.utc.localize(
datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
),
None,
pytz.timezone('US/Eastern').localize(
datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)
),
pytz.timezone('Europe/Moscow').localize(
datetime.datetime(2010, 8, 13, 5, 0, 0, 437699)
),
]
expected = [
pa.timestamp('us', tz=None),
pa.timestamp('us', tz='UTC'),
pa.timestamp('us', tz=None),
pa.timestamp('us', tz='US/Eastern'),
pa.timestamp('us', tz='Europe/Moscow')
]
for dt, expected_type in zip(data, expected):
prepended = [dt] + data
arr = pa.array(prepended)
assert arr.type == expected_type
@pytest.mark.pandas
def test_sequence_timestamp_from_mixed_builtin_and_pandas_datetimes():
import pandas as pd
data = [
pd.Timestamp(1184307814123456123, tz=pytz.timezone('US/Eastern'),
unit='ns'),
datetime.datetime(2007, 7, 13, 8, 23, 34, 123456), # naive
pytz.utc.localize(
datetime.datetime(2008, 1, 5, 5, 0, 0, 1000)
),
None,
]
utcdata = [
data[0].astimezone(pytz.utc),
pytz.utc.localize(data[1]),
data[2].astimezone(pytz.utc),
None,
]
arr = pa.array(data)
assert arr.type == pa.timestamp('us', tz='US/Eastern')
values = arr.cast('int64')
expected = [int(dt.timestamp() * 10**6) if dt else None for dt in utcdata]
assert values.to_pylist() == expected
def test_sequence_timestamp_out_of_bounds_nanosecond():
# https://issues.apache.org/jira/browse/ARROW-9768
# datetime outside of range supported for nanosecond resolution
data = [datetime.datetime(2262, 4, 12)]
with pytest.raises(ValueError, match="out of bounds"):
pa.array(data, type=pa.timestamp('ns'))
# with microsecond resolution it works fine
arr = pa.array(data, type=pa.timestamp('us'))
assert arr.to_pylist() == data
    # case where the naive datetime is within bounds, but not once converted to UTC
tz = datetime.timezone(datetime.timedelta(hours=-1))
data = [datetime.datetime(2262, 4, 11, 23, tzinfo=tz)]
with pytest.raises(ValueError, match="out of bounds"):
pa.array(data, type=pa.timestamp('ns'))
arr = pa.array(data, type=pa.timestamp('us'))
assert arr.to_pylist()[0] == datetime.datetime(2262, 4, 12)
def test_sequence_numpy_timestamp():
data = [
np.datetime64(datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)),
None,
np.datetime64(datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)),
np.datetime64(datetime.datetime(2010, 8, 13, 5, 46, 57, 437699))
]
arr = pa.array(data)
assert len(arr) == 4
assert arr.type == pa.timestamp('us')
assert arr.null_count == 1
assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123456)
assert arr[1].as_py() is None
assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12,
34, 56, 432539)
assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5,
46, 57, 437699)
class MyDate(datetime.date):
pass
class MyDatetime(datetime.datetime):
pass
class MyTimedelta(datetime.timedelta):
pass
def test_datetime_subclassing():
data = [
MyDate(2007, 7, 13),
]
date_type = pa.date32()
arr_date = pa.array(data, type=date_type)
assert len(arr_date) == 1
assert arr_date.type == date_type
assert arr_date[0].as_py() == datetime.date(2007, 7, 13)
data = [
MyDatetime(2007, 7, 13, 1, 23, 34, 123456),
]
s = pa.timestamp('s')
ms = pa.timestamp('ms')
us = pa.timestamp('us')
arr_s = pa.array(data, type=s)
assert len(arr_s) == 1
assert arr_s.type == s
assert arr_s[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 0)
arr_ms = pa.array(data, type=ms)
assert len(arr_ms) == 1
assert arr_ms.type == ms
assert arr_ms[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123000)
arr_us = pa.array(data, type=us)
assert len(arr_us) == 1
assert arr_us.type == us
assert arr_us[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123456)
data = [
MyTimedelta(123, 456, 1002),
]
s = pa.duration('s')
ms = pa.duration('ms')
us = pa.duration('us')
arr_s = pa.array(data)
assert len(arr_s) == 1
assert arr_s.type == us
assert arr_s[0].as_py() == datetime.timedelta(123, 456, 1002)
arr_s = pa.array(data, type=s)
assert len(arr_s) == 1
assert arr_s.type == s
assert arr_s[0].as_py() == datetime.timedelta(123, 456)
arr_ms = pa.array(data, type=ms)
assert len(arr_ms) == 1
assert arr_ms.type == ms
assert arr_ms[0].as_py() == datetime.timedelta(123, 456, 1000)
arr_us = pa.array(data, type=us)
assert len(arr_us) == 1
assert arr_us.type == us
assert arr_us[0].as_py() == datetime.timedelta(123, 456, 1002)
@pytest.mark.xfail(not _pandas_api.have_pandas,
reason="pandas required for nanosecond conversion")
def test_sequence_timestamp_nanoseconds():
inputs = [
[datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)],
[MyDatetime(2007, 7, 13, 1, 23, 34, 123456)]
]
for data in inputs:
ns = pa.timestamp('ns')
arr_ns = pa.array(data, type=ns)
assert len(arr_ns) == 1
assert arr_ns.type == ns
assert arr_ns[0].as_py() == datetime.datetime(2007, 7, 13, 1,
23, 34, 123456)
@pytest.mark.pandas
def test_sequence_timestamp_from_int_with_unit():
# TODO(wesm): This test might be rewritten to assert the actual behavior
# when pandas is not installed
data = [1]
s = pa.timestamp('s')
ms = pa.timestamp('ms')
us = pa.timestamp('us')
ns = pa.timestamp('ns')
arr_s = pa.array(data, type=s)
assert len(arr_s) == 1
assert arr_s.type == s
assert repr(arr_s[0]) == (
"<pyarrow.TimestampScalar: datetime.datetime(1970, 1, 1, 0, 0, 1)>"
)
assert str(arr_s[0]) == "1970-01-01 00:00:01"
arr_ms = pa.array(data, type=ms)
assert len(arr_ms) == 1
assert arr_ms.type == ms
assert repr(arr_ms[0].as_py()) == (
"datetime.datetime(1970, 1, 1, 0, 0, 0, 1000)"
)
assert str(arr_ms[0]) == "1970-01-01 00:00:00.001000"
arr_us = pa.array(data, type=us)
assert len(arr_us) == 1
assert arr_us.type == us
assert repr(arr_us[0].as_py()) == (
"datetime.datetime(1970, 1, 1, 0, 0, 0, 1)"
)
assert str(arr_us[0]) == "1970-01-01 00:00:00.000001"
arr_ns = pa.array(data, type=ns)
assert len(arr_ns) == 1
assert arr_ns.type == ns
assert repr(arr_ns[0].as_py()) == (
"Timestamp('1970-01-01 00:00:00.000000001')"
)
assert str(arr_ns[0]) == "1970-01-01 00:00:00.000000001"
expected_exc = TypeError
class CustomClass():
pass
for ty in [ns, pa.date32(), pa.date64()]:
with pytest.raises(expected_exc):
pa.array([1, CustomClass()], type=ty)
@pytest.mark.parametrize('np_scalar', [True, False])
def test_sequence_duration(np_scalar):
td1 = datetime.timedelta(2, 3601, 1)
td2 = datetime.timedelta(1, 100, 1000)
if np_scalar:
data = [np.timedelta64(td1), None, np.timedelta64(td2)]
else:
data = [td1, None, td2]
arr = pa.array(data)
assert len(arr) == 3
assert arr.type == pa.duration('us')
assert arr.null_count == 1
assert arr[0].as_py() == td1
assert arr[1].as_py() is None
assert arr[2].as_py() == td2
@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_sequence_duration_with_unit(unit):
data = [
datetime.timedelta(3, 22, 1001),
]
expected = {'s': datetime.timedelta(3, 22),
'ms': datetime.timedelta(3, 22, 1000),
'us': datetime.timedelta(3, 22, 1001),
'ns': datetime.timedelta(3, 22, 1001)}
ty = pa.duration(unit)
arr_s = pa.array(data, type=ty)
assert len(arr_s) == 1
assert arr_s.type == ty
assert arr_s[0].as_py() == expected[unit]
@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])
def test_sequence_duration_from_int_with_unit(unit):
data = [5]
ty = pa.duration(unit)
arr = pa.array(data, type=ty)
assert len(arr) == 1
assert arr.type == ty
assert arr[0].value == 5
def test_sequence_duration_nested_lists():
td1 = datetime.timedelta(1, 1, 1000)
td2 = datetime.timedelta(1, 100)
data = [[td1, None], [td1, td2]]
arr = pa.array(data)
assert len(arr) == 2
assert arr.type == pa.list_(pa.duration('us'))
assert arr.to_pylist() == data
arr = pa.array(data, type=pa.list_(pa.duration('ms')))
assert len(arr) == 2
assert arr.type == pa.list_(pa.duration('ms'))
assert arr.to_pylist() == data
def test_sequence_duration_nested_lists_numpy():
td1 = datetime.timedelta(1, 1, 1000)
td2 = datetime.timedelta(1, 100)
data = [[np.timedelta64(td1), None],
[np.timedelta64(td1), np.timedelta64(td2)]]
arr = pa.array(data)
assert len(arr) == 2
assert arr.type == pa.list_(pa.duration('us'))
assert arr.to_pylist() == [[td1, None], [td1, td2]]
data = [np.array([np.timedelta64(td1), None], dtype='timedelta64[us]'),
np.array([np.timedelta64(td1), np.timedelta64(td2)])]
arr = pa.array(data)
assert len(arr) == 2
assert arr.type == pa.list_(pa.duration('us'))
assert arr.to_pylist() == [[td1, None], [td1, td2]]
def test_sequence_nesting_levels():
data = [1, 2, None]
arr = pa.array(data)
assert arr.type == pa.int64()
assert arr.to_pylist() == data
data = [[1], [2], None]
arr = pa.array(data)
assert arr.type == pa.list_(pa.int64())
assert arr.to_pylist() == data
data = [[1], [2, 3, 4], [None]]
arr = pa.array(data)
assert arr.type == pa.list_(pa.int64())
assert arr.to_pylist() == data
data = [None, [[None, 1]], [[2, 3, 4], None], [None]]
arr = pa.array(data)
assert arr.type == pa.list_(pa.list_(pa.int64()))
assert arr.to_pylist() == data
exceptions = (pa.ArrowInvalid, pa.ArrowTypeError)
# Mixed nesting levels are rejected
with pytest.raises(exceptions):
pa.array([1, 2, [1]])
with pytest.raises(exceptions):
pa.array([1, 2, []])
with pytest.raises(exceptions):
pa.array([[1], [2], [None, [1]]])
def test_sequence_mixed_types_fails():
data = ['a', 1, 2.0]
with pytest.raises(pa.ArrowTypeError):
pa.array(data)
def test_sequence_mixed_types_with_specified_type_fails():
data = ['-10', '-5', {'a': 1}, '0', '5', '10']
type = pa.string()
with pytest.raises(TypeError):
pa.array(data, type=type)
def test_sequence_decimal():
data = [decimal.Decimal('1234.183'), decimal.Decimal('8094.234')]
for type in [pa.decimal128, pa.decimal256]:
arr = pa.array(data, type=type(precision=7, scale=3))
assert arr.to_pylist() == data
def test_sequence_decimal_different_precisions():
data = [
decimal.Decimal('1234234983.183'), decimal.Decimal('80943244.234')
]
for type in [pa.decimal128, pa.decimal256]:
arr = pa.array(data, type=type(precision=13, scale=3))
assert arr.to_pylist() == data
def test_sequence_decimal_no_scale():
data = [decimal.Decimal('1234234983'), decimal.Decimal('8094324')]
for type in [pa.decimal128, pa.decimal256]:
arr = pa.array(data, type=type(precision=10))
assert arr.to_pylist() == data
def test_sequence_decimal_negative():
data = [decimal.Decimal('-1234.234983'), decimal.Decimal('-8.094324')]
for type in [pa.decimal128, pa.decimal256]:
arr = pa.array(data, type=type(precision=10, scale=6))
assert arr.to_pylist() == data
def test_sequence_decimal_no_whole_part():
data = [decimal.Decimal('-.4234983'), decimal.Decimal('.0103943')]
for type in [pa.decimal128, pa.decimal256]:
arr = pa.array(data, type=type(precision=7, scale=7))
assert arr.to_pylist() == data
def test_sequence_decimal_large_integer():
data = [decimal.Decimal('-394029506937548693.42983'),
decimal.Decimal('32358695912932.01033')]
for type in [pa.decimal128, pa.decimal256]:
arr = pa.array(data, type=type(precision=23, scale=5))
assert arr.to_pylist() == data
def test_sequence_decimal_from_integers():
data = [0, 1, -39402950693754869342983]
expected = [decimal.Decimal(x) for x in data]
for type in [pa.decimal128, pa.decimal256]:
arr = pa.array(data, type=type(precision=28, scale=5))
assert arr.to_pylist() == expected
def test_sequence_decimal_too_high_precision():
# ARROW-6989 python decimal has too high precision
with pytest.raises(ValueError, match="precision out of range"):
pa.array([decimal.Decimal('1' * 80)])
def test_sequence_decimal_infer():
for data, typ in [
# simple case
(decimal.Decimal('1.234'), pa.decimal128(4, 3)),
# trailing zeros
(decimal.Decimal('12300'), pa.decimal128(5, 0)),
(decimal.Decimal('12300.0'), pa.decimal128(6, 1)),
# scientific power notation
(decimal.Decimal('1.23E+4'), pa.decimal128(5, 0)),
(decimal.Decimal('123E+2'), pa.decimal128(5, 0)),
(decimal.Decimal('123E+4'), pa.decimal128(7, 0)),
# leading zeros
(decimal.Decimal('0.0123'), pa.decimal128(4, 4)),
(decimal.Decimal('0.01230'), pa.decimal128(5, 5)),
(decimal.Decimal('1.230E-2'), pa.decimal128(5, 5)),
]:
assert pa.infer_type([data]) == typ
arr = pa.array([data])
assert arr.type == typ
assert arr.to_pylist()[0] == data
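# Editor's note: in the cases above, precision counts all significant digits
# and scale counts digits to the right of the decimal point, e.g.
# Decimal('1.234') has 4 digits, 3 of them fractional -> decimal128(4, 3).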
def test_sequence_decimal_infer_mixed():
# ARROW-12150 - ensure mixed precision gets correctly inferred to
# common type that can hold all input values
cases = [
([decimal.Decimal('1.234'), decimal.Decimal('3.456')],
pa.decimal128(4, 3)),
([decimal.Decimal('1.234'), decimal.Decimal('456.7')],
pa.decimal128(6, 3)),
([decimal.Decimal('123.4'), decimal.Decimal('4.567')],
pa.decimal128(6, 3)),
([decimal.Decimal('123e2'), decimal.Decimal('4567e3')],
pa.decimal128(7, 0)),
([decimal.Decimal('123e4'), decimal.Decimal('4567e2')],
pa.decimal128(7, 0)),
([decimal.Decimal('0.123'), decimal.Decimal('0.04567')],
pa.decimal128(5, 5)),
([decimal.Decimal('0.001'), decimal.Decimal('1.01E5')],
pa.decimal128(9, 3)),
]
for data, typ in cases:
assert pa.infer_type(data) == typ
arr = pa.array(data)
assert arr.type == typ
assert arr.to_pylist() == data
def test_sequence_decimal_given_type():
for data, typs, wrong_typs in [
# simple case
(
decimal.Decimal('1.234'),
[pa.decimal128(4, 3), pa.decimal128(5, 3), pa.decimal128(5, 4)],
[pa.decimal128(4, 2), pa.decimal128(4, 4)]
),
# trailing zeros
(
decimal.Decimal('12300'),
[pa.decimal128(5, 0), pa.decimal128(6, 0), pa.decimal128(3, -2)],
[pa.decimal128(4, 0), pa.decimal128(3, -3)]
),
# scientific power notation
(
decimal.Decimal('1.23E+4'),
[pa.decimal128(5, 0), pa.decimal128(6, 0), pa.decimal128(3, -2)],
[pa.decimal128(4, 0), pa.decimal128(3, -3)]
),
]:
for typ in typs:
arr = pa.array([data], type=typ)
assert arr.type == typ
assert arr.to_pylist()[0] == data
for typ in wrong_typs:
with pytest.raises(ValueError):
pa.array([data], type=typ)
def test_range_types():
arr1 = pa.array(range(3))
arr2 = pa.array((0, 1, 2))
assert arr1.equals(arr2)
def test_empty_range():
arr = pa.array(range(0))
assert len(arr) == 0
assert arr.null_count == 0
assert arr.type == pa.null()
assert arr.to_pylist() == []
def test_structarray():
arr = pa.StructArray.from_arrays([], names=[])
assert arr.type == pa.struct([])
assert len(arr) == 0
assert arr.to_pylist() == []
ints = pa.array([None, 2, 3], type=pa.int64())
strs = pa.array(['a', None, 'c'], type=pa.string())
bools = pa.array([True, False, None], type=pa.bool_())
arr = pa.StructArray.from_arrays(
[ints, strs, bools],
['ints', 'strs', 'bools'])
expected = [
{'ints': None, 'strs': 'a', 'bools': True},
{'ints': 2, 'strs': None, 'bools': False},
{'ints': 3, 'strs': 'c', 'bools': None},
]
pylist = arr.to_pylist()
assert pylist == expected, (pylist, expected)
# len(names) != len(arrays)
with pytest.raises(ValueError):
pa.StructArray.from_arrays([ints], ['ints', 'strs'])
def test_struct_from_dicts():
ty = pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.string()),
pa.field('c', pa.bool_())])
arr = pa.array([], type=ty)
assert arr.to_pylist() == []
data = [{'a': 5, 'b': 'foo', 'c': True},
{'a': 6, 'b': 'bar', 'c': False}]
arr = pa.array(data, type=ty)
assert arr.to_pylist() == data
# With omitted values
data = [{'a': 5, 'c': True},
None,
{},
{'a': None, 'b': 'bar'}]
arr = pa.array(data, type=ty)
expected = [{'a': 5, 'b': None, 'c': True},
None,
{'a': None, 'b': None, 'c': None},
{'a': None, 'b': 'bar', 'c': None}]
assert arr.to_pylist() == expected
def test_struct_from_dicts_bytes_keys():
# ARROW-6878
ty = pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.string()),
pa.field('c', pa.bool_())])
arr = pa.array([], type=ty)
assert arr.to_pylist() == []
data = [{b'a': 5, b'b': 'foo'},
{b'a': 6, b'c': False}]
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [
{'a': 5, 'b': 'foo', 'c': None},
{'a': 6, 'b': None, 'c': False},
]
def test_struct_from_tuples():
ty = pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.string()),
pa.field('c', pa.bool_())])
data = [(5, 'foo', True),
(6, 'bar', False)]
expected = [{'a': 5, 'b': 'foo', 'c': True},
{'a': 6, 'b': 'bar', 'c': False}]
arr = pa.array(data, type=ty)
data_as_ndarray = np.empty(len(data), dtype=object)
data_as_ndarray[:] = data
arr2 = pa.array(data_as_ndarray, type=ty)
assert arr.to_pylist() == expected
assert arr.equals(arr2)
# With omitted values
data = [(5, 'foo', None),
None,
(6, None, False)]
expected = [{'a': 5, 'b': 'foo', 'c': None},
None,
{'a': 6, 'b': None, 'c': False}]
arr = pa.array(data, type=ty)
assert arr.to_pylist() == expected
# Invalid tuple size
for tup in [(5, 'foo'), (), ('5', 'foo', True, None)]:
with pytest.raises(ValueError, match="(?i)tuple size"):
pa.array([tup], type=ty)
def test_struct_from_list_of_pairs():
ty = pa.struct([
pa.field('a', pa.int32()),
pa.field('b', pa.string()),
pa.field('c', pa.bool_())
])
data = [
[('a', 5), ('b', 'foo'), ('c', True)],
[('a', 6), ('b', 'bar'), ('c', False)],
None
]
arr = pa.array(data, type=ty)
assert arr.to_pylist() == [
{'a': 5, 'b': 'foo', 'c': True},
{'a': 6, 'b': 'bar', 'c': False},
None
]
# test with duplicated field names
ty = pa.struct([
pa.field('a', pa.int32()),
pa.field('a', pa.string()),
pa.field('b', pa.bool_())
])
data = [
[('a', 5), ('a', 'foo'), ('b', True)],
[('a', 6), ('a', 'bar'), ('b', False)],
]
arr = pa.array(data, type=ty)
with pytest.raises(KeyError):
# TODO(kszucs): ARROW-9997
arr.to_pylist()
# test with empty elements
ty = pa.struct([
pa.field('a', pa.int32()),
pa.field('b', pa.string()),
pa.field('c', pa.bool_())
])
data = [
[],
[('a', 5), ('b', 'foo'), ('c', True)],
[('a', 2), ('b', 'baz')],
[('a', 1), ('b', 'bar'), ('c', False), ('d', 'julia')],
]
expected = [
{'a': None, 'b': None, 'c': None},
{'a': 5, 'b': 'foo', 'c': True},
{'a': 2, 'b': 'baz', 'c': None},
{'a': 1, 'b': 'bar', 'c': False},
]
arr = pa.array(data, type=ty)
assert arr.to_pylist() == expected
def test_struct_from_list_of_pairs_errors():
ty = pa.struct([
pa.field('a', pa.int32()),
pa.field('b', pa.string()),
pa.field('c', pa.bool_())
])
# test that it raises if the key doesn't match the expected field name
data = [
[],
[('a', 5), ('c', True), ('b', None)],
]
msg = "The expected field name is `b` but `c` was given"
with pytest.raises(ValueError, match=msg):
pa.array(data, type=ty)
# test various errors both at the first position and after because of key
# type inference
template = (
r"Could not convert {} with type {}: was expecting tuple of "
r"(key, value) pair"
)
cases = [
tuple(), # empty key-value pair
tuple('a',), # missing value
tuple('unknown-key',), # not known field name
'string', # not a tuple
]
for key_value_pair in cases:
msg = re.escape(template.format(
repr(key_value_pair), type(key_value_pair).__name__
))
with pytest.raises(TypeError, match=msg):
pa.array([
[key_value_pair],
[('a', 5), ('b', 'foo'), ('c', None)],
], type=ty)
with pytest.raises(TypeError, match=msg):
pa.array([
[('a', 5), ('b', 'foo'), ('c', None)],
[key_value_pair],
], type=ty)
def test_struct_from_mixed_sequence():
# It is forbidden to mix dicts and tuples when initializing a struct array
ty = pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.string()),
pa.field('c', pa.bool_())])
data = [(5, 'foo', True),
{'a': 6, 'b': 'bar', 'c': False}]
with pytest.raises(TypeError):
pa.array(data, type=ty)
def test_struct_from_dicts_inference():
expected_type = pa.struct([pa.field('a', pa.int64()),
pa.field('b', pa.string()),
pa.field('c', pa.bool_())])
data = [{'a': 5, 'b': 'foo', 'c': True},
{'a': 6, 'b': 'bar', 'c': False}]
arr = pa.array(data)
check_struct_type(arr.type, expected_type)
assert arr.to_pylist() == data
# With omitted values
data = [{'a': 5, 'c': True},
None,
{},
{'a': None, 'b': 'bar'}]
expected = [{'a': 5, 'b': None, 'c': True},
None,
{'a': None, 'b': None, 'c': None},
{'a': None, 'b': 'bar', 'c': None}]
arr = pa.array(data)
data_as_ndarray = np.empty(len(data), dtype=object)
data_as_ndarray[:] = data
arr2 = pa.array(data)
check_struct_type(arr.type, expected_type)
assert arr.to_pylist() == expected
assert arr.equals(arr2)
# Nested
expected_type = pa.struct([
pa.field('a', pa.struct([pa.field('aa', pa.list_(pa.int64())),
pa.field('ab', pa.bool_())])),
pa.field('b', pa.string())])
data = [{'a': {'aa': [5, 6], 'ab': True}, 'b': 'foo'},
{'a': {'aa': None, 'ab': False}, 'b': None},
{'a': None, 'b': 'bar'}]
arr = pa.array(data)
assert arr.to_pylist() == data
# Edge cases
arr = pa.array([{}])
assert arr.type == pa.struct([])
assert arr.to_pylist() == [{}]
# Mixing structs and scalars is rejected
with pytest.raises((pa.ArrowInvalid, pa.ArrowTypeError)):
pa.array([1, {'a': 2}])
def test_structarray_from_arrays_coerce():
# ARROW-1706
ints = [None, 2, 3]
strs = ['a', None, 'c']
bools = [True, False, None]
ints_nonnull = [1, 2, 3]
arrays = [ints, strs, bools, ints_nonnull]
result = pa.StructArray.from_arrays(arrays,
['ints', 'strs', 'bools',
'int_nonnull'])
expected = pa.StructArray.from_arrays(
[pa.array(ints, type='int64'),
pa.array(strs, type='utf8'),
pa.array(bools),
pa.array(ints_nonnull, type='int64')],
['ints', 'strs', 'bools', 'int_nonnull'])
with pytest.raises(ValueError):
pa.StructArray.from_arrays(arrays)
assert result.equals(expected)
def test_decimal_array_with_none_and_nan():
values = [decimal.Decimal('1.234'), None, np.nan, decimal.Decimal('nan')]
with pytest.raises(TypeError):
# ARROW-6227: Without from_pandas=True, NaN is considered a float
array = pa.array(values)
array = pa.array(values, from_pandas=True)
assert array.type == pa.decimal128(4, 3)
assert array.to_pylist() == values[:2] + [None, None]
array = pa.array(values, type=pa.decimal128(10, 4), from_pandas=True)
assert array.to_pylist() == [decimal.Decimal('1.2340'), None, None, None]
def test_map_from_dicts():
data = [[{'key': b'a', 'value': 1}, {'key': b'b', 'value': 2}],
[{'key': b'c', 'value': 3}],
[{'key': b'd', 'value': 4}, {'key': b'e', 'value': 5},
{'key': b'f', 'value': None}],
[{'key': b'g', 'value': 7}]]
expected = [[(d['key'], d['value']) for d in entry] for entry in data]
arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))
assert arr.to_pylist() == expected
# With omitted values
data[1] = None
expected[1] = None
arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))
assert arr.to_pylist() == expected
# Invalid dictionary
for entry in [[{'value': 5}], [{}], [{'k': 1, 'v': 2}]]:
with pytest.raises(ValueError, match="Invalid Map"):
pa.array([entry], type=pa.map_('i4', 'i4'))
# Invalid dictionary types
for entry in [[{'key': '1', 'value': 5}], [{'key': {'value': 2}}]]:
with pytest.raises(pa.ArrowInvalid, match="tried to convert to int"):
pa.array([entry], type=pa.map_('i4', 'i4'))
def test_map_from_tuples():
expected = [[(b'a', 1), (b'b', 2)],
[(b'c', 3)],
[(b'd', 4), (b'e', 5), (b'f', None)],
[(b'g', 7)]]
arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))
assert arr.to_pylist() == expected
# With omitted values
expected[1] = None
arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32()))
assert arr.to_pylist() == expected
# Invalid tuple size
for entry in [[(5,)], [()], [('5', 'foo', True)]]:
with pytest.raises(ValueError, match="(?i)tuple size"):
pa.array([entry], type=pa.map_('i4', 'i4'))
def test_dictionary_from_boolean():
typ = pa.dictionary(pa.int8(), value_type=pa.bool_())
a = pa.array([False, False, True, False, True], type=typ)
assert isinstance(a.type, pa.DictionaryType)
assert a.type.equals(typ)
expected_indices = pa.array([0, 0, 1, 0, 1], type=pa.int8())
expected_dictionary = pa.array([False, True], type=pa.bool_())
assert a.indices.equals(expected_indices)
assert a.dictionary.equals(expected_dictionary)
@pytest.mark.parametrize('value_type', [
pa.int8(),
pa.int16(),
pa.int32(),
pa.int64(),
pa.uint8(),
pa.uint16(),
pa.uint32(),
pa.uint64(),
pa.float32(),
pa.float64(),
])
def test_dictionary_from_integers(value_type):
typ = pa.dictionary(pa.int8(), value_type=value_type)
a = pa.array([1, 2, 1, 1, 2, 3], type=typ)
assert isinstance(a.type, pa.DictionaryType)
assert a.type.equals(typ)
expected_indices = pa.array([0, 1, 0, 0, 1, 2], type=pa.int8())
expected_dictionary = pa.array([1, 2, 3], type=value_type)
assert a.indices.equals(expected_indices)
assert a.dictionary.equals(expected_dictionary)
@pytest.mark.parametrize('input_index_type', [
pa.int8(),
pa.int16(),
pa.int32(),
pa.int64()
])
def test_dictionary_index_type(input_index_type):
    # the dictionary array is constructed using an adaptive index type builder,
    # but the input index type is treated as the minimal width type to use
typ = pa.dictionary(input_index_type, value_type=pa.int64())
arr = pa.array(range(10), type=typ)
assert arr.type.equals(typ)
def test_dictionary_is_always_adaptive():
    # the dictionary array is constructed using an adaptive index type builder,
    # meaning that the output index type may be wider than the requested index
    # type, depending on the input data
typ = pa.dictionary(pa.int8(), value_type=pa.int64())
a = pa.array(range(2**7), type=typ)
expected = pa.dictionary(pa.int8(), pa.int64())
assert a.type.equals(expected)
a = pa.array(range(2**7 + 1), type=typ)
expected = pa.dictionary(pa.int16(), pa.int64())
assert a.type.equals(expected)
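# Editor's sketch (not part of the upstream suite; assumes the same pyarrow
# API exercised above): the adaptive widening is also visible on the
# .indices child array of the resulting DictionaryArray.
def test_dictionary_adaptive_index_width_sketch():
    typ = pa.dictionary(pa.int8(), value_type=pa.int64())
    small = pa.array(range(2**7), type=typ)      # 128 distinct values fit in int8
    large = pa.array(range(2**7 + 1), type=typ)  # 129 values force widening to int16
    assert small.indices.type == pa.int8()
    assert large.indices.type == pa.int16()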
def test_dictionary_from_strings():
for value_type in [pa.binary(), pa.string()]:
typ = pa.dictionary(pa.int8(), value_type)
a = pa.array(["", "a", "bb", "a", "bb", "ccc"], type=typ)
assert isinstance(a.type, pa.DictionaryType)
expected_indices = pa.array([0, 1, 2, 1, 2, 3], type=pa.int8())
expected_dictionary = pa.array(["", "a", "bb", "ccc"], type=value_type)
assert a.indices.equals(expected_indices)
assert a.dictionary.equals(expected_dictionary)
# fixed size binary type
typ = pa.dictionary(pa.int8(), pa.binary(3))
a = pa.array(["aaa", "aaa", "bbb", "ccc", "bbb"], type=typ)
assert isinstance(a.type, pa.DictionaryType)
expected_indices = pa.array([0, 0, 1, 2, 1], type=pa.int8())
expected_dictionary = pa.array(["aaa", "bbb", "ccc"], type=pa.binary(3))
assert a.indices.equals(expected_indices)
assert a.dictionary.equals(expected_dictionary)
@pytest.mark.parametrize(('unit', 'expected'), [
('s', datetime.timedelta(seconds=-2147483000)),
('ms', datetime.timedelta(milliseconds=-2147483000)),
('us', datetime.timedelta(microseconds=-2147483000)),
('ns', datetime.timedelta(microseconds=-2147483))
])
def test_duration_array_roundtrip_corner_cases(unit, expected):
    # Corner case discovered by hypothesis: there were implicit conversions to
    # unsigned values, resulting in wrong values with the wrong sign.
ty = pa.duration(unit)
arr = pa.array([-2147483000], type=ty)
restored = pa.array(arr.to_pylist(), type=ty)
assert arr.equals(restored)
expected_list = [expected]
if unit == 'ns':
# if pandas is available then a pandas Timedelta is returned
try:
import pandas as pd
except ImportError:
pass
else:
expected_list = [pd.Timedelta(-2147483000, unit='ns')]
assert restored.to_pylist() == expected_list
@pytest.mark.pandas
def test_roundtrip_nanosecond_resolution_pandas_temporal_objects():
# corner case discovered by hypothesis: preserving the nanoseconds on
# conversion from a list of Timedelta and Timestamp objects
import pandas as pd
ty = pa.duration('ns')
arr = pa.array([9223371273709551616], type=ty)
data = arr.to_pylist()
assert isinstance(data[0], pd.Timedelta)
restored = pa.array(data, type=ty)
assert arr.equals(restored)
assert restored.to_pylist() == [
pd.Timedelta(9223371273709551616, unit='ns')
]
ty = pa.timestamp('ns')
arr = pa.array([9223371273709551616], type=ty)
data = arr.to_pylist()
assert isinstance(data[0], pd.Timestamp)
restored = pa.array(data, type=ty)
assert arr.equals(restored)
assert restored.to_pylist() == [
pd.Timestamp(9223371273709551616, unit='ns')
]
ty = pa.timestamp('ns', tz='US/Eastern')
value = 1604119893000000000
arr = pa.array([value], type=ty)
data = arr.to_pylist()
assert isinstance(data[0], pd.Timestamp)
restored = pa.array(data, type=ty)
assert arr.equals(restored)
assert restored.to_pylist() == [
pd.Timestamp(value, unit='ns').tz_localize(
"UTC").tz_convert('US/Eastern')
]
@h.given(past.all_arrays)
def test_array_to_pylist_roundtrip(arr):
seq = arr.to_pylist()
restored = pa.array(seq, type=arr.type)
assert restored.equals(arr)
@pytest.mark.large_memory
def test_auto_chunking_binary_like():
    # large binary values used to build the test data
v1 = b'x' * 100000000
v2 = b'x' * 147483646
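    # Editor's note: 20 * 100_000_000 + 147_483_646 == 2_147_483_646 == 2**31 - 2
    # bytes, just under the 2**31 - 1 byte capacity of a single binary chunk
    # (int32 offsets), so appending anything more forces a second chunk below.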
# single chunk
one_chunk_data = [v1] * 20 + [b'', None, v2]
arr = pa.array(one_chunk_data, type=pa.binary())
assert isinstance(arr, pa.Array)
assert len(arr) == 23
assert arr[20].as_py() == b''
assert arr[21].as_py() is None
assert arr[22].as_py() == v2
# two chunks
two_chunk_data = one_chunk_data + [b'two']
arr = pa.array(two_chunk_data, type=pa.binary())
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
assert len(arr.chunk(0)) == 23
assert len(arr.chunk(1)) == 1
assert arr.chunk(0)[20].as_py() == b''
assert arr.chunk(0)[21].as_py() is None
assert arr.chunk(0)[22].as_py() == v2
assert arr.chunk(1).to_pylist() == [b'two']
# three chunks
three_chunk_data = one_chunk_data * 2 + [b'three', b'three']
arr = pa.array(three_chunk_data, type=pa.binary())
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 3
assert len(arr.chunk(0)) == 23
assert len(arr.chunk(1)) == 23
assert len(arr.chunk(2)) == 2
for i in range(2):
assert arr.chunk(i)[20].as_py() == b''
assert arr.chunk(i)[21].as_py() is None
assert arr.chunk(i)[22].as_py() == v2
assert arr.chunk(2).to_pylist() == [b'three', b'three']
@pytest.mark.large_memory
def test_auto_chunking_list_of_binary():
# ARROW-6281
vals = [['x' * 1024]] * ((2 << 20) + 1)
arr = pa.array(vals)
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
assert len(arr.chunk(0)) == 2**21 - 1
assert len(arr.chunk(1)) == 2
assert arr.chunk(1).to_pylist() == [['x' * 1024]] * 2
@pytest.mark.large_memory
def test_auto_chunking_list_like():
item = np.ones((2**28,), dtype='uint8')
data = [item] * (2**3 - 1)
arr = pa.array(data, type=pa.list_(pa.uint8()))
assert isinstance(arr, pa.Array)
assert len(arr) == 7
item = np.ones((2**28,), dtype='uint8')
data = [item] * 2**3
arr = pa.array(data, type=pa.list_(pa.uint8()))
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
assert len(arr.chunk(0)) == 7
assert len(arr.chunk(1)) == 1
chunk = arr.chunk(1)
scalar = chunk[0]
assert isinstance(scalar, pa.ListScalar)
expected = pa.array(item, type=pa.uint8())
assert scalar.values == expected
@pytest.mark.slow
@pytest.mark.large_memory
def test_auto_chunking_map_type():
# takes ~20 minutes locally
ty = pa.map_(pa.int8(), pa.int8())
item = [(1, 1)] * 2**28
data = [item] * 2**3
arr = pa.array(data, type=ty)
assert isinstance(arr, pa.ChunkedArray)
assert len(arr.chunk(0)) == 7
assert len(arr.chunk(1)) == 1
@pytest.mark.large_memory
@pytest.mark.parametrize(('ty', 'char'), [
(pa.string(), 'x'),
(pa.binary(), b'x'),
])
def test_nested_auto_chunking(ty, char):
v1 = char * 100000000
v2 = char * 147483646
struct_type = pa.struct([
pa.field('bool', pa.bool_()),
pa.field('integer', pa.int64()),
pa.field('string-like', ty),
])
data = [{'bool': True, 'integer': 1, 'string-like': v1}] * 20
data.append({'bool': True, 'integer': 1, 'string-like': v2})
arr = pa.array(data, type=struct_type)
assert isinstance(arr, pa.Array)
data.append({'bool': True, 'integer': 1, 'string-like': char})
arr = pa.array(data, type=struct_type)
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
assert len(arr.chunk(0)) == 21
assert len(arr.chunk(1)) == 1
assert arr.chunk(1)[0].as_py() == {
'bool': True,
'integer': 1,
'string-like': char
}
@pytest.mark.large_memory
def test_array_from_pylist_data_overflow():
# Regression test for ARROW-12983
# Data buffer overflow - should result in chunked array
items = [b'a' * 4096] * (2 ** 19)
arr = pa.array(items, type=pa.string())
assert isinstance(arr, pa.ChunkedArray)
assert len(arr) == 2**19
assert len(arr.chunks) > 1
mask = np.zeros(2**19, bool)
arr = pa.array(items, mask=mask, type=pa.string())
assert isinstance(arr, pa.ChunkedArray)
assert len(arr) == 2**19
assert len(arr.chunks) > 1
arr = pa.array(items, type=pa.binary())
assert isinstance(arr, pa.ChunkedArray)
assert len(arr) == 2**19
assert len(arr.chunks) > 1
@pytest.mark.slow
@pytest.mark.large_memory
def test_array_from_pylist_offset_overflow():
# Regression test for ARROW-12983
# Offset buffer overflow - should result in chunked array
# Note this doesn't apply to primitive arrays
items = [b'a'] * (2 ** 31)
arr = pa.array(items, type=pa.string())
assert isinstance(arr, pa.ChunkedArray)
assert len(arr) == 2**31
assert len(arr.chunks) > 1
mask = np.zeros(2**31, bool)
arr = pa.array(items, mask=mask, type=pa.string())
assert isinstance(arr, pa.ChunkedArray)
assert len(arr) == 2**31
assert len(arr.chunks) > 1
arr = pa.array(items, type=pa.binary())
assert isinstance(arr, pa.ChunkedArray)
assert len(arr) == 2**31
assert len(arr.chunks) > 1
|
the-stack_0_12508 | #!/usr/bin/env python
"""
.. Sandbox sub-command
.. codeauthor:: Rich Plevin <[email protected]>
.. Copyright (c) 2016 Richard Plevin
See https://opensource.org/licenses/MIT for license details.
"""
from __future__ import print_function
from ..subcommand import SubcommandABC, clean_help
def driver(args, tool):
# lazy imports to avoid loading anything that's not used by gcamtool
import os
import subprocess
from ..config import getParam, pathjoin
from ..error import CommandlineError
from ..scenarioSetup import createSandbox
from ..log import getLogger
from ..utils import removeTreeSafely
_logger = getLogger(__name__)
project = args.projectName or getParam('GCAM.DefaultProject')
if not project:
raise CommandlineError("sandbox: must specify project name or set config parameter GCAM.DefaultProject")
if not (args.scenario or args.groupDir):
raise CommandlineError("sandbox: must specify scenario and/or group name")
sandboxProjectDir = getParam('GCAM.SandboxProjectDir')
sandbox = pathjoin(sandboxProjectDir, args.groupDir, args.scenario)
# handle ~ in pathname
sandbox = pathjoin(sandbox, expanduser=True, abspath=True, normpath=True)
if args.path:
print(sandbox)
execute = not args.noExecute
if args.recreate:
args.delete = args.create = True
if args.delete:
_logger.info('Removing ' + sandbox)
try:
if execute:
if os.path.islink(sandbox):
os.remove(sandbox)
else:
removeTreeSafely(sandbox)
else:
print("Would remove", sandbox)
except Exception as e:
            _logger.warning("Can't remove '%s': %s" % (sandbox, e))
if args.create:
if execute:
_logger.info('Creating ' + sandbox)
createSandbox(sandbox)
else:
print("Would create", sandbox)
if args.run:
cmdStr = 'cd ' + sandbox + '; ' + args.run
if execute:
_logger.info(cmdStr)
os.chdir(sandbox)
subprocess.call(args.run, shell=True)
else:
print("Would run:", cmdStr)
class SandboxCommand(SubcommandABC):
def __init__(self, subparsers):
kwargs = {'help' : '''Perform operations on a sandbox.'''}
super(SandboxCommand, self).__init__('sandbox', subparsers, kwargs, group='utils')
def addArgs(self, parser):
parser.add_argument('--create', action='store_true',
help=clean_help('''Create the identified sandbox. If used with --delete,
the deletion occurs first.'''))
parser.add_argument('--delete', action='store_true',
                            help=clean_help('''Delete the identified sandbox. If used with --create, the
deletion occurs first.'''))
parser.add_argument('--recreate', action='store_true',
help=clean_help('''Recreate the identified sandbox. Equivalent to using the
--delete and --create options together.'''))
parser.add_argument('-g', '--groupDir', default='', metavar='NAME',
help=clean_help('''The name of the scenario group subdir'''))
parser.add_argument('-n', '--noExecute', action='store_true',
help=clean_help('''Print the command that would be executed by --run, but
don't execute it.'''))
parser.add_argument('-p', '--path', action='store_true',
help=clean_help('''Print the absolute path to the identified sandbox.'''))
parser.add_argument('-r', '--run', metavar='CMD',
help=clean_help('''Run the given command in the identified sandbox.'''))
parser.add_argument('-s', '--scenario', default='',
help=clean_help('''The scenario for the computed sandbox root.'''))
return parser # for auto-doc generation
def run(self, args, tool):
driver(args, tool)
the-stack_0_12509
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
from __future__ import unicode_literals
from optparse import OptionParser
import os
import re
import sys
from xml.etree import ElementTree
# Make sure we're using Babel source, and not some previously installed version
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..'))
from babel import dates, numbers
from babel.compat import pickle, text_type
from babel.plural import PluralRule
from babel.localedata import Alias
parse = ElementTree.parse
weekdays = {'mon': 0, 'tue': 1, 'wed': 2, 'thu': 3, 'fri': 4, 'sat': 5,
'sun': 6}
def _text(elem):
buf = [elem.text or '']
for child in elem:
buf.append(_text(child))
buf.append(elem.tail or '')
return ''.join([_f for _f in buf if _f]).strip()
NAME_RE = re.compile(r"^\w+$")
TYPE_ATTR_RE = re.compile(r"^\w+\[@type='(.*?)'\]$")
NAME_MAP = {
'dateFormats': 'date_formats',
'dateTimeFormats': 'datetime_formats',
'eraAbbr': 'abbreviated',
'eraNames': 'wide',
'eraNarrow': 'narrow',
'timeFormats': 'time_formats'
}
def _translate_alias(ctxt, path):
parts = path.split('/')
keys = ctxt[:]
for part in parts:
if part == '..':
keys.pop()
else:
match = TYPE_ATTR_RE.match(part)
if match:
keys.append(match.group(1))
else:
assert NAME_RE.match(part)
keys.append(NAME_MAP.get(part, part))
return keys
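# Illustrative trace (not part of the original script): given the context keys
# ['months', 'format', 'wide'] and a CLDR alias path such as
# "../monthWidth[@type='abbreviated']", the '..' step pops 'wide' and the typed
# step appends 'abbreviated', yielding ['months', 'format', 'abbreviated'].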
def main():
parser = OptionParser(usage='%prog path/to/cldr')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('incorrect number of arguments')
srcdir = args[0]
destdir = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),
'..', 'babel')
sup = parse(os.path.join(srcdir, 'supplemental', 'supplementalData.xml'))
# Import global data from the supplemental files
global_data = {}
territory_zones = global_data.setdefault('territory_zones', {})
zone_aliases = global_data.setdefault('zone_aliases', {})
zone_territories = global_data.setdefault('zone_territories', {})
for elem in sup.findall('.//timezoneData/zoneFormatting/zoneItem'):
tzid = elem.attrib['type']
territory_zones.setdefault(elem.attrib['territory'], []).append(tzid)
zone_territories[tzid] = elem.attrib['territory']
if 'aliases' in elem.attrib:
for alias in elem.attrib['aliases'].split():
zone_aliases[alias] = tzid
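# Sketch of the shapes built by the loop above (values are examples only):
#   territory_zones: territory code -> list of zone ids, e.g. {'US': ['America/New_York', ...]}
#   zone_territories: zone id -> territory code (the inverse mapping)
#   zone_aliases: alias -> canonical zone id, e.g. {'US/Eastern': 'America/New_York'}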
# Import Metazone mapping
meta_zones = global_data.setdefault('meta_zones', {})
tzsup = parse(os.path.join(srcdir, 'supplemental', 'metazoneInfo.xml'))
for elem in tzsup.findall('.//timezone'):
for child in elem.findall('usesMetazone'):
if 'to' not in child.attrib: # FIXME: support old mappings
meta_zones[elem.attrib['type']] = child.attrib['mzone']
outfile = open(os.path.join(destdir, 'global.dat'), 'wb')
try:
pickle.dump(global_data, outfile, 2)
finally:
outfile.close()
# build a territory containment mapping for inheritance
regions = {}
for elem in sup.findall('.//territoryContainment/group'):
regions[elem.attrib['type']] = elem.attrib['contains'].split()
# Resolve territory containment
territory_containment = {}
region_items = sorted(regions.items())
for group, territory_list in region_items:
for territory in territory_list:
containers = territory_containment.setdefault(territory, set([]))
if group in territory_containment:
containers |= territory_containment[group]
containers.add(group)
# prepare the per-locale plural rules definitions
plural_rules = {}
prsup = parse(os.path.join(srcdir, 'supplemental', 'plurals.xml'))
for elem in prsup.findall('.//plurals/pluralRules'):
rules = []
for rule in elem.findall('pluralRule'):
rules.append((rule.attrib['count'], text_type(rule.text)))
pr = PluralRule(rules)
for locale in elem.attrib['locales'].split():
plural_rules[locale] = pr
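# For orientation, a minimal hand-built rule equivalent to what the loop above
# produces from plurals.xml might look like this (illustrative sketch only):
#
#   pr = PluralRule([('one', 'n is 1')])
#   pr(1)  # -> 'one'
#   pr(5)  # -> 'other'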
filenames = os.listdir(os.path.join(srcdir, 'main'))
filenames.remove('root.xml')
filenames.sort(key=lambda a: len(a))
filenames.insert(0, 'root.xml')
for filename in filenames:
stem, ext = os.path.splitext(filename)
if ext != '.xml':
continue
sys.stderr.write('Processing input file %r\n' % filename)
tree = parse(os.path.join(srcdir, 'main', filename))
data = {}
language = None
elem = tree.find('.//identity/language')
if elem is not None:
language = elem.attrib['type']
sys.stderr.write(' Language: %r\n' % language)
territory = None
elem = tree.find('.//identity/territory')
if elem is not None:
territory = elem.attrib['type']
else:
territory = '001' # world
sys.stderr.write(' Territory: %r\n' % territory)
regions = territory_containment.get(territory, [])
sys.stderr.write(' Regions: %r\n' % regions)
# plural rules
locale_id = '_'.join([_f for _f in [
language,
territory != '001' and territory or None
] if _f])
if locale_id in plural_rules:
data['plural_form'] = plural_rules[locale_id]
# <localeDisplayNames>
territories = data.setdefault('territories', {})
for elem in tree.findall('.//territories/territory'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib['type'] in territories:
continue
territories[elem.attrib['type']] = _text(elem)
languages = data.setdefault('languages', {})
for elem in tree.findall('.//languages/language'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib['type'] in languages:
continue
languages[elem.attrib['type']] = _text(elem)
variants = data.setdefault('variants', {})
for elem in tree.findall('.//variants/variant'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib['type'] in variants:
continue
variants[elem.attrib['type']] = _text(elem)
scripts = data.setdefault('scripts', {})
for elem in tree.findall('.//scripts/script'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib['type'] in scripts:
continue
scripts[elem.attrib['type']] = _text(elem)
# <dates>
week_data = data.setdefault('week_data', {})
supelem = sup.find('.//weekData')
for elem in supelem.findall('minDays'):
territories = elem.attrib['territories'].split()
if territory in territories or any([r in territories for r in regions]):
week_data['min_days'] = int(elem.attrib['count'])
for elem in supelem.findall('firstDay'):
if 'alt' not in elem.attrib: # ignore alternatives
territories = elem.attrib['territories'].split()
if territory in territories or any([r in territories for r in regions]):
week_data['first_day'] = weekdays[elem.attrib['day']]
for elem in supelem.findall('weekendStart'):
territories = elem.attrib['territories'].split()
if territory in territories or any([r in territories for r in regions]):
week_data['weekend_start'] = weekdays[elem.attrib['day']]
for elem in supelem.findall('weekendEnd'):
territories = elem.attrib['territories'].split()
if territory in territories or any([r in territories for r in regions]):
week_data['weekend_end'] = weekdays[elem.attrib['day']]
zone_formats = data.setdefault('zone_formats', {})
for elem in tree.findall('.//timeZoneNames/gmtFormat'):
if 'draft' not in elem.attrib and 'alt' not in elem.attrib:
zone_formats['gmt'] = text_type(elem.text).replace('{0}', '%s')
break
for elem in tree.findall('.//timeZoneNames/regionFormat'):
if 'draft' not in elem.attrib and 'alt' not in elem.attrib:
zone_formats['region'] = text_type(elem.text).replace('{0}', '%s')
break
for elem in tree.findall('.//timeZoneNames/fallbackFormat'):
if 'draft' not in elem.attrib and 'alt' not in elem.attrib:
zone_formats['fallback'] = text_type(elem.text) \
.replace('{0}', '%(0)s').replace('{1}', '%(1)s')
break
time_zones = data.setdefault('time_zones', {})
for elem in tree.findall('.//timeZoneNames/zone'):
info = {}
city = elem.findtext('exemplarCity')
if city:
info['city'] = text_type(city)
for child in elem.findall('long/*'):
info.setdefault('long', {})[child.tag] = text_type(child.text)
for child in elem.findall('short/*'):
info.setdefault('short', {})[child.tag] = text_type(child.text)
time_zones[elem.attrib['type']] = info
meta_zones = data.setdefault('meta_zones', {})
for elem in tree.findall('.//timeZoneNames/metazone'):
info = {}
city = elem.findtext('exemplarCity')
if city:
info['city'] = text_type(city)
for child in elem.findall('long/*'):
info.setdefault('long', {})[child.tag] = text_type(child.text)
for child in elem.findall('short/*'):
info.setdefault('short', {})[child.tag] = text_type(child.text)
info['common'] = elem.findtext('commonlyUsed') == 'true'
meta_zones[elem.attrib['type']] = info
for calendar in tree.findall('.//calendars/calendar'):
if calendar.attrib['type'] != 'gregorian':
# TODO: support other calendar types
continue
months = data.setdefault('months', {})
for ctxt in calendar.findall('months/monthContext'):
ctxt_type = ctxt.attrib['type']
ctxts = months.setdefault(ctxt_type, {})
for width in ctxt.findall('monthWidth'):
width_type = width.attrib['type']
widths = ctxts.setdefault(width_type, {})
for elem in width.getiterator():
if elem.tag == 'month':
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and int(elem.attrib['type']) in widths:
continue
widths[int(elem.attrib.get('type'))] = text_type(elem.text)
elif elem.tag == 'alias':
ctxts[width_type] = Alias(
_translate_alias(['months', ctxt_type, width_type],
elem.attrib['path'])
)
days = data.setdefault('days', {})
for ctxt in calendar.findall('days/dayContext'):
ctxt_type = ctxt.attrib['type']
ctxts = days.setdefault(ctxt_type, {})
for width in ctxt.findall('dayWidth'):
width_type = width.attrib['type']
widths = ctxts.setdefault(width_type, {})
for elem in width.getiterator():
if elem.tag == 'day':
dtype = weekdays[elem.attrib['type']]
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and dtype in widths:
continue
widths[dtype] = text_type(elem.text)
elif elem.tag == 'alias':
ctxts[width_type] = Alias(
_translate_alias(['days', ctxt_type, width_type],
elem.attrib['path'])
)
quarters = data.setdefault('quarters', {})
for ctxt in calendar.findall('quarters/quarterContext'):
ctxt_type = ctxt.attrib['type']
ctxts = quarters.setdefault(ctxt.attrib['type'], {})
for width in ctxt.findall('quarterWidth'):
width_type = width.attrib['type']
widths = ctxts.setdefault(width_type, {})
for elem in width.getiterator():
if elem.tag == 'quarter':
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and int(elem.attrib['type']) in widths:
continue
widths[int(elem.attrib['type'])] = text_type(elem.text)
elif elem.tag == 'alias':
ctxts[width_type] = Alias(
_translate_alias(['quarters', ctxt_type, width_type],
elem.attrib['path'])
)
eras = data.setdefault('eras', {})
for width in calendar.findall('eras/*'):
width_type = NAME_MAP[width.tag]
widths = eras.setdefault(width_type, {})
for elem in width.getiterator():
if elem.tag == 'era':
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and int(elem.attrib['type']) in widths:
continue
widths[int(elem.attrib.get('type'))] = text_type(elem.text)
elif elem.tag == 'alias':
eras[width_type] = Alias(
_translate_alias(['eras', width_type],
elem.attrib['path'])
)
# AM/PM
periods = data.setdefault('periods', {})
for elem in calendar.findall('am'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.tag in periods:
continue
periods[elem.tag] = text_type(elem.text)
for elem in calendar.findall('pm'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.tag in periods:
continue
periods[elem.tag] = text_type(elem.text)
date_formats = data.setdefault('date_formats', {})
for format in calendar.findall('dateFormats'):
for elem in format.getiterator():
if elem.tag == 'dateFormatLength':
if 'draft' in elem.attrib and \
elem.attrib.get('type') in date_formats:
continue
try:
date_formats[elem.attrib.get('type')] = \
dates.parse_pattern(text_type(elem.findtext('dateFormat/pattern')))
except ValueError:
sys.stderr.write('ERROR: %s\n' % sys.exc_info()[1])
elif elem.tag == 'alias':
date_formats = Alias(_translate_alias(
['date_formats'], elem.attrib['path'])
)
time_formats = data.setdefault('time_formats', {})
for format in calendar.findall('timeFormats'):
for elem in format.getiterator():
if elem.tag == 'timeFormatLength':
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib.get('type') in time_formats:
continue
try:
time_formats[elem.attrib.get('type')] = \
dates.parse_pattern(text_type(elem.findtext('timeFormat/pattern')))
except ValueError:
sys.stderr.write('ERROR: %s\n' % sys.exc_info()[1])
elif elem.tag == 'alias':
time_formats = Alias(_translate_alias(
['time_formats'], elem.attrib['path'])
)
datetime_formats = data.setdefault('datetime_formats', {})
for format in calendar.findall('dateTimeFormats'):
for elem in format.getiterator():
if elem.tag == 'dateTimeFormatLength':
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib.get('type') in datetime_formats:
continue
try:
datetime_formats[elem.attrib.get('type')] = \
text_type(elem.findtext('dateTimeFormat/pattern'))
except ValueError:
sys.stderr.write('ERROR: %s\n' % sys.exc_info()[1])
elif elem.tag == 'alias':
datetime_formats = Alias(_translate_alias(
['datetime_formats'], elem.attrib['path'])
)
# <numbers>
number_symbols = data.setdefault('number_symbols', {})
for elem in tree.findall('.//numbers/symbols/*'):
if ('draft' in elem.attrib or 'alt' in elem.attrib):
continue
number_symbols[elem.tag] = text_type(elem.text)
decimal_formats = data.setdefault('decimal_formats', {})
for elem in tree.findall('.//decimalFormats/decimalFormatLength'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib.get('type') in decimal_formats:
continue
pattern = text_type(elem.findtext('decimalFormat/pattern'))
decimal_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern)
scientific_formats = data.setdefault('scientific_formats', {})
for elem in tree.findall('.//scientificFormats/scientificFormatLength'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib.get('type') in scientific_formats:
continue
pattern = text_type(elem.findtext('scientificFormat/pattern'))
scientific_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern)
currency_formats = data.setdefault('currency_formats', {})
for elem in tree.findall('.//currencyFormats/currencyFormatLength'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib.get('type') in currency_formats:
continue
pattern = text_type(elem.findtext('currencyFormat/pattern'))
currency_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern)
percent_formats = data.setdefault('percent_formats', {})
for elem in tree.findall('.//percentFormats/percentFormatLength'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib.get('type') in percent_formats:
continue
pattern = text_type(elem.findtext('percentFormat/pattern'))
percent_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern)
currency_names = data.setdefault('currency_names', {})
currency_symbols = data.setdefault('currency_symbols', {})
for elem in tree.findall('.//currencies/currency'):
code = elem.attrib['type']
# TODO: support plural rules for currency name selection
for name in elem.findall('displayName'):
if ('draft' in name.attrib or 'count' in name.attrib) \
and code in currency_names:
continue
currency_names[code] = text_type(name.text)
# TODO: support choice patterns for currency symbol selection
symbol = elem.find('symbol')
if symbol is not None and 'draft' not in symbol.attrib \
and 'choice' not in symbol.attrib:
currency_symbols[code] = text_type(symbol.text)
# <units>
unit_patterns = data.setdefault('unit_patterns', {})
for elem in tree.findall('.//units/unit'):
unit_type = elem.attrib['type']
unit_pattern = unit_patterns.setdefault(unit_type, {})
for pattern in elem.findall('unitPattern'):
unit_patterns[unit_type][pattern.attrib['count']] = \
text_type(pattern.text)
outfile = open(os.path.join(destdir, 'localedata', stem + '.dat'), 'wb')
try:
pickle.dump(data, outfile, 2)
finally:
outfile.close()
if __name__ == '__main__':
main()
the-stack_0_12510
#!/usr/bin/env python3
"""Script for Tkinter GUI chat client."""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import tkinter
# crypto imports
# import nacl, nacl.secret, nacl.utils
# from nacl.public import PrivateKey, SealedBox
def receive():
"""Handles receiving of messages."""
while True:
try:
msg = client_socket.recv(BUFSIZ).decode("utf8")
msg_list.insert(tkinter.END, msg)
except OSError: # Possibly client has left the chat.
break
def send(event=None): # event is passed by binders.
"""Handles sending of messages."""
msg = my_msg.get()
my_msg.set("") # Clears input field.
client_socket.send(bytes(msg, "utf8"))
if msg == "{quit}":
client_socket.close()
top.quit()
def on_closing(event=None):
"""This function is to be called when the window is closed."""
my_msg.set("{quit}")
send()
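# Protocol note (inferred from the handlers above, not from any spec): the
# client signals that it is leaving by sending the literal message "{quit}",
# then closes its socket; a matching server is expected to drop the
# connection when it sees that token.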
top = tkinter.Tk()
top.title("Encrypted chat")
messages_frame = tkinter.Frame(top)
my_msg = tkinter.StringVar() # For the messages to be sent.
my_msg.set("")
scrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.
# Following will contain the messages.
msg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
messages_frame.pack()
entry_field = tkinter.Entry(top, textvariable=my_msg)
entry_field.bind("<Return>", send)
entry_field.pack()
send_button = tkinter.Button(top, text="Send", command=send)
send_button.pack()
top.protocol("WM_DELETE_WINDOW", on_closing)
#----Now comes the sockets part----
HOST = input('Enter host: ')
PORT = input('Enter port: ')
if not PORT:
PORT = 33000
else:
PORT = int(PORT)
BUFSIZ = 1024
ADDR = (HOST, PORT)
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(ADDR)
receive_thread = Thread(target=receive)
receive_thread.start()
tkinter.mainloop() # Starts GUI execution.
the-stack_0_12511
"""Regression tests for urllib"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from unittest.mock import patch
from test import support
import os
import sys
import tempfile
from nturl2path import url2pathname, pathname2url
from base64 import b64encode
import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
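# For example, hexescape('&') returns '%26' and hexescape('\n') returns '%0A',
# matching the RFC 2396 percent-encoding used throughout the tests below.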
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
with support.check_warnings(
('FancyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
opener = urllib.request.FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
class FakeHTTPMixin(object):
def fakehttp(self, fakedata):
class FakeSocket(io.BytesIO):
io_refs = 1
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
self.io_refs += 1
return self
def read(self, amt=None):
if self.closed:
return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return b""
return io.BytesIO.readline(self, length)
def close(self):
self.io_refs -= 1
if self.io_refs == 0:
io.BytesIO.close(self)
class FakeHTTPConnection(http.client.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = None
def connect(self):
self.sock = FakeSocket(fakedata)
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = FakeHTTPConnection
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
class FakeFTPMixin(object):
def fakeftp(self):
class FakeFtpWrapper(object):
def __init__(self, user, passwd, host, port, dirs, timeout=None,
persistent=True):
pass
def retrfile(self, file, type):
return io.BytesIO(), 0
def close(self):
pass
self._ftpwrapper_class = urllib.request.ftpwrapper
urllib.request.ftpwrapper = FakeFtpWrapper
def unfakeftp(self):
urllib.request.ftpwrapper = self._ftpwrapper_class
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), email.message.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison.
# Use the iterator in the usual implicit way to test for ticket #4608.
for line in self.returned_obj:
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
# getproxies_environment() uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com')
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com'))
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin):
"""Test urlopen() opening a fake http connection."""
def check_read(self, ver):
self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_willclose(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
resp = urlopen("http://www.python.org")
self.assertTrue(resp.fp.will_close)
finally:
self.unfakehttp()
def test_read_0_9(self):
# "0.9" response accepted (but not "simple responses" without
# a status line)
self.check_read(b"0.9")
def test_read_1_0(self):
self.check_read(b"1.0")
def test_read_1_1(self):
self.check_read(b"1.1")
def test_read_bogus(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(OSError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://python.org/")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises OSError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(OSError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_missing_localfile(self):
# Test for #10836
with self.assertRaises(urllib.error.URLError) as e:
urlopen('file://localhost/a/file/which/doesnot/exists.py')
self.assertTrue(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_file_notexists(self):
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
try:
self.assertTrue(os.path.exists(tmp_file))
with urlopen(tmp_fileurl) as fobj:
self.assertTrue(fobj)
finally:
os.close(fd)
os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
with self.assertRaises(urllib.error.URLError):
urlopen(tmp_fileurl)
def test_ftp_nohost(self):
test_ftp_url = 'ftp:///path'
with self.assertRaises(urllib.error.URLError) as e:
urlopen(test_ftp_url)
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_ftp_nonexisting(self):
with self.assertRaises(urllib.error.URLError) as e:
urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
@patch.object(urllib.request, 'MAXFTPCACHE', 0)
def test_ftp_cache_pruning(self):
self.fakeftp()
try:
urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, [])
urlopen('ftp://localhost')
finally:
self.unfakeftp()
def test_userpass_inurl(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://user:[email protected]/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:[email protected]/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_userpass_inurl_w_spaces(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
userpass = "a b:c d"
url = "http://{}@python.org/".format(userpass)
fakehttp_wrapper = http.client.HTTPConnection
authorization = ("Authorization: Basic %s\r\n" %
b64encode(userpass.encode("ASCII")).decode("ASCII"))
fp = urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_URLopener_deprecation(self):
with support.check_warnings(('',DeprecationWarning)):
urllib.request.URLopener()
class urlopen_DataTests(unittest.TestCase):
"""Test urlopen() opening a data URL."""
def setUp(self):
# text containing URL special- and unicode-characters
self.text = "test data URLs :;,%=& \u00f6 \u00c4 "
# 2x1 pixel RGB PNG image with one black and one white pixel
self.image = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00'
b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae'
b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00'
b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82')
self.text_url = (
"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3"
"D%26%20%C3%B6%20%C3%84%20")
self.text_url_base64 = (
"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs"
"sJT0mIPYgxCA%3D")
# base64 encoded data URL that contains ignorable spaces,
# such as "\n", " ", "%0A", and "%20".
self.image_url = (
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n"
"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 "
"vHgAAAABJRU5ErkJggg%3D%3D%0A%20")
self.text_url_resp = urllib.request.urlopen(self.text_url)
self.text_url_base64_resp = urllib.request.urlopen(
self.text_url_base64)
self.image_url_resp = urllib.request.urlopen(self.image_url)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.text_url_resp, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_info(self):
self.assertIsInstance(self.text_url_resp.info(), email.message.Message)
self.assertEqual(self.text_url_base64_resp.info().get_params(),
[('text/plain', ''), ('charset', 'ISO-8859-1')])
self.assertEqual(self.image_url_resp.info()['content-length'],
str(len(self.image)))
self.assertEqual(urllib.request.urlopen("data:,").info().get_params(),
[('text/plain', ''), ('charset', 'US-ASCII')])
def test_geturl(self):
self.assertEqual(self.text_url_resp.geturl(), self.text_url)
self.assertEqual(self.text_url_base64_resp.geturl(),
self.text_url_base64)
self.assertEqual(self.image_url_resp.geturl(), self.image_url)
def test_read_text(self):
self.assertEqual(self.text_url_resp.read().decode(
dict(self.text_url_resp.info().get_params())['charset']), self.text)
def test_read_text_base64(self):
self.assertEqual(self.text_url_base64_resp.read().decode(
dict(self.text_url_base64_resp.info().get_params())['charset']),
self.text)
def test_read_image(self):
self.assertEqual(self.image_url_resp.read(), self.image)
def test_missing_comma(self):
self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain')
def test_invalid_base64_data(self):
# missing padding character
self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
# this only helps to makes sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
filePath = os.path.abspath(filePath)
try:
filePath.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filePath is not encodable to utf8")
return "file://%s" % urllib.request.pathname2url(filePath)
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertIsInstance(result[1], email.message.Message,
"did not get a email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(block_count, block_read_size, file_size, count_holder=[0]):
self.assertIsInstance(block_count, int)
self.assertIsInstance(block_read_size, int)
self.assertIsInstance(file_size, int)
self.assertEqual(block_count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][2], 5)
self.assertEqual(report[1][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][2], 8193)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[1][1], 8192)
self.assertEqual(report[2][1], 8192)
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
pass
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve('http://example.com/',
reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve('http://example.com/')
finally:
self.unfakehttp()
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded by with None (default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the doc string for quoting_Tests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
Test cannot assume anything about order, since the docs make no guarantee
and the input may be a dictionary.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, True))
def test_urlencode_encoding(self):
# ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
# ASCII Encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
result = urllib.parse.urlencode(given, doseq=True, safe=":$",
encoding="latin-1")
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
        # Test that automatic quoting and unquoting work for pathname2url() and
        # url2pathname(), respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the urllib.url2path function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
                             'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_splitpasswd(self):
"""Some of password examples are not sensible, but it is added to
confirming to RFC2617 and addressing issue4675.
"""
self.assertEqual(('user', 'ab'),urllib.parse.splitpasswd('user:ab'))
self.assertEqual(('user', 'a\nb'),urllib.parse.splitpasswd('user:a\nb'))
self.assertEqual(('user', 'a\tb'),urllib.parse.splitpasswd('user:a\tb'))
self.assertEqual(('user', 'a\rb'),urllib.parse.splitpasswd('user:a\rb'))
self.assertEqual(('user', 'a\fb'),urllib.parse.splitpasswd('user:a\fb'))
self.assertEqual(('user', 'a\vb'),urllib.parse.splitpasswd('user:a\vb'))
self.assertEqual(('user', 'a:b'),urllib.parse.splitpasswd('user:a:b'))
self.assertEqual(('user', 'a b'),urllib.parse.splitpasswd('user:a b'))
self.assertEqual(('user 2', 'ab'),urllib.parse.splitpasswd('user 2:ab'))
self.assertEqual(('user+1', 'a+b'),urllib.parse.splitpasswd('user+1:a+b'))
def test_thishost(self):
"""Test the urllib.request.thishost utility function returns a tuple"""
self.assertIsInstance(urllib.request.thishost(), tuple)
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
with support.check_warnings(
('DummyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work ok, but on those machines, sometimes
# fail in one of the tests, sometimes in other. I have a linux, and
# the tests go ok.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
class RequestTests(unittest.TestCase):
"""Unit tests for urllib.request.Request."""
def test_default_values(self):
Request = urllib.request.Request
request = Request("http://www.python.org")
self.assertEqual(request.get_method(), 'GET')
request = Request("http://www.python.org", {})
self.assertEqual(request.get_method(), 'POST')
def test_with_method_arg(self):
Request = urllib.request.Request
request = Request("http://www.python.org", method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", {}, method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", method='GET')
self.assertEqual(request.get_method(), 'GET')
request.method = 'HEAD'
self.assertEqual(request.get_method(), 'HEAD')
class URL2PathNameTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(url2pathname("///C|"), 'C:')
self.assertEqual(url2pathname("///C:"), 'C:')
self.assertEqual(url2pathname("///C|/"), 'C:\\')
def test_converting_when_no_drive_letter(self):
# cannot end a raw string in \
self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\')
self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\')
def test_simple_compare(self):
self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"),
r'C:\foo\bar\spam.foo')
def test_non_ascii_drive_letter(self):
self.assertRaises(IOError, url2pathname, "///\u00e8|/")
def test_roundtrip_url2pathname(self):
list_of_paths = ['C:',
r'\\\C\test\\',
r'C:\foo\bar\spam.foo'
]
for path in list_of_paths:
self.assertEqual(url2pathname(pathname2url(path)), path)
class PathName2URLTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(pathname2url("C:"), '///C:')
self.assertEqual(pathname2url("C:\\"), '///C:')
def test_converting_when_no_drive_letter(self):
self.assertEqual(pathname2url(r"\\\folder\test" "\\"),
'/////folder/test/')
self.assertEqual(pathname2url(r"\\folder\test" "\\"),
'////folder/test/')
self.assertEqual(pathname2url(r"\folder\test" "\\"),
'/folder/test/')
def test_simple_compare(self):
self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'),
"///C:/foo/bar/spam.foo" )
def test_long_drive_letter(self):
self.assertRaises(IOError, pathname2url, "XX:\\")
def test_roundtrip_pathname2url(self):
list_of_paths = ['///C:',
'/////folder/test/',
'///C:/foo/bar/spam.foo']
for path in list_of_paths:
self.assertEqual(pathname2url(url2pathname(path)), path)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_12513 | from gtts import *
from playsound import playsound
import random
import os
r1 = random.randint(1, 10000000)
r2 = random.randint(1, 10000000)
# Write the typed text to a scratch file, then read it back for the conversion.
with open("C:\\Python\\myfile.txt", "w") as f:
    f.write(input())
with open("C:\\Python\\myfile.txt", "r") as f:
    x = f.read()
language = 'en'
audio = gTTS(text=x, lang=language, slow=False)
filename = str(r2) + "randomtext" + str(r1) + ".mp3"
audio.save(filename)
playsound(filename)  # play the generated speech before the file is deleted below
os.remove(filename) |
the-stack_0_12514 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ExpressRouteCircuit(Resource):
"""ExpressRouteCircuit resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param sku: The SKU.
:type sku: ~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitSku
:param allow_classic_operations: Allow classic operations
:type allow_classic_operations: bool
:param circuit_provisioning_state: The CircuitProvisioningState state of
the resource.
:type circuit_provisioning_state: str
:param service_provider_provisioning_state: The
ServiceProviderProvisioningState state of the resource. Possible values
are 'NotProvisioned', 'Provisioning', 'Provisioned', and 'Deprovisioning'.
Possible values include: 'NotProvisioned', 'Provisioning', 'Provisioned',
'Deprovisioning'
:type service_provider_provisioning_state: str or
~azure.mgmt.network.v2016_12_01.models.ServiceProviderProvisioningState
:param authorizations: The list of authorizations.
:type authorizations:
list[~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitAuthorization]
:param peerings: The list of peerings.
:type peerings:
list[~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitPeering]
:param service_key: The ServiceKey.
:type service_key: str
:param service_provider_notes: The ServiceProviderNotes.
:type service_provider_notes: str
:param service_provider_properties: The ServiceProviderProperties.
:type service_provider_properties:
~azure.mgmt.network.v2016_12_01.models.ExpressRouteCircuitServiceProviderProperties
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param gateway_manager_etag: The GatewayManager Etag.
:type gateway_manager_etag: str
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'ExpressRouteCircuitSku'},
'allow_classic_operations': {'key': 'properties.allowClassicOperations', 'type': 'bool'},
'circuit_provisioning_state': {'key': 'properties.circuitProvisioningState', 'type': 'str'},
'service_provider_provisioning_state': {'key': 'properties.serviceProviderProvisioningState', 'type': 'str'},
'authorizations': {'key': 'properties.authorizations', 'type': '[ExpressRouteCircuitAuthorization]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'service_key': {'key': 'properties.serviceKey', 'type': 'str'},
'service_provider_notes': {'key': 'properties.serviceProviderNotes', 'type': 'str'},
'service_provider_properties': {'key': 'properties.serviceProviderProperties', 'type': 'ExpressRouteCircuitServiceProviderProperties'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'gateway_manager_etag': {'key': 'properties.gatewayManagerEtag', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, location: str=None, tags=None, sku=None, allow_classic_operations: bool=None, circuit_provisioning_state: str=None, service_provider_provisioning_state=None, authorizations=None, peerings=None, service_key: str=None, service_provider_notes: str=None, service_provider_properties=None, provisioning_state: str=None, gateway_manager_etag: str=None, **kwargs) -> None:
super(ExpressRouteCircuit, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.sku = sku
self.allow_classic_operations = allow_classic_operations
self.circuit_provisioning_state = circuit_provisioning_state
self.service_provider_provisioning_state = service_provider_provisioning_state
self.authorizations = authorizations
self.peerings = peerings
self.service_key = service_key
self.service_provider_notes = service_provider_notes
self.service_provider_properties = service_provider_properties
self.provisioning_state = provisioning_state
self.gateway_manager_etag = gateway_manager_etag
self.etag = None
|
the-stack_0_12515 | import torch.nn as nn
from src.models.layers import ConvBnAct
from .utils import FeatureHooks
class BackboneBase(nn.Module):
def create_hooks(self):
self.stage_names = [i['module'] for i in self.feature_info]
self.encoder_channels = [i['num_chs'] for i in self.feature_info]
hooks = [dict(module=name, type='forward') for name in self.stage_names]
self.feature_hooks = FeatureHooks(hooks, self.named_modules())
def create_neck(self, set_neck):
self.set_neck = set_neck
if set_neck:
# Neck (Self-Distillation)
modules = []
for in_c, out_c in zip(self.encoder_channels[:-1], self.encoder_channels[1:]):
modules.append(ConvBnAct(in_c, out_c, kernel_size=3, stride=2))
self.neck = nn.ModuleList(modules)
else:
self.neck = nn.Identity()
def forward_features(self, x):
raise NotImplementedError
def forward_neck(self, x):
if self.set_neck:
for i, module in enumerate(self.neck):
if x.size(1) == self.encoder_channels[i]:
x = module(x)
return x
def forward(self, x):
# Return features for classification.
y = self.forward_features(x)
y = self.forward_neck(y)
return y
def forward_backbone_features(self, x):
# Return intermediate features (for down-stream tasks).
last_features = self.forward_features(x)
backbone_features = self.feature_hooks.get_output(x.device)
backbone_features = list(backbone_features.values())
backbone_features = [x] + backbone_features
return last_features, backbone_features
def forward_stage_features(self, x):
# Return intermediate features (for self-distillation).
x = self.forward_features(x)
return list(self.feature_hooks.get_output(x.device).values())
def init_weights(self):
# #------- init weights --------
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
|
the-stack_0_12517 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.ios.ios import ios_provider_spec
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
socket_path = None
if self._play_context.connection == 'network_cli':
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using network_cli and will be ignored')
del self._task.args['provider']
elif self._play_context.connection == 'local':
provider = load_provider(ios_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'ios'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
# make sure we are in the right cli context which should be
# enable mode and not config module
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
out = conn.get_prompt()
result = super(ActionModule, self).run(task_vars=task_vars)
return result
|
the-stack_0_12518 | #Railway API - http://railwayapi.com/api/
from flask import Flask, render_template  # render_template is used by the stop/cancel intents below
from flask_ask import Ask, statement, question, session
import json
import requests
import time
import unidecode
import datetime
app = Flask(__name__)
ask = Ask(app, "/train_route")
api_key = '6dg3lgmg'
def date_convert(date):
    # Convert the ISO date string supplied by Alexa (YYYY-MM-DD) into the zero-padded
    # YYYYMMDD form used in the API URL. The original branching on month < 10 left the
    # day unpadded and returned an undefined name for months >= 10.
    parsed = datetime.datetime.strptime(date, '%Y-%m-%d')
    return "{:04d}{:02d}{:02d}".format(parsed.year, parsed.month, parsed.day)
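# An equivalent, more compact conversion (a sketch; date_convert_strftime is an extra
# helper that the rest of this script does not call): strftime performs the parsing and
# zero-padding of month and day in a single step.
def date_convert_strftime(date):
    return datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%Y%m%d')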
def get_live_train_status(trainnumber, doj):
r = requests.get("http://api.railwayapi.com/live/train/{}/doj/{}/apikey/{}/".format(trainnumber,doj,api_key))
try:
data = json.loads(r.content.decode('utf-8'))
if (data['response_code']==200):
return data['position']
elif (data['response_code']==510):
return "Train not scheduled to run on the given date"
elif (data['response_code']==204):
return "Empty response. Not able to fetch required data"
else:
return "Sorry, services not available at this moment"
except Exception:
return "Servers are busy"
def get_train_route(trainnumber):
cities = []
r = requests.get('http://api.railwayapi.com/route/train/{}/apikey/{}/'.format(trainnumber, api_key))
try:
data = json.loads(r.content.decode('utf-8'))
if (data['response_code']==200):
for listing in data['route']:
cities.append(listing['fullname'])
cities = "...then...".join(l for l in cities)
cities = "The train goes through..." + cities
return cities
elif (data['response_code']==204):
return "Empty response. Not able to fetch required data"
else:
return "Sorry, services not available at this moment"
except Exception:
return "Servers are busy"
def get_train_number(trainname):
r = requests.get("http://api.railwayapi.com/name_number/train/{}/apikey/{}/".format(trainname, api_key))
try:
data = json.loads(r.content.decode('utf-8'))
if (data['response_code']==200):
info1 = "Train number for..." + data['name'] + "...is..." + data['number']
return info1
elif (data['response_code']==204):
return "Empty response. Not able to fetch required data"
else:
return "Sorry, services not available at this moment"
except Exception:
return "Servers are busy"
@app.route('/')
def homepage():
return 'Hello World'
@ask.launch
def start_skill():
welcome = 'Welcome to Indian Railways Information Services...You can know about train routes and live train status...For live train status, say train status for train number followed by number on followed by date of journey...For train route, say check route for train number followed by number...For getting train number for a train name, say get train number for followed by train name...'
return question(welcome)
@ask.intent("LiveTrainStatusIntent")
def share_live_train_status(trainnumber,doj):
return statement(get_live_train_status(trainnumber, date_convert(doj)))
@ask.intent("TrainRouteIntent")
def share_train_route(trainnumber):
return statement(get_train_route(trainnumber))
@ask.intent("GetTrainNumberIntent")
def share_train_number(trainname):
return statement(get_train_number(trainname))
@ask.intent('AMAZON.HelpIntent')
def help():
    help_text = 'For live train status, say train status for train number followed by number on followed by date of journey...For train route, say check route for train number followed by number...For getting train number for a train name, say get train number for followed by train name...'
return question(help_text).reprompt(help_text)
@ask.intent('AMAZON.StopIntent')
def stop():
bye_text = render_template('bye')
return statement(bye_text)
@ask.intent('AMAZON.CancelIntent')
def cancel():
bye_text = render_template('bye')
return statement(bye_text)
if __name__ == '__main__':
app.run(debug=True)
|
the-stack_0_12519 | # Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Support for automatic stack components.
"""
import asyncio
import collections
from inspect import isawaitable
from typing import Callable, Any, Dict, List
from ..resource import ComponentResource, Resource, ResourceTransformation
from .settings import get_project, get_stack, get_root_resource, is_dry_run, set_root_resource
from .rpc_manager import RPC_MANAGER
from .sync_await import _all_tasks, _get_current_task
from .. import log
from . import known_types
from ..output import Output
async def run_pulumi_func(func: Callable):
try:
func()
finally:
log.debug("Waiting for outstanding RPCs to complete")
# Pump the event loop, giving all of the RPCs that we just queued up time to fully execute.
# The asyncio scheduler does not expose a "yield" primitive, so this will have to do.
#
# Note that "asyncio.sleep(0)" is the blessed way to do this:
# https://github.com/python/asyncio/issues/284#issuecomment-154180935
#
# We await each RPC in turn so that this loop will actually block rather than busy-wait.
while True:
await asyncio.sleep(0)
if len(RPC_MANAGER.rpcs) == 0:
break
log.debug(f"waiting for quiescence; {len(RPC_MANAGER.rpcs)} RPCs outstanding")
await RPC_MANAGER.rpcs.pop()
# Asyncio event loops require that all outstanding tasks be completed by the time that the
# event loop closes. If we're at this point and there are no outstanding RPCs, we should
# just cancel all outstanding tasks.
#
# We will occasionally start tasks deliberately that we know will never complete. We must
# cancel them before shutting down the event loop.
log.debug("Canceling all outstanding tasks")
for task in _all_tasks():
# Don't kill ourselves, that would be silly.
if task == _get_current_task():
continue
task.cancel()
# Pump the event loop again. Task.cancel is delivered asynchronously to all running tasks
# and each task needs to get scheduled in order to acknowledge the cancel and exit.
await asyncio.sleep(0)
# Once we get scheduled again, all tasks have exited and we're good to go.
log.debug("run_pulumi_func completed")
if RPC_MANAGER.unhandled_exception is not None:
raise RPC_MANAGER.unhandled_exception.with_traceback(RPC_MANAGER.exception_traceback)
async def run_in_stack(func: Callable):
"""
Run the given function inside of a new stack resource. This ensures that any stack export calls
will end up as output properties on the resulting stack component in the checkpoint file. This
is meant for internal runtime use only and is used by the Python SDK entrypoint program.
"""
await run_pulumi_func(lambda: Stack(func))
@known_types.stack
class Stack(ComponentResource):
"""
A synthetic stack component that automatically parents resources as the program runs.
"""
outputs: Dict[str, Any]
def __init__(self, func: Callable) -> None:
# Ensure we don't already have a stack registered.
if get_root_resource() is not None:
raise Exception('Only one root Pulumi Stack may be active at once')
# Now invoke the registration to begin creating this resource.
name = '%s-%s' % (get_project(), get_stack())
super(Stack, self).__init__('pulumi:pulumi:Stack', name, None, None)
# Invoke the function while this stack is active and then register its outputs.
self.outputs = dict()
set_root_resource(self)
try:
func()
finally:
self.register_outputs(massage(self.outputs, []))
# Intentionally leave this resource installed in case subsequent async work uses it.
def output(self, name: str, value: Any):
"""
Export a stack output with a given name and value.
"""
self.outputs[name] = value
# Note: we use a List here instead of a set as many objects are unhashable. This is inefficient,
# but python seems to offer no alternative.
def massage(attr: Any, seen: List[Any]):
"""
massage takes an arbitrary python value and attempts to *deeply* convert it into
    a plain-old-python-value that can be registered as an output. In general, this means leaving alone
things like strings, ints, bools. However, it does mean trying to make other values into either
lists or dictionaries as appropriate. In general, iterable things are turned into lists, and
dictionary-like things are turned into dictionaries.
"""
# Basic primitive types (numbers, booleans, strings, etc.) don't need any special handling.
if is_primitive(attr):
return attr
# from this point on, we have complex objects. If we see them again, we don't want to emit them
# again fully or else we'd loop infinitely.
if reference_contains(attr, seen):
# Note: for Resources we hit again, emit their urn so cycles can be easily understood in
# the popo objects.
if isinstance(attr, Resource):
return attr.urn
# otherwise just emit as nothing to stop the looping.
return None
seen.append(attr)
# first check if the value is an actual dictionary. If so, massage the values of it to deeply
# make sure this is a popo.
if isinstance(attr, dict):
result = {}
for key, value in attr.items():
# ignore private keys
if not key.startswith("_"):
result[key] = massage(value, seen)
return result
if isinstance(attr, Output):
return attr.apply(lambda v: massage(v, seen))
if isawaitable(attr):
return Output.from_input(attr).apply(lambda v: massage(v, seen))
if isinstance(attr, Resource):
result = massage(attr.__dict__, seen)
# In preview only, we mark the result with "@isPulumiResource" to indicate that it is derived
# from a resource. This allows the engine to perform resource-specific filtering of unknowns
# from output diffs during a preview. This filtering is not necessary during an update because
# all property values are known.
if is_dry_run():
result["@isPulumiResource"] = True
return result
if hasattr(attr, "__dict__"):
# recurse on the dictionary itself. It will be handled above.
return massage(attr.__dict__, seen)
# finally, recurse through iterables, converting into a list of massaged values.
return [massage(a, seen) for a in attr]
def reference_contains(val1: Any, seen: List[Any]) -> bool:
for val2 in seen:
if val1 is val2:
return True
return False
def is_primitive(attr: Any) -> bool:
if attr is None:
return True
if isinstance(attr, str):
return True
# dictionaries, lists and dictionary-like things are not primitive.
if isinstance(attr, dict):
return False
if hasattr(attr, "__dict__"):
return False
try:
iter(attr)
return False
except TypeError:
pass
return True
def register_stack_transformation(t: ResourceTransformation):
"""
Add a transformation to all future resources constructed in this Pulumi stack.
"""
root_resource = get_root_resource()
if root_resource is None:
raise Exception("The root stack resource was referenced before it was initialized.")
if root_resource._transformations is None:
root_resource._transformations = [t]
else:
root_resource._transformations = root_resource._transformations + [t]
|
the-stack_0_12520 | def addupto(n):
    # 1 assignment
total = 0
# loop runs n times
for i in range(1, n+1):
# 1 assignment
total += i
return total
# runs 1 time
print(addupto(int(input("Enter value of n: "))))
# Each statement outside a loop contributes constant (1) time.
# The time for statements inside a loop is n, where n is the number of times the loop runs.
# Big O upper-bounds the time complexity,
# which means an algorithm can take its Big O time or less.
# The total work here is n + 2, but Big O looks at the big picture,
# so we neglect all constants and lower-order terms.
# So the complexity is O(n).
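# For contrast, a short sketch (addupto_constant is an extra example, not part of the
# exercise above): Gauss's formula n*(n+1)/2 computes the same sum with a fixed number
# of operations, i.e. O(1) time instead of O(n).
def addupto_constant(n):
    return n * (n + 1) // 2
# e.g. addupto_constant(100) == addupto(100) == 5050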
# For O(n^2 + 2n) we would neglect the 2n term, because it is much smaller than n^2 for large inputs. |
the-stack_0_12523 | import csv
import sys
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
TEST_SIZE = 0.4
def main():
# Check command-line arguments
if len(sys.argv) != 2:
sys.exit("Usage: python shopping.py data")
# Load data from spreadsheet and split into train and test sets
evidence, labels = load_data(sys.argv[1])
X_train, X_test, y_train, y_test = train_test_split(
evidence, labels, test_size=TEST_SIZE
)
# Train model and make predictions
model = train_model(X_train, y_train)
predictions = model.predict(X_test)
sensitivity, specificity = evaluate(y_test, predictions)
# Print results
print(f"Correct: {(y_test == predictions).sum()}")
print(f"Incorrect: {(y_test != predictions).sum()}")
print(f"True Positive Rate: {100 * sensitivity:.2f}%")
print(f"True Negative Rate: {100 * specificity:.2f}%")
def load_data(filename):
"""
Load shopping data from a CSV file `filename` and convert into a list of
evidence lists and a list of labels. Return a tuple (evidence, labels).
evidence should be a list of lists, where each list contains the
following values, in order:
- Administrative, an integer
- Administrative_Duration, a floating point number
- Informational, an integer
- Informational_Duration, a floating point number
- ProductRelated, an integer
- ProductRelated_Duration, a floating point number
- BounceRates, a floating point number
- ExitRates, a floating point number
- PageValues, a floating point number
- SpecialDay, a floating point number
- Month, an index from 0 (January) to 11 (December)
- OperatingSystems, an integer
- Browser, an integer
- Region, an integer
- TrafficType, an integer
- VisitorType, an integer 0 (not returning) or 1 (returning)
- Weekend, an integer 0 (if false) or 1 (if true)
labels should be the corresponding list of labels, where each label
is 1 if Revenue is true, and 0 otherwise.
"""
with open("shopping.csv") as f:
reader = csv.reader(f)
next(reader)
months = ["Jan", "Feb", "Mar", "Apr", "May", "June",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
data = []
for row in reader:
data.append({
"evidence": [int(row[0]), float(row[1]), int(row[2]), float(row[3]), int(row[4]), float(row[5]), float(row[6]), float(row[7]), float(row[8]), float(row[9]),
months.index(row[10]), int(row[11]), int(row[12]), int(row[13]), int(row[14]), 0 if row[15] == "New_Visitor" else 1, 0 if row[16] == "FALSE" else 1],
"label": 0 if row[17] == "FALSE" else 1
})
evidence = [row["evidence"] for row in data]
labels = [row["label"] for row in data]
return (evidence, labels)
def train_model(evidence, labels):
"""
Given a list of evidence lists and a list of labels, return a
fitted k-nearest neighbor model (k=1) trained on the data.
"""
model = KNeighborsClassifier(n_neighbors=1)
model.fit(evidence, labels)
return model
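# A sketch that goes beyond the k=1 requirement (pick_k and its candidate values are
# illustrative, not part of the assignment specification): estimate accuracy for a few
# values of k with 5-fold cross-validation and return the best-scoring one.
def pick_k(evidence, labels, candidates=(1, 3, 5, 7)):
    from sklearn.model_selection import cross_val_score
    scores = {k: cross_val_score(KNeighborsClassifier(n_neighbors=k),
                                 evidence, labels, cv=5).mean()
              for k in candidates}
    return max(scores, key=scores.get)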
def evaluate(labels, predictions):
"""
Given a list of actual labels and a list of predicted labels,
return a tuple (sensitivity, specificty).
Assume each label is either a 1 (positive) or 0 (negative).
`sensitivity` should be a floating-point value from 0 to 1
representing the "true positive rate": the proportion of
actual positive labels that were accurately identified.
`specificity` should be a floating-point value from 0 to 1
representing the "true negative rate": the proportion of
actual negative labels that were accurately identified.
"""
cm = confusion_matrix(labels, predictions)
tp = cm[1][1]
tn = cm[0][0]
actual_positive = cm[1][1] + cm[1][0]
actual_negative = cm[0][0] + cm[0][1]
return (tp / actual_positive, tn / actual_negative)
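# A sketch of an equivalent formulation (evaluate_unpacked is illustrative and not
# required by the specification): unpack the confusion matrix so the true/false
# positive/negative counts are explicit, then compute the same two rates.
def evaluate_unpacked(labels, predictions):
    tn, fp, fn, tp = confusion_matrix(labels, predictions).ravel()
    return (tp / (tp + fn), tn / (tn + fp))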
if __name__ == "__main__":
main()
|
the-stack_0_12524 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from base_test import BaseTest
from classroom_snippets import ClassroomSnippets
class SnippetsTest(BaseTest):
@classmethod
def setUpClass(cls):
super(SnippetsTest, cls).setUpClass()
cls.snippets = ClassroomSnippets(cls.service)
def test_create_course(self):
course = self.snippets.create_course()
self.assertIsNotNone(course)
self.delete_course_on_cleanup(course.get('id'))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_12525 | from typing import List, Optional, Union
from achi.consensus.block_record import BlockRecord
from achi.consensus.blockchain_interface import BlockchainInterface
from achi.consensus.constants import ConsensusConstants
from achi.consensus.deficit import calculate_deficit
from achi.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from achi.consensus.make_sub_epoch_summary import make_sub_epoch_summary
from achi.consensus.pot_iterations import is_overflow_block
from achi.types.blockchain_format.classgroup import ClassgroupElement
from achi.types.blockchain_format.sized_bytes import bytes32
from achi.types.blockchain_format.slots import ChallengeBlockInfo
from achi.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from achi.types.full_block import FullBlock
from achi.types.header_block import HeaderBlock
from achi.util.ints import uint8, uint32, uint64
def block_to_block_record(
constants: ConsensusConstants,
blocks: BlockchainInterface,
required_iters: uint64,
full_block: Optional[Union[FullBlock, HeaderBlock]],
header_block: Optional[HeaderBlock],
) -> BlockRecord:
if full_block is None:
assert header_block is not None
block: Union[HeaderBlock, FullBlock] = header_block
else:
block = full_block
prev_b = blocks.try_block_record(block.prev_header_hash)
if block.height > 0:
assert prev_b is not None
sub_slot_iters, _ = get_next_sub_slot_iters_and_difficulty(
constants, len(block.finished_sub_slots) > 0, prev_b, blocks
)
overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
deficit = calculate_deficit(
constants,
block.height,
prev_b,
overflow,
len(block.finished_sub_slots),
)
found_ses_hash: Optional[bytes32] = None
ses: Optional[SubEpochSummary] = None
if len(block.finished_sub_slots) > 0:
for sub_slot in block.finished_sub_slots:
if sub_slot.challenge_chain.subepoch_summary_hash is not None:
found_ses_hash = sub_slot.challenge_chain.subepoch_summary_hash
if found_ses_hash:
assert prev_b is not None
assert len(block.finished_sub_slots) > 0
ses = make_sub_epoch_summary(
constants,
blocks,
block.height,
blocks.block_record(prev_b.prev_hash),
block.finished_sub_slots[0].challenge_chain.new_difficulty,
block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters,
)
assert ses.get_hash() == found_ses_hash
prev_transaction_block_height = uint32(0)
curr: Optional[BlockRecord] = blocks.try_block_record(block.prev_header_hash)
while curr is not None and not curr.is_transaction_block:
curr = blocks.try_block_record(curr.prev_hash)
if curr is not None and curr.is_transaction_block:
prev_transaction_block_height = curr.height
return header_block_to_sub_block_record(
constants,
required_iters,
block,
sub_slot_iters,
overflow,
deficit,
prev_transaction_block_height,
ses,
)
def header_block_to_sub_block_record(
constants: ConsensusConstants,
required_iters: uint64,
block: Union[FullBlock, HeaderBlock],
sub_slot_iters: uint64,
overflow: bool,
deficit: uint8,
prev_transaction_block_height: uint32,
ses: Optional[SubEpochSummary],
) -> BlockRecord:
reward_claims_incorporated = (
block.transactions_info.reward_claims_incorporated if block.transactions_info is not None else None
)
cbi = ChallengeBlockInfo(
block.reward_chain_block.proof_of_space,
block.reward_chain_block.challenge_chain_sp_vdf,
block.reward_chain_block.challenge_chain_sp_signature,
block.reward_chain_block.challenge_chain_ip_vdf,
)
if block.reward_chain_block.infused_challenge_chain_ip_vdf is not None:
icc_output: Optional[ClassgroupElement] = block.reward_chain_block.infused_challenge_chain_ip_vdf.output
else:
icc_output = None
if len(block.finished_sub_slots) > 0:
finished_challenge_slot_hashes: Optional[List[bytes32]] = [
sub_slot.challenge_chain.get_hash() for sub_slot in block.finished_sub_slots
]
finished_reward_slot_hashes: Optional[List[bytes32]] = [
sub_slot.reward_chain.get_hash() for sub_slot in block.finished_sub_slots
]
finished_infused_challenge_slot_hashes: Optional[List[bytes32]] = [
sub_slot.infused_challenge_chain.get_hash()
for sub_slot in block.finished_sub_slots
if sub_slot.infused_challenge_chain is not None
]
elif block.height == 0:
finished_challenge_slot_hashes = [constants.GENESIS_CHALLENGE]
finished_reward_slot_hashes = [constants.GENESIS_CHALLENGE]
finished_infused_challenge_slot_hashes = None
else:
finished_challenge_slot_hashes = None
finished_reward_slot_hashes = None
finished_infused_challenge_slot_hashes = None
prev_transaction_block_hash = (
block.foliage_transaction_block.prev_transaction_block_hash
if block.foliage_transaction_block is not None
else None
)
timestamp = block.foliage_transaction_block.timestamp if block.foliage_transaction_block is not None else None
fees = block.transactions_info.fees if block.transactions_info is not None else None
return BlockRecord(
block.header_hash,
block.prev_header_hash,
block.height,
block.weight,
block.total_iters,
block.reward_chain_block.signage_point_index,
block.reward_chain_block.challenge_chain_ip_vdf.output,
icc_output,
block.reward_chain_block.get_hash(),
cbi.get_hash(),
sub_slot_iters,
block.foliage.foliage_block_data.pool_target.puzzle_hash,
block.foliage.foliage_block_data.farmer_reward_puzzle_hash,
block.foliage.foliage_block_data.timelord_reward_puzzle_hash,
required_iters,
deficit,
overflow,
prev_transaction_block_height,
timestamp,
prev_transaction_block_hash,
fees,
reward_claims_incorporated,
finished_challenge_slot_hashes,
finished_infused_challenge_slot_hashes,
finished_reward_slot_hashes,
ses,
)
|
the-stack_0_12526 | from pathlib import Path
from invoke import task
from nox.virtualenv import VirtualEnv
# Configuration values.
DUMP_DIR = '.dump'
VENV = 'venv'
project_name = 'scrapd'
docker_org = 'scrapd'
docker_repo = f'{docker_org}/{project_name}'
@task
def build_docker(c):
"""Build a docker image."""
tag = c.run('git describe', hide=True)
docker_img = f'{docker_repo}:{tag.stdout.strip()}'
c.run(f'docker build -t {docker_img} .')
@task
def clean(c):
"""Remove unwanted files and artifacts in this project (!DESTRUCTIVE!)."""
clean_docker(c)
clean_repo(c)
@task
def clean_docker(c):
"""Remove all docker images built for this project (!DESTRUCTIVE!)."""
c.run(f'docker image rm -f $(docker image ls --filter reference={docker_repo} -q) || true')
@task
def clean_repo(c):
"""Remove unwanted files in project (!DESTRUCTIVE!)."""
c.run('git clean -ffdx')
c.run('git reset --hard')
@task
def dump_json(c):
"""Dump errors and create JSON data set."""
c.run(f'mkdir -p {DUMP_DIR}')
c.run('scrapd -vvv --dump 1>.dump/dump.json 2>.dump/dump.json.log')
@task
def dump_csv(c):
"""Dump errors and create CSV data set."""
c.run(f'mkdir -p {DUMP_DIR}')
c.run('scrapd -vvv --dump --format csv 1>.dump/dump.csv 2>.dump/dump.csv.log')
@task
def flame_graph(c):
"""Create an interactive CPU flame graph."""
_, venv_bin, _ = get_venv(VENV)
pyspy = venv_bin / 'py-spy'
c.run(
f'sudo {pyspy.resolve()} record -i -F -o profile.svg -- {(venv_bin /project_name ).resolve()} -v --pages 5 --format count'
)
@task
def nox(c, s=''):
"""Wrapper for the nox tasks (`inv nox list` for details)."""
if not s:
c.run('nox --list')
else:
c.run(f'nox -s {s}')
@task
def profile(c):
"""Create an interactive CPU flame graph."""
_, venv_bin, _ = get_venv(VENV)
pyinstrument = venv_bin / 'pyinstrument'
c.run(f'{pyinstrument.resolve()} --renderer html {(venv_bin /project_name ).resolve()} -v --format count --pages 5',
pty=True)
@task
def publish(c):
"""Publish the documentation."""
c.run('./.circleci/publish.sh')
@task(default=True)
def setup(c):
"""Setup the developper environment."""
c.run('nox --envdir .')
def get_venv(venv):
"""
Return `Path` objects from the venv.
:param str venv: venv name
:return: the venv `Path`, the `bin` folder `Path` within the venv, and if specified, the `Path` object of the
activate script within the venv.
:rtype: a tuple of 3 `Path` objects.
"""
location = Path(venv)
venv = VirtualEnv(location.resolve())
venv_bin = Path(venv.bin)
activate = venv_bin / 'activate'
return venv, venv_bin, activate
|
the-stack_0_12527 | from save_exp import save_experiment_info, save_acc
import argparse
import os
import torch
import time
from networks.nonlinear_nets import NonlinearNet
from utils import get_hms, TrainingObject
from optim import get_optim
from tensorboardX import SummaryWriter
import py3nvml
from math import ceil
# Training settings
parser = argparse.ArgumentParser(description='Nonlinear example')
parser.add_argument('outdir', type=str, help='experiment directory')
parser.add_argument('--type', default=None, type=str, nargs='+')
parser.add_argument('-C', type=int, default=96, help='number channels')
parser.add_argument('--seed', type=int, default=None, metavar='S',
help='random seed (default: None)')
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--smoke-test', action="store_true",
help="Finish quickly for testing")
parser.add_argument('--datadir', type=str, default='/scratch/share/cifar',
help='Default location for the dataset')
parser.add_argument('--dataset', default='cifar100', type=str,
help='which dataset to use',
choices=['cifar10', 'cifar100', 'tiny_imagenet'])
parser.add_argument('--resume', action='store_true',
help='Rerun from a checkpoint')
parser.add_argument('--no-comment', action='store_true',
help='Turns off prompt to enter comments about run.')
parser.add_argument('--nsamples', type=int, default=0,
help='The number of runs to test.')
parser.add_argument('--exist-ok', action='store_true',
help='If true, is ok if output directory already exists')
parser.add_argument('--epochs', default=120, type=int, help='num epochs')
parser.add_argument('--cpu', action='store_true', help='Do not run on gpus')
parser.add_argument('--num-gpus', type=float, default=0.5)
parser.add_argument('--no-scheduler', action='store_true')
# Optimizer hyperparameters
parser.add_argument('--lr', default=0.5, type=float, help='learning rate')
parser.add_argument('--lr1', default=None, type=float,
help='learning rate for wavelet domain')
parser.add_argument('--mom', default=0.85, type=float, help='momentum')
parser.add_argument('--mom1', default=None, type=float,
help='momentum for wavelet domain')
parser.add_argument('--wd', default=1e-4, type=float, help='weight decay')
parser.add_argument('--wd1', default=1e-5, type=float, help='l1 weight decay')
parser.add_argument('--reg', default='l2', type=str, help='regularization term')
parser.add_argument('--steps', default=[60,80,100], type=int, nargs='+')
parser.add_argument('--gamma', default=0.2, type=float, help='Lr decay')
# Network hyperparameters
parser.add_argument('--pixel-k', default=5, type=int,
help='pixel kernel spatial support. typically 3 or 5')
parser.add_argument('--lp-k', default=3, type=int,
help='lowpass kernel spatial support. typically 1, 3 or 5')
parser.add_argument('--bp-ks', default=(1,), type=int, nargs='+',
help='bandpass kernel spatial support. typically 1 or 3')
parser.add_argument('--pixel-nl', default='relu', type=str,
choices=['none', 'relu'],
help='pixel nonlinearity')
parser.add_argument('--lp-nl', default='none', type=str,
choices=['none', 'relu', 'relu2', 'softshrink'],
help='lowpass nonlinearity')
parser.add_argument('--bp-nl', default='none', type=str,
choices=['none', 'relu', 'relu2', 'softshrink'],
help='bandpass nonlinearity')
if __name__ == "__main__":
args = parser.parse_args()
# Create reporting objects
args.verbose = True
outdir = os.path.join(os.environ['HOME'], 'gainlayer_results', args.outdir)
tr_writer = SummaryWriter(os.path.join(outdir, 'train'))
val_writer = SummaryWriter(os.path.join(outdir, 'val'))
if not os.path.exists(outdir):
os.mkdir(outdir)
if args.type is None:
type = 'ref'
else:
type = args.type[0]
py3nvml.grab_gpus(ceil(args.num_gpus))
model = NonlinearNet(args.dataset, type, num_channels=args.C,
wd=args.wd, wd1=args.wd1,
pixel_k=args.pixel_k, lp_k=args.lp_k,
bp_ks=args.bp_ks, pixel_nl=args.pixel_nl,
lp_nl=args.lp_nl, bp_nl=args.bp_nl)
# ######################################################################
# Build the optimizer - use separate parameter groups for the gain
# and convolutional layers
default_params = model.parameters()
wave_params = model.wave_parameters()
optim, sched = get_optim(
'sgd', default_params, init_lr=args.lr,
steps=args.steps, wd=0, gamma=args.gamma, momentum=args.mom,
max_epochs=args.epochs)
if len(wave_params) > 0:
if args.lr1 is None:
args.lr1 = args.lr
if args.mom1 is None:
args.mom1 = args.mom
optim2, sched2 = get_optim(
'sgd', wave_params, init_lr=args.lr1,
steps=args.steps, wd=0, gamma=args.gamma, momentum=args.mom1,
max_epochs=args.epochs)
else:
optim2, sched2 = None, None
trn = TrainingObject(model, args.dataset, args.datadir, optim, sched,
optim2, sched2, args.batch_size, args.seed,
args.num_gpus, args.verbose)
trn._final_epoch = args.epochs
# Copy this source file to the output directory for record keeping
if args.resume:
trn._restore(os.path.join(outdir, 'model_last.pth'))
else:
save_experiment_info(outdir, args.seed, args.no_comment, trn.model)
if args.seed is not None and trn.use_cuda:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Train for set number of epochs
elapsed_time = 0
best_acc = 0
trn.step_lr()
for epoch in range(trn.last_epoch, trn.final_epoch):
print("\n| Training Epoch #{}".format(epoch))
print('| Learning rate: {}'.format(
trn.optimizer.param_groups[0]['lr']))
print('| Momentum : {}'.format(
trn.optimizer.param_groups[0]['momentum']))
start_time = time.time()
# Train for one iteration and update
trn_results = trn._train_iteration()
tr_writer.add_scalar('loss', trn_results['mean_loss'], epoch)
tr_writer.add_scalar('acc', trn_results['mean_accuracy'], epoch)
tr_writer.add_scalar('acc5', trn_results['acc5'], epoch)
# Validate
val_results = trn._test()
val_writer.add_scalar('loss', val_results['mean_loss'], epoch)
val_writer.add_scalar('acc', val_results['mean_accuracy'], epoch)
val_writer.add_scalar('acc5', val_results['acc5'], epoch)
acc = val_results['mean_accuracy']
if acc > best_acc:
print('| Saving Best model...\t\t\tTop1 = {:.2f}%'.format(acc))
trn._save(outdir, 'model_best.pth')
best_acc = acc
trn._save(outdir, name='model_last.pth')
epoch_time = time.time() - start_time
elapsed_time += epoch_time
print('| Elapsed time : %d:%02d:%02d\t Epoch time: %.1fs' % (
get_hms(elapsed_time) + (epoch_time,)))
# Update the scheduler
trn.step_lr()
save_acc(outdir, best_acc, acc)
|
the-stack_0_12528 | from gzip import GzipFile
from StringIO import StringIO
from django.conf import settings
import mock
from olympia.amo.tests import TestCase, addon_factory, reverse_ns
from olympia.api.middleware import (
GZipMiddlewareForAPIOnly, IdentifyAPIRequestMiddleware)
class TestIdentifyAPIRequestMiddleware(TestCase):
def test_api_identified(self):
request = mock.Mock()
request.path_info = '/api/v3/lol/'
IdentifyAPIRequestMiddleware().process_request(request)
assert request.is_api
def test_disabled_for_the_rest(self):
"""Test that we don't tag the request as API on "regular" pages."""
request = mock.Mock()
request.path_info = '/'
IdentifyAPIRequestMiddleware().process_request(request)
assert not request.is_api
request.path = '/en-US/firefox/'
IdentifyAPIRequestMiddleware().process_request(request)
assert not request.is_api
class TestGzipMiddleware(TestCase):
@mock.patch('django.middleware.gzip.GZipMiddleware.process_response')
def test_enabled_for_api(self, django_gzip_middleware):
"""Test that we call the gzip middleware for API pages."""
request = mock.Mock()
request.is_api = True
GZipMiddlewareForAPIOnly().process_response(request, mock.Mock())
assert django_gzip_middleware.call_count == 1
@mock.patch('django.middleware.gzip.GZipMiddleware.process_response')
def test_disabled_for_the_rest(self, django_gzip_middleware):
"""Test that we don't call gzip middleware for "regular" pages."""
request = mock.Mock()
request.is_api = False
GZipMiddlewareForAPIOnly().process_response(request, mock.Mock())
assert django_gzip_middleware.call_count == 0
def test_settings(self):
"""Test that gzip middleware is near the top of the settings list."""
# Gzip middleware should be near the top of the list, so that it runs
# last in the process_response phase, in case the response body has
# been modified by another middleware.
# Sadly, raven inserts 2 middlewares before, but luckily the ones it
# automatically inserts not modify the response.
assert (
settings.MIDDLEWARE[3] ==
'olympia.api.middleware.GZipMiddlewareForAPIOnly')
def test_api_endpoint_gzipped(self):
"""Test a simple API endpoint to make sure gzip is active there."""
addon = addon_factory()
url = reverse_ns('addon-detail', kwargs={'pk': addon.pk})
response = self.client.get(url)
assert response.status_code == 200
assert response.content
assert 'Content-Encoding' not in response
response_gzipped = self.client.get(
url, HTTP_ACCEPT_ENCODING='gzip',
# Pretend that this happened over https, to test that this is still
# enabled even for https.
**{'wsgi.url_scheme': 'https'})
assert response_gzipped.status_code == 200
assert response_gzipped.content
assert response_gzipped['Content-Encoding'] == 'gzip'
assert len(response_gzipped.content) < len(response.content)
ungzipped_content = GzipFile(
'', 'r', 0, StringIO(response_gzipped.content)).read()
assert ungzipped_content == response.content
|
the-stack_0_12529 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import *
from .entities import *
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
class FarmerCreationForm(UserCreationForm):
    class Meta(UserCreationForm.Meta):
        model = Farmer
        fields = ('username', 'email')
    @transaction.atomic
    def save(self):
        # The ModelForm already persists the fields declared in Meta; we only need to
        # flag the account role before saving.
        user = super().save(commit=False)
        user.is_farmer = True
        user.save()
        return user
class SalesOutletCreationForm(UserCreationForm):
    class Meta(UserCreationForm.Meta):
        model = SalesOutlet
        fields = ('username', 'email')
    @transaction.atomic
    def save(self):
        # Flag the account role; the declared fields are saved by the ModelForm itself.
        user = super().save(commit=False)
        user.is_sales = True
        user.save()
        return user
class QA_LabCreationForm(UserCreationForm):
    class Meta(UserCreationForm.Meta):
        model = QA_Lab
        fields = ('username', 'email', 'license_no', 'license_exp_date', 'current_station_of_work')
    @transaction.atomic
    def save(self):
        # Flag the account role; the license and station fields are saved by the ModelForm.
        user = super().save(commit=False)
        user.is_lab = True
        user.save()
        return user
class VetMedicineDistributerCreationForm(UserCreationForm):
    class Meta(UserCreationForm.Meta):
        model = VetMedicineDistributer
        fields = ('username', 'email', 'license_no', 'license_exp_date', 'current_station_of_work')
    @transaction.atomic
    def save(self):
        # Flag the account role; the license and station fields are saved by the ModelForm.
        user = super().save(commit=False)
        user.is_vmd = True
        user.save()
        return user
|
the-stack_0_12530 | """Defines an object which manages resources defined from packages."""
from rpg.data import resource as r
import typing
if typing.TYPE_CHECKING:
from rpg.data.resource import Resource, ResourceType
from typing import Dict, List, Optional
_fmtReplaceError = "cannot replace {} {}; master {} not in allowed list {}"
class ResourceAlreadyDefinedError(KeyError):
    def __init__(self, resource_id: str, name: str) -> None:
        super().__init__("resource '{}' already defined in {} collection".format(
            resource_id, name
        ))
class Resources(object):
"""Collection which manages resources loaded from packages, as well as
looking resources up by unique id and type.
"""
def __init__(self) -> None:
"""Initialize this Resources collection.
This function creates ResourceType.COUNT dictionaries in a
list which may be indexed by a ResourceType value to get the dictionary
of that type of resource.
"""
self._map: 'List[Dict[str, Resource]]' = [
dict() for _ in range(r.ResourceType.COUNT)
]
self._package_name: 'Optional[str]' = None
def add(self, item: 'r.Resource') -> None:
"""Add a resource to the current collection of resources.
:param item: The resource to add
"""
type_id = item.type_id()
resource_id = item.resource_id()
collection = self._map[type_id]
if resource_id in collection:
raise ResourceAlreadyDefinedError(resource_id, type_id.name)
collection[resource_id] = item
def get(self, type_id: 'r.ResourceType',
resource_id: str) -> 'Optional[r.Resource]':
"""Get a resource of the given type with the given resource_id.
:param type_id: The ResourceType of the resource to look up
:param resource_id: The string id of the resource to get
:return: The resource if found, otherwise None
"""
return self._map[type_id].get(resource_id, None)
def set_package(self, package_name: str):
"""Set the name of the controlling package on this resources
collection.
:param package_name: The name of the package for which
"""
self._package_name = package_name
for sub_map in self._map:
for key, value in sub_map.items():
value._package = package_name
def enumerate(self, resource_type: 'Optional[ResourceType]' = None):
"""Create a generator which returns each item in this collection.
:return: A tuple of (ResourceType, str, Resource) for each item in this
collection.
"""
if resource_type is not None:
for key, value in self._map[resource_type].items():
yield value.type_id(), key, value
else:
for collection in self._map:
for key, value in collection.items():
yield value.type_id(), key, value
def count(self, t_id: 'Optional[ResourceType]' = None) -> int:
"""Get the count of the given resource type in this collection.
If the t_id parameter is None (the default), this method will return
the total count of all resource types in this collection.
:param t_id: The ResourceType to count, or None for every ResourceType
:return: The number of the given resource type in this collection
"""
if t_id is None:
total = 0
for collection in self._map:
total += len(collection)
return total
else:
return len(self._map[t_id])
def merge(self, other: 'Resources',
masters: 'Optional[List[str]]' = None) -> 'Optional[str]':
"""Add all resources defined in the other Resources collection to this
collection.
:param other: The other Resources collection to take all objects from
:param masters: A list of master packages from which resources may be
replaced
:return: None if no errors happen during the merge, otherwise a string
with a description of the errors
"""
if masters is None:
masters = list() # type: List[str]
_error_str = ""
for t_id, key, value in other.enumerate():
old_obj = self._map[t_id].get(key, None)
if old_obj is not None:
old_pkg = old_obj.package()
if old_pkg in masters:
self._map[t_id][key] = value
else:
                    _error_str += _fmtReplaceError.format(
                        t_id.name, key, old_pkg, masters
                    ) + "\n"
else:
self._map[t_id][key] = value
_error_str = _error_str.strip()
return _error_str if _error_str != "" else None
def clear(self):
"""Remove all resources from this collection."""
for collection in self._map:
collection.clear()
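# Illustrative usage sketch (hypothetical: `_StubType` and `_StubResource` are
# stand-ins for real rpg.data.resource classes and are not part of the
# package). It shows how add/get/merge interact, including replacement of a
# resource that comes from a master package.
def _resources_usage_sketch():
    class _StubType(int):
        name = 'stub'  # Resources only needs an index that has a ``name``
    class _StubResource:
        def __init__(self, resource_id, package=None):
            self._id, self._package = resource_id, package
        def type_id(self):
            return _StubType(0)
        def resource_id(self):
            return self._id
        def package(self):
            return self._package
    base, expansion = Resources(), Resources()
    base.add(_StubResource('town_square'))
    base.set_package('core')
    expansion.add(_StubResource('town_square'))
    expansion.set_package('expansion')
    # Replacing 'town_square' is only allowed because 'core' is a master.
    assert base.merge(expansion, masters=['core']) is None
    assert base.get(_StubType(0), 'town_square').package() == 'expansion'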
|
the-stack_0_12532 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy import optimize
from scipy.sparse import linalg as sp_linalg
from ._base import LinearClassifierMixin, LinearModel
from ._base import _deprecate_normalize, _rescale_data
from ._sag import sag_solver
from ..base import MultiOutputMixin, RegressorMixin, is_classifier
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..utils.validation import check_is_fitted
from ..utils.validation import _check_sample_weight
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..metrics import check_scoring
from ..exceptions import ConvergenceWarning
from ..utils.sparsefuncs import mean_variance_axis
def _solve_sparse_cg(
X, y, alpha, max_iter=None, tol=1e-3, verbose=0, X_offset=None, X_scale=None
):
def _get_rescaled_operator(X):
X_offset_scale = X_offset / X_scale
def matvec(b):
return X.dot(b) - b.dot(X_offset_scale)
def rmatvec(b):
return X.T.dot(b) - X_offset_scale * np.sum(b)
X1 = sparse.linalg.LinearOperator(shape=X.shape, matvec=matvec, rmatvec=rmatvec)
return X1
n_samples, n_features = X.shape
if X_offset is None or X_scale is None:
X1 = sp_linalg.aslinearoperator(X)
else:
X1 = _get_rescaled_operator(X)
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype
)
# FIXME atol
try:
coef, info = sp_linalg.cg(C, y_column, tol=tol, atol="legacy")
except TypeError:
# old scipy
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype
)
# FIXME atol
try:
coefs[i], info = sp_linalg.cg(
C, y_column, maxiter=max_iter, tol=tol, atol="legacy"
)
except TypeError:
# old scipy
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn(
"sparse_cg did not converge after %d iterations." % info,
ConvergenceWarning,
)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(
X, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter
)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_features = X.shape[1]
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[:: n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True, overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features], dtype=X.dtype)
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[:: n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True, overwrite_a=False).ravel()
A.flat[:: n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[:: n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True, overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn(
"Singular matrix in solving dual problem. Using "
"least-squares solution instead."
)
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[:: n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples], K.dtype)
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[:: n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(
K, target, sym_pos=True, overwrite_a=False
).ravel()
K.flat[:: n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
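# Illustrative sketch (not part of the library code): the dual ("kernel")
# formulation used above yields the same primal coefficients as the normal
# equations, i.e. X^T (X X^T + alpha*Id)^-1 y == (X^T X + alpha*Id)^-1 X^T y.
def _primal_dual_equivalence_sketch(alpha=2.0, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(5, 12)  # n_features > n_samples, where the dual form is cheaper
    y = rng.randn(5)
    w_dual = X.T @ np.linalg.solve(X @ X.T + alpha * np.eye(5), y)
    w_primal = np.linalg.solve(X.T @ X + alpha * np.eye(12), X.T @ y)
    return np.allclose(w_dual, w_primal)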
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size), dtype=X.dtype)
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def _solve_lbfgs(
X, y, alpha, positive=True, max_iter=None, tol=1e-3, X_offset=None, X_scale=None
):
"""Solve ridge regression with LBFGS.
The main purpose is fitting with forcing coefficients to be positive.
For unconstrained ridge regression, there are faster dedicated solver methods.
Note that with positive bounds on the coefficients, LBFGS seems faster
than scipy.optimize.lsq_linear.
"""
n_samples, n_features = X.shape
options = {}
if max_iter is not None:
options["maxiter"] = max_iter
config = {
"method": "L-BFGS-B",
"tol": tol,
"jac": True,
"options": options,
}
if positive:
config["bounds"] = [(0, np.inf)] * n_features
if X_offset is not None and X_scale is not None:
X_offset_scale = X_offset / X_scale
else:
X_offset_scale = None
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
for i in range(y.shape[1]):
x0 = np.zeros((n_features,))
y_column = y[:, i]
def func(w):
residual = X.dot(w) - y_column
if X_offset_scale is not None:
residual -= w.dot(X_offset_scale)
f = 0.5 * residual.dot(residual) + 0.5 * alpha[i] * w.dot(w)
grad = X.T @ residual + alpha[i] * w
if X_offset_scale is not None:
grad -= X_offset_scale * np.sum(residual)
return f, grad
result = optimize.minimize(func, x0, **config)
if not result["success"]:
warnings.warn(
"The lbfgs solver did not converge. Try increasing max_iter "
f"or tol. Currently: max_iter={max_iter} and tol={tol}",
ConvergenceWarning,
)
coefs[i] = result["x"]
return coefs
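# Illustrative sketch (not used by the solver above): a finite-difference check
# of the objective/gradient pair minimized in _solve_lbfgs,
#     f(w)      = 0.5 * ||X w - y||^2 + 0.5 * alpha * ||w||^2
#     grad f(w) = X^T (X w - y) + alpha * w
def _lbfgs_objective_sketch(alpha=0.3, eps=1e-6, seed=0):
    rng = np.random.RandomState(seed)
    X, y, w = rng.randn(15, 4), rng.randn(15), rng.randn(4)
    def f(v):
        residual = X @ v - y
        return 0.5 * residual @ residual + 0.5 * alpha * v @ v
    analytic = X.T @ (X @ w - y) + alpha * w
    numeric = np.array(
        [(f(w + eps * e) - f(w - eps * e)) / (2 * eps) for e in np.eye(4)]
    )
    return np.allclose(analytic, numeric, atol=1e-4)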
def _get_valid_accept_sparse(is_X_sparse, solver):
if is_X_sparse and solver in ["auto", "sag", "saga"]:
return "csr"
else:
return ["csr", "csc", "coo"]
def ridge_regression(
X,
y,
alpha,
*,
sample_weight=None,
solver="auto",
max_iter=None,
tol=1e-3,
verbose=0,
positive=False,
random_state=None,
return_n_iter=False,
return_intercept=False,
check_input=True,
):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {ndarray, sparse matrix, LinearOperator} of shape \
(n_samples, n_features)
Training data
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values
alpha : float or array-like of shape (n_targets,)
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
sample_weight : float or array-like of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
'sag', 'saga', 'lbfgs'}, default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
- 'lbfgs' uses L-BFGS-B algorithm implemented in
`scipy.optimize.minimize`. It can be used only when `positive`
is True.
All last six solvers support both dense and sparse data. However, only
'sag', 'sparse_cg', and 'lbfgs' support sparse input when `fit_intercept`
is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
For the 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' and saga solver, the default value is
1000. For 'lbfgs' solver, the default value is 15000.
tol : float, default=1e-3
Precision of the solution.
verbose : int, default=0
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
Only 'lbfgs' solver is supported in this case.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
return_n_iter : bool, default=False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
.. versionadded:: 0.17
return_intercept : bool, default=False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
.. versionadded:: 0.21
Returns
-------
coef : ndarray of shape (n_features,) or (n_targets, n_features)
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or ndarray of shape (n_targets,)
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
"""
return _ridge_regression(
X,
y,
alpha,
sample_weight=sample_weight,
solver=solver,
max_iter=max_iter,
tol=tol,
verbose=verbose,
positive=positive,
random_state=random_state,
return_n_iter=return_n_iter,
return_intercept=return_intercept,
X_scale=None,
X_offset=None,
check_input=check_input,
)
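# Illustrative sketch (not part of the library code): for a small dense problem
# without intercept, ridge_regression with the 'cholesky' solver matches the
# closed-form normal-equations solution w = (X^T X + alpha*Id)^-1 X^T y.
def _normal_equations_sketch(alpha=0.5, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(20, 4)
    y = rng.randn(20)
    w_closed_form = np.linalg.solve(X.T @ X + alpha * np.eye(4), X.T @ y)
    w_solver = ridge_regression(X, y, alpha=alpha, solver="cholesky")
    return np.allclose(w_closed_form, w_solver)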
def _ridge_regression(
X,
y,
alpha,
sample_weight=None,
solver="auto",
max_iter=None,
tol=1e-3,
verbose=0,
positive=False,
random_state=None,
return_n_iter=False,
return_intercept=False,
X_scale=None,
X_offset=None,
check_input=True,
):
has_sw = sample_weight is not None
if solver == "auto":
if positive:
solver = "lbfgs"
elif return_intercept:
# sag supports fitting intercept directly
solver = "sag"
elif not sparse.issparse(X):
solver = "cholesky"
else:
solver = "sparse_cg"
if solver not in ("sparse_cg", "cholesky", "svd", "lsqr", "sag", "saga", "lbfgs"):
raise ValueError(
"Known solvers are 'sparse_cg', 'cholesky', 'svd'"
" 'lsqr', 'sag', 'saga' or 'lbfgs'. Got %s." % solver
)
if positive and solver != "lbfgs":
raise ValueError(
"When positive=True, only 'lbfgs' solver can be used. "
f"Please change solver {solver} to 'lbfgs' "
"or set positive=False."
)
if solver == "lbfgs" and not positive:
raise ValueError(
"'lbfgs' solver can be used only when positive=True. "
"Please use another solver."
)
if return_intercept and solver != "sag":
raise ValueError(
"In Ridge, only 'sag' solver can directly fit the "
"intercept. Please change solver to 'sag' or set "
"return_intercept=False."
)
if check_input:
_dtype = [np.float64, np.float32]
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)
X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype, order="C")
y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError(
"Number of samples in X and y does not correspond: %d != %d"
% (n_samples, n_samples_)
)
if has_sw:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if solver not in ["sag", "saga"]:
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha, dtype=X.dtype).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError(
"Number of targets and number of penalties do not correspond: %d != %d"
% (alpha.size, n_targets)
)
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
n_iter = None
if solver == "sparse_cg":
coef = _solve_sparse_cg(
X,
y,
alpha,
max_iter=max_iter,
tol=tol,
verbose=verbose,
X_offset=X_offset,
X_scale=X_scale,
)
elif solver == "lsqr":
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == "cholesky":
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = "svd"
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = "svd"
elif solver in ["sag", "saga"]:
# precompute max_squared_sum for all targets
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1],), dtype=X.dtype)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
init = {
"coef": np.zeros((n_features + int(return_intercept), 1), dtype=X.dtype)
}
coef_, n_iter_, _ = sag_solver(
X,
target.ravel(),
sample_weight,
"squared",
alpha_i,
0,
max_iter,
tol,
verbose,
random_state,
False,
max_squared_sum,
init,
is_saga=solver == "saga",
)
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
elif solver == "lbfgs":
coef = _solve_lbfgs(
X,
y,
alpha,
positive=positive,
tol=tol,
max_iter=max_iter,
X_offset=X_offset,
X_scale=X_scale,
)
if solver == "svd":
if sparse.issparse(X):
raise TypeError("SVD solver does not support sparse inputs currently")
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
class _BaseRidge(LinearModel, metaclass=ABCMeta):
@abstractmethod
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
normalize="deprecated",
copy_X=True,
max_iter=None,
tol=1e-3,
solver="auto",
positive=False,
random_state=None,
):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.positive = positive
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
self._normalize = _deprecate_normalize(
self.normalize, default=False, estimator_name=self.__class__.__name__
)
if self.solver == "lbfgs" and not self.positive:
raise ValueError(
"'lbfgs' solver can be used only when positive=True. "
"Please use another solver."
)
if self.positive:
if self.solver not in ["auto", "lbfgs"]:
raise ValueError(
f"solver='{self.solver}' does not support positive fitting. Please"
" set the solver to 'auto' or 'lbfgs', or set `positive=False`"
)
else:
solver = self.solver
elif sparse.issparse(X) and self.fit_intercept:
if self.solver not in ["auto", "sparse_cg", "sag", "lbfgs"]:
raise ValueError(
"solver='{}' does not support fitting the intercept "
"on sparse data. Please set the solver to 'auto' or "
"'sparse_cg', 'sag', 'lbfgs' "
"or set `fit_intercept=False`".format(self.solver)
)
if self.solver == "lbfgs":
solver = "lbfgs"
elif self.solver == "sag" and self.max_iter is None and self.tol > 1e-4:
warnings.warn(
'"sag" solver requires many iterations to fit '
"an intercept with sparse inputs. Either set the "
'solver to "auto" or "sparse_cg", or set a low '
'"tol" and a high "max_iter" (especially if inputs are '
"not standardized)."
)
solver = "sag"
else:
solver = "sparse_cg"
else:
solver = self.solver
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
# when X is sparse we only remove offset from y
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X,
y,
self.fit_intercept,
self._normalize,
self.copy_X,
sample_weight=sample_weight,
return_mean=True,
)
if solver == "sag" and sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = _ridge_regression(
X,
y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver="sag",
positive=self.positive,
random_state=self.random_state,
return_n_iter=True,
return_intercept=True,
check_input=False,
)
# add the offset which was subtracted by _preprocess_data
self.intercept_ += y_offset
else:
if sparse.issparse(X) and self.fit_intercept:
# required to fit intercept with sparse_cg solver
params = {"X_offset": X_offset, "X_scale": X_scale}
else:
# for dense matrices or when intercept is set to 0
params = {}
self.coef_, self.n_iter_ = _ridge_regression(
X,
y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=solver,
positive=self.positive,
random_state=self.random_state,
return_n_iter=True,
return_intercept=False,
check_input=False,
**params,
)
self._set_intercept(X_offset, y_offset, X_scale)
return self
class Ridge(MultiOutputMixin, RegressorMixin, _BaseRidge):
"""Linear least squares with l2 regularization.
Minimizes the objective function::
||y - Xw||^2_2 + alpha * ||w||^2_2
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape (n_samples, n_targets)).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, ndarray of shape (n_targets,)}, default=1.0
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
fit_intercept : bool, default=True
Whether to fit the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. ``X`` and ``y`` are expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and
will be removed in 1.2.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
For 'lbfgs' solver, the default value is 15000.
tol : float, default=1e-3
Precision of the solution.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
'sag', 'saga', 'lbfgs'}, default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
- 'lbfgs' uses L-BFGS-B algorithm implemented in
`scipy.optimize.minimize`. It can be used only when `positive`
is True.
All last six solvers support both dense and sparse data. However, only
'sag', 'sparse_cg', and 'lbfgs' support sparse input when `fit_intercept`
is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
Only 'lbfgs' solver is supported in this case.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
.. versionadded:: 0.17
`random_state` to support Stochastic Average Gradient.
Attributes
----------
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : None or ndarray of shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
.. versionadded:: 0.17
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
RidgeClassifier : Ridge classifier.
RidgeCV : Ridge regression with built-in cross validation.
:class:`~sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression
combines ridge regression with the kernel trick.
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y)
Ridge()
"""
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
normalize="deprecated",
copy_X=True,
max_iter=None,
tol=1e-3,
solver="auto",
positive=False,
random_state=None,
):
super().__init__(
alpha=alpha,
fit_intercept=fit_intercept,
normalize=normalize,
copy_X=copy_X,
max_iter=max_iter,
tol=tol,
solver=solver,
positive=positive,
random_state=random_state,
)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
"""
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), self.solver)
X, y = self._validate_data(
X,
y,
accept_sparse=_accept_sparse,
dtype=[np.float64, np.float32],
multi_output=True,
y_numeric=True,
)
return super().fit(X, y, sample_weight=sample_weight)
class _RidgeClassifierMixin(LinearClassifierMixin):
def _prepare_data(self, X, y, sample_weight, solver):
"""Validate `X` and `y` and binarize `y`.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
solver : str
The solver used in `Ridge` to know which sparse format to support.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Validated training data.
y : ndarray of shape (n_samples,)
Validated target values.
sample_weight : ndarray of shape (n_samples,)
Validated sample weights.
Y : ndarray of shape (n_samples, n_classes)
The binarized version of `y`.
"""
accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)
X, y = self._validate_data(
X,
y,
accept_sparse=accept_sparse,
multi_output=True,
y_numeric=False,
)
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith("multilabel"):
y = column_or_1d(y, warn=True)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if self.class_weight:
sample_weight = sample_weight * compute_sample_weight(self.class_weight, y)
return X, y, sample_weight, Y
def predict(self, X):
"""Predict class labels for samples in `X`.
Parameters
----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data matrix for which we want to predict the targets.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Vector or matrix containing the predictions. In binary and
multiclass problems, this is a vector containing `n_samples`. In
a multilabel problem, it returns a matrix of shape
`(n_samples, n_outputs)`.
"""
check_is_fitted(self, attributes=["_label_binarizer"])
if self._label_binarizer.y_type_.startswith("multilabel"):
# Threshold such that the negative label is -1 and positive label
# is 1 to use the inverse transform of the label binarizer fitted
# during fit.
scores = 2 * (self.decision_function(X) > 0) - 1
return self._label_binarizer.inverse_transform(scores)
return super().predict(X)
@property
def classes_(self):
"""Classes labels."""
return self._label_binarizer.classes_
def _more_tags(self):
return {"multilabel": True}
class RidgeClassifier(_RidgeClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
This classifier first converts the target values into ``{-1, 1}`` and
then treats the problem as a regression task (multi-output regression in
the multiclass case).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float, default=1.0
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and
will be removed in 1.2.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
tol : float, default=1e-3
Precision of the solution.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', \
'sag', 'saga', 'lbfgs'}, default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its unbiased and more flexible version named SAGA. Both methods
use an iterative procedure, and are often faster than other solvers
when both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
- 'lbfgs' uses L-BFGS-B algorithm implemented in
`scipy.optimize.minimize`. It can be used only when `positive`
is True.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
Only 'lbfgs' solver is supported in this case.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
Attributes
----------
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
``coef_`` is of shape (1, n_features) when the given problem is binary.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : None or ndarray of shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
Ridge : Ridge regression.
RidgeClassifierCV : Ridge classifier with built-in cross validation.
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifier
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifier().fit(X, y)
>>> clf.score(X, y)
0.9595...
"""
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
normalize="deprecated",
copy_X=True,
max_iter=None,
tol=1e-3,
class_weight=None,
solver="auto",
positive=False,
random_state=None,
):
super().__init__(
alpha=alpha,
fit_intercept=fit_intercept,
normalize=normalize,
copy_X=copy_X,
max_iter=max_iter,
tol=tol,
solver=solver,
positive=positive,
random_state=random_state,
)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
.. versionadded:: 0.17
*sample_weight* support to RidgeClassifier.
Returns
-------
self : object
Instance of the estimator.
"""
X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, self.solver)
super().fit(X, Y, sample_weight=sample_weight)
return self
def _check_gcv_mode(X, gcv_mode):
possible_gcv_modes = [None, "auto", "svd", "eigen"]
if gcv_mode not in possible_gcv_modes:
raise ValueError(
"Unknown value for 'gcv_mode'. Got {} instead of one of {}".format(
gcv_mode, possible_gcv_modes
)
)
if gcv_mode in ["eigen", "svd"]:
return gcv_mode
# if X has more rows than columns, use decomposition of X^T.X,
# otherwise X.X^T
if X.shape[0] > X.shape[1]:
return "svd"
return "eigen"
def _find_smallest_angle(query, vectors):
"""Find the column of vectors that is most aligned with the query.
Both query and the columns of vectors must have their l2 norm equal to 1.
Parameters
----------
query : ndarray of shape (n_samples,)
Normalized query vector.
vectors : ndarray of shape (n_samples, n_features)
Vectors to which we compare query, as columns. Must be normalized.
"""
abs_cosine = np.abs(query.dot(vectors))
index = np.argmax(abs_cosine)
return index
class _X_CenterStackOp(sparse.linalg.LinearOperator):
"""Behaves as centered and scaled X with an added intercept column.
This operator behaves as
np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]])
"""
def __init__(self, X, X_mean, sqrt_sw):
n_samples, n_features = X.shape
super().__init__(X.dtype, (n_samples, n_features + 1))
self.X = X
self.X_mean = X_mean
self.sqrt_sw = sqrt_sw
def _matvec(self, v):
v = v.ravel()
return (
safe_sparse_dot(self.X, v[:-1], dense_output=True)
- self.sqrt_sw * self.X_mean.dot(v[:-1])
+ v[-1] * self.sqrt_sw
)
def _matmat(self, v):
return (
safe_sparse_dot(self.X, v[:-1], dense_output=True)
- self.sqrt_sw[:, None] * self.X_mean.dot(v[:-1])
+ v[-1] * self.sqrt_sw[:, None]
)
def _transpose(self):
return _XT_CenterStackOp(self.X, self.X_mean, self.sqrt_sw)
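# Illustrative sketch (not used elsewhere): for a small sparse X, the operator
# above acts like the explicitly materialized matrix
#     np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]).
def _x_center_stack_sketch(seed=0):
    rng = np.random.RandomState(seed)
    X = sparse.csr_matrix(rng.randn(6, 3))
    sqrt_sw = np.ones(6)
    X_mean = np.asarray(X.mean(axis=0)).ravel()
    op = _X_CenterStackOp(X, X_mean, sqrt_sw)
    explicit = np.hstack(
        [X.toarray() - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]
    )
    v = rng.randn(4)
    return np.allclose(op.matvec(v), explicit @ v)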
class _XT_CenterStackOp(sparse.linalg.LinearOperator):
"""Behaves as transposed centered and scaled X with an intercept column.
This operator behaves as
np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]).T
"""
def __init__(self, X, X_mean, sqrt_sw):
n_samples, n_features = X.shape
super().__init__(X.dtype, (n_features + 1, n_samples))
self.X = X
self.X_mean = X_mean
self.sqrt_sw = sqrt_sw
def _matvec(self, v):
v = v.ravel()
n_features = self.shape[0]
res = np.empty(n_features, dtype=self.X.dtype)
res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - (
self.X_mean * self.sqrt_sw.dot(v)
)
res[-1] = np.dot(v, self.sqrt_sw)
return res
def _matmat(self, v):
n_features = self.shape[0]
res = np.empty((n_features, v.shape[1]), dtype=self.X.dtype)
res[:-1] = safe_sparse_dot(self.X.T, v, dense_output=True) - self.X_mean[
:, None
] * self.sqrt_sw.dot(v)
res[-1] = np.dot(self.sqrt_sw, v)
return res
class _IdentityRegressor:
"""Fake regressor which will directly output the prediction."""
def decision_function(self, y_predict):
return y_predict
def predict(self, y_predict):
return y_predict
class _IdentityClassifier(LinearClassifierMixin):
"""Fake classifier which will directly output the prediction.
We inherit from LinearClassifierMixin to get the proper shape for the
output `y`.
"""
def __init__(self, classes):
self.classes_ = classes
def decision_function(self, y_predict):
return y_predict
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Leave-one-out Cross-Validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id).
Dual solution: c = G^-1y
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G^-1 = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KG^-1Y - diag(KG^-1)Y) / diag(I-KG^-1)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G^-1)
The best score (negative mean squared error or user-provided scoring) is
stored in the `best_score_` attribute, and the selected hyperparameter in
`alpha_`.
References
----------
http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf
https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(
self,
alphas=(0.1, 1.0, 10.0),
*,
fit_intercept=True,
normalize="deprecated",
scoring=None,
copy_X=True,
gcv_mode=None,
store_cv_values=False,
is_clf=False,
alpha_per_target=False,
):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
self.is_clf = is_clf
self.alpha_per_target = alpha_per_target
@staticmethod
def _decomp_diag(v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
@staticmethod
def _diag_dot(D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None),) + (np.newaxis,) * (len(B.shape) - 1)]
return D * B
def _compute_gram(self, X, sqrt_sw):
"""Computes the Gram matrix XX^T with possible centering.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
gram : ndarray of shape (n_samples, n_samples)
The Gram matrix.
X_mean : ndarray of shape (n_feature,)
The weighted mean of ``X`` for each feature.
Notes
-----
When X is dense the centering has been done in preprocessing
so the mean is 0 and we just compute XX^T.
When X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
"""
center = self.fit_intercept and sparse.issparse(X)
if not center:
# in this case centering has been done in preprocessing
# or we are not fitting an intercept.
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
return safe_sparse_dot(X, X.T, dense_output=True), X_mean
# X is sparse
n_samples = X.shape[0]
sample_weight_matrix = sparse.dia_matrix(
(sqrt_sw, 0), shape=(n_samples, n_samples)
)
X_weighted = sample_weight_matrix.dot(X)
X_mean, _ = mean_variance_axis(X_weighted, axis=0)
X_mean *= n_samples / sqrt_sw.dot(sqrt_sw)
X_mX = sqrt_sw[:, None] * safe_sparse_dot(X_mean, X.T, dense_output=True)
X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean)
return (
safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T,
X_mean,
)
def _compute_covariance(self, X, sqrt_sw):
"""Computes covariance matrix X^TX with possible centering.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
covariance : ndarray of shape (n_features, n_features)
The covariance matrix.
X_mean : ndarray of shape (n_feature,)
The weighted mean of ``X`` for each feature.
Notes
-----
Since X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
"""
if not self.fit_intercept:
# in this case centering has been done in preprocessing
# or we are not fitting an intercept.
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
return safe_sparse_dot(X.T, X, dense_output=True), X_mean
# this function only gets called for sparse X
n_samples = X.shape[0]
sample_weight_matrix = sparse.dia_matrix(
(sqrt_sw, 0), shape=(n_samples, n_samples)
)
X_weighted = sample_weight_matrix.dot(X)
X_mean, _ = mean_variance_axis(X_weighted, axis=0)
X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw)
weight_sum = sqrt_sw.dot(sqrt_sw)
return (
safe_sparse_dot(X.T, X, dense_output=True)
- weight_sum * np.outer(X_mean, X_mean),
X_mean,
)
def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw):
"""Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T)
without explicitly centering X nor computing X.dot(A)
when X is sparse.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
A : ndarray of shape (n_features, n_features)
X_mean : ndarray of shape (n_features,)
sqrt_sw : ndarray of shape (n_features,)
square roots of sample weights
Returns
-------
diag : np.ndarray, shape (n_samples,)
The computed diagonal.
"""
intercept_col = scale = sqrt_sw
batch_size = X.shape[1]
diag = np.empty(X.shape[0], dtype=X.dtype)
for start in range(0, X.shape[0], batch_size):
batch = slice(start, min(X.shape[0], start + batch_size), 1)
X_batch = np.empty(
(X[batch].shape[0], X.shape[1] + self.fit_intercept), dtype=X.dtype
)
if self.fit_intercept:
X_batch[:, :-1] = X[batch].A - X_mean * scale[batch][:, None]
X_batch[:, -1] = intercept_col[batch]
else:
X_batch = X[batch].A
diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1)
return diag
def _eigen_decompose_gram(self, X, y, sqrt_sw):
"""Eigendecomposition of X.X^T, used when n_samples <= n_features."""
# if X is dense it has already been centered in preprocessing
K, X_mean = self._compute_gram(X, sqrt_sw)
if self.fit_intercept:
# to emulate centering X with sample weights,
# ie removing the weighted average, we add a column
# containing the square roots of the sample weights.
# by centering, it is orthogonal to the other columns
K += np.outer(sqrt_sw, sqrt_sw)
eigvals, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return X_mean, eigvals, Q, QT_y
def _solve_eigen_gram(self, alpha, y, sqrt_sw, X_mean, eigvals, Q, QT_y):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X.X^T (n_samples <= n_features).
"""
w = 1.0 / (eigvals + alpha)
if self.fit_intercept:
# the vector containing the square roots of the sample weights (1
# when no sample weights) is the eigenvector of XX^T which
# corresponds to the intercept; we cancel the regularization on
# this dimension. the corresponding eigenvalue is
# sum(sample_weight).
normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
intercept_dim = _find_smallest_angle(normalized_sw, Q)
w[intercept_dim] = 0 # cancel regularization for the intercept
c = np.dot(Q, self._diag_dot(w, QT_y))
G_inverse_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_inverse_diag = G_inverse_diag[:, np.newaxis]
return G_inverse_diag, c
def _eigen_decompose_covariance(self, X, y, sqrt_sw):
"""Eigendecomposition of X^T.X, used when n_samples > n_features
and X is sparse.
"""
n_samples, n_features = X.shape
cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype)
cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw)
if not self.fit_intercept:
cov = cov[:-1, :-1]
# to emulate centering X with sample weights,
# ie removing the weighted average, we add a column
# containing the square roots of the sample weights.
# by centering, it is orthogonal to the other columns
# when all samples have the same weight we add a column of 1
else:
cov[-1] = 0
cov[:, -1] = 0
cov[-1, -1] = sqrt_sw.dot(sqrt_sw)
nullspace_dim = max(0, n_features - n_samples)
eigvals, V = linalg.eigh(cov)
# remove eigenvalues and vectors in the null space of X^T.X
eigvals = eigvals[nullspace_dim:]
V = V[:, nullspace_dim:]
return X_mean, eigvals, V, X
def _solve_eigen_covariance_no_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X
):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse), and not fitting an intercept.
"""
w = 1 / (eigvals + alpha)
A = (V * w).dot(V.T)
AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True))
y_hat = safe_sparse_dot(X, AXy, dense_output=True)
hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
if len(y.shape) != 1:
# handle case where y is 2-d
hat_diag = hat_diag[:, np.newaxis]
return (1 - hat_diag) / alpha, (y - y_hat) / alpha
def _solve_eigen_covariance_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X
):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse),
and we are fitting an intercept.
"""
# the vector [0, 0, ..., 0, 1]
# is the eigenvector of X^TX which
# corresponds to the intercept; we cancel the regularization on
# this dimension. the corresponding eigenvalue is
# sum(sample_weight), e.g. n when uniform sample weights.
intercept_sv = np.zeros(V.shape[0])
intercept_sv[-1] = 1
intercept_dim = _find_smallest_angle(intercept_sv, V)
w = 1 / (eigvals + alpha)
w[intercept_dim] = 1 / eigvals[intercept_dim]
A = (V * w).dot(V.T)
# add a column to X containing the square roots of sample weights
X_op = _X_CenterStackOp(X, X_mean, sqrt_sw)
AXy = A.dot(X_op.T.dot(y))
y_hat = X_op.dot(AXy)
hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
# return (1 - hat_diag), (y - y_hat)
if len(y.shape) != 1:
# handle case where y is 2-d
hat_diag = hat_diag[:, np.newaxis]
return (1 - hat_diag) / alpha, (y - y_hat) / alpha
def _solve_eigen_covariance(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse).
"""
if self.fit_intercept:
return self._solve_eigen_covariance_intercept(
alpha, y, sqrt_sw, X_mean, eigvals, V, X
)
return self._solve_eigen_covariance_no_intercept(
alpha, y, sqrt_sw, X_mean, eigvals, V, X
)
def _svd_decompose_design_matrix(self, X, y, sqrt_sw):
# X already centered
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
if self.fit_intercept:
# to emulate fit_intercept=True situation, add a column
# containing the square roots of the sample weights
# by centering, the other columns are orthogonal to that one
intercept_column = sqrt_sw[:, None]
X = np.hstack((X, intercept_column))
U, singvals, _ = linalg.svd(X, full_matrices=0)
singvals_sq = singvals ** 2
UT_y = np.dot(U.T, y)
return X_mean, singvals_sq, U, UT_y
def _solve_svd_design_matrix(self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have an SVD decomposition of X
(n_samples > n_features and X is dense).
"""
w = ((singvals_sq + alpha) ** -1) - (alpha ** -1)
if self.fit_intercept:
# detect intercept column
normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
intercept_dim = _find_smallest_angle(normalized_sw, U)
# cancel the regularization for the intercept
w[intercept_dim] = -(alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_inverse_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_inverse_diag = G_inverse_diag[:, np.newaxis]
return G_inverse_diag, c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model with gcv.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data. Will be cast to float64 if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to float64 if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
"""
_normalize = _deprecate_normalize(
self.normalize, default=False, estimator_name=self.__class__.__name__
)
X, y = self._validate_data(
X,
y,
accept_sparse=["csr", "csc", "coo"],
dtype=[np.float64],
multi_output=True,
y_numeric=True,
)
# alpha_per_target cannot be used in classifier mode. All subclasses
# of _RidgeGCV that are classifiers keep alpha_per_target at its
# default value: False, so the condition below should never happen.
assert not (self.is_clf and self.alpha_per_target)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self.alphas = np.asarray(self.alphas)
if np.any(self.alphas <= 0):
raise ValueError(
"alphas must be strictly positive. Got {} containing some "
"negative or null value instead.".format(self.alphas)
)
X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
X,
y,
self.fit_intercept,
_normalize,
self.copy_X,
sample_weight=sample_weight,
)
gcv_mode = _check_gcv_mode(X, self.gcv_mode)
if gcv_mode == "eigen":
decompose = self._eigen_decompose_gram
solve = self._solve_eigen_gram
elif gcv_mode == "svd":
if sparse.issparse(X):
decompose = self._eigen_decompose_covariance
solve = self._solve_eigen_covariance
else:
decompose = self._svd_decompose_design_matrix
solve = self._solve_svd_design_matrix
n_samples = X.shape[0]
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight)
sqrt_sw = np.sqrt(sample_weight)
else:
sqrt_sw = np.ones(n_samples, dtype=X.dtype)
X_mean, *decomposition = decompose(X, y, sqrt_sw)
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
n_y = 1 if len(y.shape) == 1 else y.shape[1]
n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas)
if self.store_cv_values:
self.cv_values_ = np.empty((n_samples * n_y, n_alphas), dtype=X.dtype)
best_coef, best_score, best_alpha = None, None, None
for i, alpha in enumerate(np.atleast_1d(self.alphas)):
G_inverse_diag, c = solve(float(alpha), y, sqrt_sw, X_mean, *decomposition)
if error:
squared_errors = (c / G_inverse_diag) ** 2
if self.alpha_per_target:
alpha_score = -squared_errors.mean(axis=0)
else:
alpha_score = -squared_errors.mean()
if self.store_cv_values:
self.cv_values_[:, i] = squared_errors.ravel()
else:
predictions = y - (c / G_inverse_diag)
if self.store_cv_values:
self.cv_values_[:, i] = predictions.ravel()
if self.is_clf:
identity_estimator = _IdentityClassifier(classes=np.arange(n_y))
alpha_score = scorer(
identity_estimator, predictions, y.argmax(axis=1)
)
else:
identity_estimator = _IdentityRegressor()
if self.alpha_per_target:
alpha_score = np.array(
[
scorer(identity_estimator, predictions[:, j], y[:, j])
for j in range(n_y)
]
)
else:
alpha_score = scorer(
identity_estimator, predictions.ravel(), y.ravel()
)
# Keep track of the best model
if best_score is None:
# initialize
if self.alpha_per_target and n_y > 1:
best_coef = c
best_score = np.atleast_1d(alpha_score)
best_alpha = np.full(n_y, alpha)
else:
best_coef = c
best_score = alpha_score
best_alpha = alpha
else:
# update
if self.alpha_per_target and n_y > 1:
to_update = alpha_score > best_score
best_coef[:, to_update] = c[:, to_update]
best_score[to_update] = alpha_score[to_update]
best_alpha[to_update] = alpha
elif alpha_score > best_score:
best_coef, best_score, best_alpha = c, alpha_score, alpha
self.alpha_ = best_alpha
self.best_score_ = best_score
self.dual_coef_ = best_coef
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
X_offset += X_mean * X_scale
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, n_alphas
else:
cv_values_shape = n_samples, n_y, n_alphas
self.cv_values_ = self.cv_values_.reshape(cv_values_shape)
return self
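# Illustrative sketch (added for exposition, not part of scikit-learn): the
# quantity ``c / G_inverse_diag`` computed in ``fit`` above is the closed-form
# vector of leave-one-out residuals. The helper below (its name and the use of
# ``make_regression`` are assumptions) checks that the values stored by
# ``RidgeCV(store_cv_values=True)`` match an explicit leave-one-out loop.
def _example_loo_closed_form_check():
    import numpy as np
    from sklearn.datasets import make_regression
    from sklearn.linear_model import Ridge, RidgeCV
    from sklearn.model_selection import LeaveOneOut
    X, y = make_regression(n_samples=30, n_features=5, random_state=0)
    alpha = 1.0
    gcv = RidgeCV(alphas=[alpha], store_cv_values=True).fit(X, y)
    closed_form_errors = gcv.cv_values_[:, 0]
    explicit_errors = np.empty_like(closed_form_errors)
    for i, (train, test) in enumerate(LeaveOneOut().split(X)):
        pred = Ridge(alpha=alpha).fit(X[train], y[train]).predict(X[test])
        explicit_errors[i] = (pred[0] - y[test][0]) ** 2
    assert np.allclose(closed_form_errors, explicit_errors)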
class _BaseRidgeCV(LinearModel):
def __init__(
self,
alphas=(0.1, 1.0, 10.0),
*,
fit_intercept=True,
normalize="deprecated",
scoring=None,
cv=None,
gcv_mode=None,
store_cv_values=False,
alpha_per_target=False,
):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
self.alpha_per_target = alpha_per_target
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data. If using GCV, will be cast to float64
if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
Notes
-----
When sample_weight is provided, the selected hyperparameter may depend
on whether we use leave-one-out cross-validation (cv=None or cv='auto')
or another form of cross-validation, because only leave-one-out
cross-validation takes the sample weights into account when computing
the validation score.
"""
cv = self.cv
if cv is None:
estimator = _RidgeGCV(
self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values,
is_clf=is_classifier(self),
alpha_per_target=self.alpha_per_target,
)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
self.best_score_ = estimator.best_score_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True are incompatible")
if self.alpha_per_target:
raise ValueError("cv!=None and alpha_per_target=True are incompatible")
parameters = {"alpha": self.alphas}
solver = "sparse_cg" if sparse.issparse(X) else "auto"
model = RidgeClassifier if is_classifier(self) else Ridge
gs = GridSearchCV(
model(
fit_intercept=self.fit_intercept,
normalize=self.normalize,
solver=solver,
),
parameters,
cv=cv,
scoring=self.scoring,
)
gs.fit(X, y, sample_weight=sample_weight)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.best_score_ = gs.best_score_
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
self.n_features_in_ = estimator.n_features_in_
if hasattr(estimator, "feature_names_in_"):
self.feature_names_in_ = estimator.feature_names_in_
return self
class RidgeCV(MultiOutputMixin, RegressorMixin, _BaseRidgeCV):
"""Ridge regression with built-in cross-validation.
See glossary entry for :term:`cross-validation estimator`.
By default, it performs efficient Leave-One-Out Cross-Validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : ndarray of shape (n_alphas,), default=(0.1, 1.0, 10.0)
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
If using Leave-One-Out cross-validation, alphas must be positive.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and will be removed in
1.2.
scoring : str, callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If None, the negative mean squared error if cv is 'auto' or None
(i.e. when using leave-one-out cross-validation), and r2 score
otherwise.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used, else,
:class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {'auto', 'svd', 'eigen'}, default='auto'
Flag indicating which strategy to use when performing
Leave-One-Out Cross-Validation. Options are::
'auto' : use 'svd' if n_samples > n_features, otherwise use 'eigen'
'svd' : force use of singular value decomposition of X when X is
dense, eigenvalue decomposition of X^T.X when X is sparse.
'eigen' : force computation via eigendecomposition of X.X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending on the shape of the training data.
store_cv_values : bool, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the ``cv_values_`` attribute (see
below). This flag is only compatible with ``cv=None`` (i.e. using
Leave-One-Out Cross-Validation).
alpha_per_target : bool, default=False
Flag indicating whether to optimize the alpha value (picked from the
`alphas` parameter list) for each target separately (for multi-output
settings: multiple prediction targets). When set to `True`, after
fitting, the `alpha_` attribute will contain a value for each target.
When set to `False`, a single alpha is used for all targets.
.. versionadded:: 0.24
Attributes
----------
cv_values_ : ndarray of shape (n_samples, n_alphas) or \
shape (n_samples, n_targets, n_alphas), optional
Cross-validation values for each alpha (only available if
``store_cv_values=True`` and ``cv=None``). After ``fit()`` has been
called, this attribute will contain the mean squared errors if
`scoring is None` otherwise it will contain standardized per point
prediction values.
coef_ : ndarray of shape (n_features) or (n_targets, n_features)
Weight vector(s).
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float or ndarray of shape (n_targets,)
Estimated regularization parameter, or, if ``alpha_per_target=True``,
the estimated regularization parameter for each target.
best_score_ : float or ndarray of shape (n_targets,)
Score of base estimator with best alpha, or, if
``alpha_per_target=True``, a score for each target.
.. versionadded:: 0.23
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
Ridge : Ridge regression.
RidgeClassifier : Classifier based on ridge regression on {-1, 1} labels.
RidgeClassifierCV : Ridge classifier with built-in cross validation.
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.linear_model import RidgeCV
>>> X, y = load_diabetes(return_X_y=True)
>>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
>>> clf.score(X, y)
0.5166...
"""
class RidgeClassifierCV(_RidgeClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
See glossary entry for :term:`cross-validation estimator`.
By default, it performs Leave-One-Out Cross-Validation. Currently,
only the n_features > n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : ndarray of shape (n_alphas,), default=(0.1, 1.0, 10.0)
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and
will be removed in 1.2.
scoring : str, callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
store_cv_values : bool, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the ``cv_values_`` attribute (see
below). This flag is only compatible with ``cv=None`` (i.e. using
Leave-One-Out Cross-Validation).
Attributes
----------
cv_values_ : ndarray of shape (n_samples, n_targets, n_alphas), optional
Cross-validation values for each alpha (only if ``store_cv_values=True`` and
``cv=None``). After ``fit()`` has been called, this attribute will
contain the mean squared errors if `scoring is None` otherwise it
will contain standardized per point prediction values.
coef_ : ndarray of shape (1, n_features) or (n_targets, n_features)
Coefficient of the features in the decision function.
``coef_`` is of shape (1, n_features) when the given problem is binary.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
best_score_ : float
Score of base estimator with best alpha.
.. versionadded:: 0.23
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
Ridge : Ridge regression.
RidgeClassifier : Ridge classifier.
RidgeCV : Ridge regression with built-in cross validation.
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifierCV
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
>>> clf.score(X, y)
0.9630...
"""
def __init__(
self,
alphas=(0.1, 1.0, 10.0),
*,
fit_intercept=True,
normalize="deprecated",
scoring=None,
cv=None,
class_weight=None,
store_cv_values=False,
):
super().__init__(
alphas=alphas,
fit_intercept=fit_intercept,
normalize=normalize,
scoring=scoring,
cv=cv,
store_cv_values=store_cv_values,
)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features. When using GCV,
will be cast to float64 if necessary.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
"""
        # `RidgeClassifier` does not accept the "sag" or "saga" solvers and can
        # therefore support csr, csc, and coo sparse matrices. By passing
        # solver="eigen" we force validation to accept all sparse formats.
X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, solver="eigen")
        # If cv is None, gcv mode will be used and we use the binarized Y
        # since y will not be binarized in the _RidgeGCV estimator.
        # If cv is not None, a GridSearchCV over RidgeClassifier estimators
        # is used, where y will be binarized internally. Thus, we pass y
        # instead of the binarized Y.
target = Y if self.cv is None else y
super().fit(X, target, sample_weight=sample_weight)
return self
def _more_tags(self):
return {
"multilabel": True,
"_xfail_checks": {
"check_sample_weights_invariance": (
"zero sample_weight is not equivalent to removing samples"
),
},
}
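# Illustrative sketch (added for exposition, not part of scikit-learn): the
# one-versus-all scheme described in the Notes section of RidgeClassifierCV
# amounts to regressing a {-1, 1} label-indicator matrix with Ridge and taking
# the argmax of the decision values. The helper name is an assumption.
def _example_one_versus_all_sketch():
    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.preprocessing import LabelBinarizer
    X, y = load_iris(return_X_y=True)
    Y = LabelBinarizer(neg_label=-1, pos_label=1).fit_transform(y)
    manual_pred = Ridge(alpha=1.0).fit(X, Y).predict(X).argmax(axis=1)
    clf_pred = RidgeClassifier(alpha=1.0).fit(X, y).predict(X)
    assert np.array_equal(manual_pred, clf_pred)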
|
the-stack_0_12533 | # coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The standard DQN replay memory.
This implementation is an out-of-graph replay memory + in-graph wrapper. It
supports vanilla n-step updates of the form typically found in the literature,
i.e. where rewards are accumulated for n steps and the intermediate trajectory
is not exposed to the agent. This does not allow, for example, performing
off-policy corrections.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gzip
import math
import os
import pickle
import numpy as np
import tensorflow.compat.v1 as tf
import gin.tf
from tensorflow.contrib import staging as contrib_staging
# Defines a type describing part of the tuple returned by the replay
# memory. Each element of the tuple is a tensor of shape [batch, ...] where
# ... is defined by the 'shape' field of ReplayElement. The tensor type is
# given by the 'type' field. The 'name' field is for convenience and ease of
# debugging.
ReplayElement = (
collections.namedtuple('shape_type', ['name', 'shape', 'type']))
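# Illustrative example (added for exposition, not used by the library code): a
# ReplayElement just names one per-transition field together with its shape and
# dtype, e.g. 84x84 greyscale Atari frames stored as uint8.
_EXAMPLE_OBSERVATION_ELEMENT = ReplayElement('observation', (84, 84), np.uint8)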
# A prefix that can not collide with variable names for checkpoint files.
STORE_FILENAME_PREFIX = '$store$_'
# This constant determines how many iterations a checkpoint is kept for.
CHECKPOINT_DURATION = 4
def invalid_range(cursor, replay_capacity, stack_size, update_horizon):
"""Returns a array with the indices of cursor-related invalid transitions.
There are update_horizon + stack_size invalid indices:
- The update_horizon indices before the cursor, because we do not have a
valid N-step transition (including the next state).
- The stack_size indices on or immediately after the cursor.
If N = update_horizon, K = stack_size, and the cursor is at c, invalid
indices are:
c - N, c - N + 1, ..., c, c + 1, ..., c + K - 1.
It handles special cases in a circular buffer in the beginning and the end.
Args:
cursor: int, the position of the cursor.
replay_capacity: int, the size of the replay memory.
stack_size: int, the size of the stacks returned by the replay memory.
update_horizon: int, the agent's update horizon.
Returns:
    np.array of size stack_size + update_horizon with the invalid indices.
"""
assert cursor < replay_capacity
return np.array(
[(cursor - update_horizon + i) % replay_capacity
for i in range(stack_size + update_horizon)])
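# Illustrative check of the formula above (added for exposition, not used by
# the library code): with capacity 10, stack size 4 and update horizon 3, a
# cursor at position 2 invalidates the 3 indices preceding it and the 4 indices
# starting at it, wrapping around the circular buffer.
def _example_invalid_range():
  indices = invalid_range(cursor=2, replay_capacity=10, stack_size=4,
                          update_horizon=3)
  assert list(indices) == [9, 0, 1, 2, 3, 4, 5]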
class OutOfGraphReplayBuffer(object):
"""A simple out-of-graph Replay Buffer.
Stores transitions, state, action, reward, next_state, terminal (and any
extra contents specified) in a circular buffer and provides a uniform
transition sampling function.
  When the states consist of stacks of observations, storing the states is
inefficient. This class writes observations and constructs the stacked states
at sample time.
Attributes:
add_count: int, counter of how many transitions have been added (including
the blank ones at the beginning of an episode).
invalid_range: np.array, an array with the indices of cursor-related invalid
transitions
"""
def __init__(self,
observation_shape,
stack_size,
replay_capacity,
batch_size,
update_horizon=1,
gamma=0.99,
max_sample_attempts=1000,
extra_storage_types=None,
observation_dtype=np.uint8,
terminal_dtype=np.uint8,
action_shape=(),
action_dtype=np.int32,
reward_shape=(),
reward_dtype=np.float32):
"""Initializes OutOfGraphReplayBuffer.
Args:
observation_shape: tuple of ints.
stack_size: int, number of frames to use in state stack.
replay_capacity: int, number of transitions to keep in memory.
batch_size: int.
update_horizon: int, length of update ('n' in n-step update).
      gamma: float, the discount factor.
max_sample_attempts: int, the maximum number of attempts allowed to
get a sample.
extra_storage_types: list of ReplayElements defining the type of the extra
contents that will be stored and returned by sample_transition_batch.
observation_dtype: np.dtype, type of the observations. Defaults to
np.uint8 for Atari 2600.
terminal_dtype: np.dtype, type of the terminals. Defaults to np.uint8 for
Atari 2600.
action_shape: tuple of ints, the shape for the action vector. Empty tuple
means the action is a scalar.
action_dtype: np.dtype, type of elements in the action.
reward_shape: tuple of ints, the shape of the reward vector. Empty tuple
means the reward is a scalar.
reward_dtype: np.dtype, type of elements in the reward.
Raises:
ValueError: If replay_capacity is too small to hold at least one
transition.
"""
assert isinstance(observation_shape, tuple)
if replay_capacity < update_horizon + stack_size:
raise ValueError('There is not enough capacity to cover '
'update_horizon and stack_size.')
tf.logging.info(
'Creating a %s replay memory with the following parameters:',
self.__class__.__name__)
tf.logging.info('\t observation_shape: %s', str(observation_shape))
tf.logging.info('\t observation_dtype: %s', str(observation_dtype))
tf.logging.info('\t terminal_dtype: %s', str(terminal_dtype))
tf.logging.info('\t stack_size: %d', stack_size)
tf.logging.info('\t replay_capacity: %d', replay_capacity)
tf.logging.info('\t batch_size: %d', batch_size)
tf.logging.info('\t update_horizon: %d', update_horizon)
tf.logging.info('\t gamma: %f', gamma)
self._action_shape = action_shape
self._action_dtype = action_dtype
self._reward_shape = reward_shape
self._reward_dtype = reward_dtype
self._observation_shape = observation_shape
self._stack_size = stack_size
self._state_shape = self._observation_shape + (self._stack_size,)
self._replay_capacity = replay_capacity
self._batch_size = batch_size
self._update_horizon = update_horizon
self._gamma = gamma
self._observation_dtype = observation_dtype
self._terminal_dtype = terminal_dtype
self._max_sample_attempts = max_sample_attempts
if extra_storage_types:
self._extra_storage_types = extra_storage_types
else:
self._extra_storage_types = []
self._create_storage()
self.add_count = np.array(0)
self.invalid_range = np.zeros((self._stack_size))
# When the horizon is > 1, we compute the sum of discounted rewards as a dot
# product using the precomputed vector <gamma^0, gamma^1, ..., gamma^{n-1}>.
self._cumulative_discount_vector = np.array(
[math.pow(self._gamma, n) for n in range(update_horizon)],
dtype=np.float32)
def _create_storage(self):
"""Creates the numpy arrays used to store transitions.
"""
self._store = {}
for storage_element in self.get_storage_signature():
array_shape = [self._replay_capacity] + list(storage_element.shape)
self._store[storage_element.name] = np.empty(
array_shape, dtype=storage_element.type)
def get_add_args_signature(self):
"""The signature of the add function.
Note - Derived classes may return a different signature.
Returns:
list of ReplayElements defining the type of the argument signature needed
by the add function.
"""
return self.get_storage_signature()
def get_storage_signature(self):
"""Returns a default list of elements to be stored in this replay memory.
Note - Derived classes may return a different signature.
Returns:
list of ReplayElements defining the type of the contents stored.
"""
storage_elements = [
ReplayElement('observation', self._observation_shape,
self._observation_dtype),
ReplayElement('action', self._action_shape, self._action_dtype),
ReplayElement('reward', self._reward_shape, self._reward_dtype),
ReplayElement('terminal', (), self._terminal_dtype)
]
for extra_replay_element in self._extra_storage_types:
storage_elements.append(extra_replay_element)
return storage_elements
def _add_zero_transition(self):
"""Adds a padding transition filled with zeros (Used in episode beginnings).
"""
zero_transition = []
for element_type in self.get_add_args_signature():
zero_transition.append(
np.zeros(element_type.shape, dtype=element_type.type))
self._add(*zero_transition)
def add(self, observation, action, reward, terminal, *args):
"""Adds a transition to the replay memory.
This function checks the types and handles the padding at the beginning of
an episode. Then it calls the _add function.
Since the next_observation in the transition will be the observation added
next there is no need to pass it.
If the replay memory is at capacity the oldest transition will be discarded.
Args:
observation: np.array with shape observation_shape.
action: int, the action in the transition.
reward: float, the reward received in the transition.
terminal: np.dtype, acts as a boolean indicating whether the transition
was terminal (1) or not (0).
*args: extra contents with shapes and dtypes according to
extra_storage_types.
"""
self._check_add_types(observation, action, reward, terminal, *args)
if self.is_empty() or self._store['terminal'][self.cursor() - 1] == 1:
for _ in range(self._stack_size - 1):
# Child classes can rely on the padding transitions being filled with
# zeros. This is useful when there is a priority argument.
self._add_zero_transition()
self._add(observation, action, reward, terminal, *args)
def _add(self, *args):
"""Internal add method to add to the storage arrays.
Args:
*args: All the elements in a transition.
"""
self._check_args_length(*args)
transition = {e.name: args[idx]
for idx, e in enumerate(self.get_add_args_signature())}
self._add_transition(transition)
def _add_transition(self, transition):
"""Internal add method to add transition dictionary to storage arrays.
Args:
transition: The dictionary of names and values of the transition
to add to the storage.
"""
cursor = self.cursor()
for arg_name in transition:
self._store[arg_name][cursor] = transition[arg_name]
self.add_count += 1
self.invalid_range = invalid_range(
self.cursor(), self._replay_capacity, self._stack_size,
self._update_horizon)
def _check_args_length(self, *args):
"""Check if args passed to the add method have the same length as storage.
Args:
*args: Args for elements used in storage.
Raises:
ValueError: If args have wrong length.
"""
if len(args) != len(self.get_add_args_signature()):
raise ValueError('Add expects {} elements, received {}'.format(
len(self.get_add_args_signature()), len(args)))
def _check_add_types(self, *args):
"""Checks if args passed to the add method match those of the storage.
Args:
*args: Args whose types need to be validated.
Raises:
ValueError: If args have wrong shape or dtype.
"""
self._check_args_length(*args)
for arg_element, store_element in zip(args, self.get_add_args_signature()):
if isinstance(arg_element, np.ndarray):
arg_shape = arg_element.shape
elif isinstance(arg_element, tuple) or isinstance(arg_element, list):
# TODO(b/80536437). This is not efficient when arg_element is a list.
arg_shape = np.array(arg_element).shape
else:
# Assume it is scalar.
arg_shape = tuple()
store_element_shape = tuple(store_element.shape)
if arg_shape != store_element_shape:
raise ValueError('arg has shape {}, expected {}'.format(
arg_shape, store_element_shape))
def is_empty(self):
"""Is the Replay Buffer empty?"""
return self.add_count == 0
def is_full(self):
"""Is the Replay Buffer full?"""
return self.add_count >= self._replay_capacity
def cursor(self):
"""Index to the location where the next transition will be written."""
return self.add_count % self._replay_capacity
def get_range(self, array, start_index, end_index):
"""Returns the range of array at the index handling wraparound if necessary.
Args:
array: np.array, the array to get the stack from.
start_index: int, index to the start of the range to be returned. Range
will wraparound if start_index is smaller than 0.
end_index: int, exclusive end index. Range will wraparound if end_index
exceeds replay_capacity.
Returns:
np.array, with shape [end_index - start_index, array.shape[1:]].
"""
assert end_index > start_index, 'end_index must be larger than start_index'
assert end_index >= 0
assert start_index < self._replay_capacity
if not self.is_full():
assert end_index <= self.cursor(), (
'Index {} has not been added.'.format(start_index))
# Fast slice read when there is no wraparound.
if start_index % self._replay_capacity < end_index % self._replay_capacity:
return_array = array[start_index:end_index, ...]
# Slow list read.
else:
indices = [(start_index + i) % self._replay_capacity
for i in range(end_index - start_index)]
return_array = array[indices, ...]
return return_array
def get_observation_stack(self, index):
return self._get_element_stack(index, 'observation')
def _get_element_stack(self, index, element_name):
state = self.get_range(self._store[element_name],
index - self._stack_size + 1, index + 1)
# The stacking axis is 0 but the agent expects as the last axis.
return np.moveaxis(state, 0, -1)
def get_terminal_stack(self, index):
return self.get_range(self._store['terminal'], index - self._stack_size + 1,
index + 1)
def is_valid_transition(self, index):
"""Checks if the index contains a valid transition.
Checks for collisions with the end of episodes and the current position
of the cursor.
Args:
index: int, the index to the state in the transition.
Returns:
Is the index valid: Boolean.
"""
# Check the index is in the valid range
if index < 0 or index >= self._replay_capacity:
return False
if not self.is_full():
# The indices and next_indices must be smaller than the cursor.
if index >= self.cursor() - self._update_horizon:
return False
# The first few indices contain the padding states of the first episode.
if index < self._stack_size - 1:
return False
# Skip transitions that straddle the cursor.
if index in set(self.invalid_range):
return False
# If there are terminal flags in any other frame other than the last one
# the stack is not valid, so don't sample it.
if self.get_terminal_stack(index)[:-1].any():
return False
return True
def _create_batch_arrays(self, batch_size):
"""Create a tuple of arrays with the type of get_transition_elements.
When using the WrappedReplayBuffer with staging enabled it is important to
    create new arrays every sample because StagingArea keeps a pointer to the
returned arrays.
Args:
batch_size: (int) number of transitions returned. If None the default
batch_size will be used.
Returns:
Tuple of np.arrays with the shape and type of get_transition_elements.
"""
transition_elements = self.get_transition_elements(batch_size)
batch_arrays = []
for element in transition_elements:
batch_arrays.append(np.empty(element.shape, dtype=element.type))
return tuple(batch_arrays)
def sample_index_batch(self, batch_size):
"""Returns a batch of valid indices sampled uniformly.
Args:
batch_size: int, number of indices returned.
Returns:
list of ints, a batch of valid indices sampled uniformly.
Raises:
RuntimeError: If the batch was not constructed after maximum number of
tries.
"""
if self.is_full():
# add_count >= self._replay_capacity > self._stack_size
min_id = self.cursor() - self._replay_capacity + self._stack_size - 1
max_id = self.cursor() - self._update_horizon
else:
# add_count < self._replay_capacity
min_id = self._stack_size - 1
max_id = self.cursor() - self._update_horizon
if max_id <= min_id:
raise RuntimeError('Cannot sample a batch with fewer than stack size '
'({}) + update_horizon ({}) transitions.'.
format(self._stack_size, self._update_horizon))
indices = []
attempt_count = 0
while (len(indices) < batch_size and
attempt_count < self._max_sample_attempts):
index = np.random.randint(min_id, max_id) % self._replay_capacity
if self.is_valid_transition(index):
indices.append(index)
else:
attempt_count += 1
if len(indices) != batch_size:
raise RuntimeError(
'Max sample attempts: Tried {} times but only sampled {}'
' valid indices. Batch size is {}'.
format(self._max_sample_attempts, len(indices), batch_size))
return indices
def sample_transition_batch(self, batch_size=None, indices=None):
"""Returns a batch of transitions (including any extra contents).
If get_transition_elements has been overridden and defines elements not
stored in self._store, an empty array will be returned and it will be
left to the child class to fill it. For example, for the child class
OutOfGraphPrioritizedReplayBuffer, the contents of the
sampling_probabilities are stored separately in a sum tree.
When the transition is terminal next_state_batch has undefined contents.
NOTE: This transition contains the indices of the sampled elements. These
are only valid during the call to sample_transition_batch, i.e. they may
be used by subclasses of this replay buffer but may point to different data
as soon as sampling is done.
Args:
batch_size: int, number of transitions returned. If None, the default
batch_size will be used.
indices: None or list of ints, the indices of every transition in the
batch. If None, sample the indices uniformly.
Returns:
transition_batch: tuple of np.arrays with the shape and type as in
get_transition_elements().
Raises:
ValueError: If an element to be sampled is missing from the replay buffer.
"""
if batch_size is None:
batch_size = self._batch_size
if indices is None:
indices = self.sample_index_batch(batch_size)
assert len(indices) == batch_size
transition_elements = self.get_transition_elements(batch_size)
batch_arrays = self._create_batch_arrays(batch_size)
for batch_element, state_index in enumerate(indices):
trajectory_indices = [(state_index + j) % self._replay_capacity
for j in range(self._update_horizon)]
trajectory_terminals = self._store['terminal'][trajectory_indices]
is_terminal_transition = trajectory_terminals.any()
if not is_terminal_transition:
trajectory_length = self._update_horizon
else:
# np.argmax of a bool array returns the index of the first True.
trajectory_length = np.argmax(trajectory_terminals.astype(np.bool),
0) + 1
next_state_index = state_index + trajectory_length
trajectory_discount_vector = (
self._cumulative_discount_vector[:trajectory_length])
trajectory_rewards = self.get_range(self._store['reward'], state_index,
next_state_index)
# Fill the contents of each array in the sampled batch.
assert len(transition_elements) == len(batch_arrays)
for element_array, element in zip(batch_arrays, transition_elements):
if element.name == 'state':
element_array[batch_element] = self.get_observation_stack(state_index)
elif element.name == 'reward':
# compute the discounted sum of rewards in the trajectory.
element_array[batch_element] = np.sum(
trajectory_discount_vector * trajectory_rewards, axis=0)
elif element.name == 'next_state':
element_array[batch_element] = self.get_observation_stack(
(next_state_index) % self._replay_capacity)
        elif element.name in ('next_action', 'next_reward'):
          # Strip the literal 'next_' prefix; str.lstrip('next_') removes a
          # character set rather than the prefix and is fragile for new names.
          element_array[batch_element] = (
              self._store[element.name[len('next_'):]][(next_state_index) %
                                                       self._replay_capacity])
elif element.name == 'terminal':
element_array[batch_element] = is_terminal_transition
elif element.name == 'indices':
element_array[batch_element] = state_index
elif element.name in self._store.keys():
element_array[batch_element] = (
self._store[element.name][state_index])
# We assume the other elements are filled in by the subclass.
return batch_arrays
def get_transition_elements(self, batch_size=None):
"""Returns a 'type signature' for sample_transition_batch.
Args:
batch_size: int, number of transitions returned. If None, the default
batch_size will be used.
Returns:
signature: A namedtuple describing the method's return type signature.
"""
batch_size = self._batch_size if batch_size is None else batch_size
transition_elements = [
ReplayElement('state', (batch_size,) + self._state_shape,
self._observation_dtype),
ReplayElement('action', (batch_size,) + self._action_shape,
self._action_dtype),
ReplayElement('reward', (batch_size,) + self._reward_shape,
self._reward_dtype),
ReplayElement('next_state', (batch_size,) + self._state_shape,
self._observation_dtype),
ReplayElement('next_action', (batch_size,) + self._action_shape,
self._action_dtype),
ReplayElement('next_reward', (batch_size,) + self._reward_shape,
self._reward_dtype),
ReplayElement('terminal', (batch_size,), self._terminal_dtype),
ReplayElement('indices', (batch_size,), np.int32)
]
for element in self._extra_storage_types:
transition_elements.append(
ReplayElement(element.name, (batch_size,) + tuple(element.shape),
element.type))
return transition_elements
def _generate_filename(self, checkpoint_dir, name, suffix):
return os.path.join(checkpoint_dir, '{}_ckpt.{}.gz'.format(name, suffix))
def _return_checkpointable_elements(self):
"""Return the dict of elements of the class for checkpointing.
Returns:
checkpointable_elements: dict containing all non private (starting with
_) members + all the arrays inside self._store.
"""
checkpointable_elements = {}
for member_name, member in self.__dict__.items():
if member_name == '_store':
for array_name, array in self._store.items():
checkpointable_elements[STORE_FILENAME_PREFIX + array_name] = array
elif not member_name.startswith('_'):
checkpointable_elements[member_name] = member
return checkpointable_elements
def save(self, checkpoint_dir, iteration_number):
"""Save the OutOfGraphReplayBuffer attributes into a file.
This method will save all the replay buffer's state in a single file.
Args:
checkpoint_dir: str, the directory where numpy checkpoint files should be
saved.
iteration_number: int, iteration_number to use as a suffix in naming
numpy checkpoint files.
"""
if not tf.gfile.Exists(checkpoint_dir):
return
checkpointable_elements = self._return_checkpointable_elements()
for attr in checkpointable_elements:
filename = self._generate_filename(checkpoint_dir, attr, iteration_number)
with tf.gfile.Open(filename, 'wb') as f:
with gzip.GzipFile(fileobj=f) as outfile:
# Checkpoint the np arrays in self._store with np.save instead of
# pickling the dictionary is critical for file size and performance.
# STORE_FILENAME_PREFIX indicates that the variable is contained in
# self._store.
if attr.startswith(STORE_FILENAME_PREFIX):
array_name = attr[len(STORE_FILENAME_PREFIX):]
np.save(outfile, self._store[array_name], allow_pickle=False)
# Some numpy arrays might not be part of storage
elif isinstance(self.__dict__[attr], np.ndarray):
np.save(outfile, self.__dict__[attr], allow_pickle=False)
else:
pickle.dump(self.__dict__[attr], outfile)
# After writing a checkpoint file, we garbage collect the checkpoint file
# that is four versions old.
stale_iteration_number = iteration_number - CHECKPOINT_DURATION
if stale_iteration_number >= 0:
stale_filename = self._generate_filename(checkpoint_dir, attr,
stale_iteration_number)
try:
tf.gfile.Remove(stale_filename)
except tf.errors.NotFoundError:
pass
def load(self, checkpoint_dir, suffix):
"""Restores the object from bundle_dictionary and numpy checkpoints.
Args:
checkpoint_dir: str, the directory where to read the numpy checkpointed
files from.
suffix: str, the suffix to use in numpy checkpoint files.
Raises:
NotFoundError: If not all expected files are found in directory.
"""
save_elements = self._return_checkpointable_elements()
# We will first make sure we have all the necessary files available to avoid
# loading a partially-specified (i.e. corrupted) replay buffer.
for attr in save_elements:
filename = self._generate_filename(checkpoint_dir, attr, suffix)
if not tf.gfile.Exists(filename):
raise tf.errors.NotFoundError(None, None,
'Missing file: {}'.format(filename))
# If we've reached this point then we have verified that all expected files
# are available.
for attr in save_elements:
filename = self._generate_filename(checkpoint_dir, attr, suffix)
with tf.gfile.Open(filename, 'rb') as f:
with gzip.GzipFile(fileobj=f) as infile:
if attr.startswith(STORE_FILENAME_PREFIX):
array_name = attr[len(STORE_FILENAME_PREFIX):]
self._store[array_name] = np.load(infile, allow_pickle=False)
elif isinstance(self.__dict__[attr], np.ndarray):
self.__dict__[attr] = np.load(infile, allow_pickle=False)
else:
self.__dict__[attr] = pickle.load(infile)
@gin.configurable(blacklist=['observation_shape', 'stack_size',
'update_horizon', 'gamma'])
class WrappedReplayBuffer(object):
"""Wrapper of OutOfGraphReplayBuffer with an in graph sampling mechanism.
Usage:
To add a transition: call the add function.
To sample a batch: Construct operations that depend on any of the
                       tensors in the transition dictionary. Every sess.run
that requires any of these tensors will sample a new
transition.
"""
def __init__(self,
observation_shape,
stack_size,
use_staging=True,
replay_capacity=1000000,
batch_size=32,
update_horizon=1,
gamma=0.99,
wrapped_memory=None,
max_sample_attempts=1000,
extra_storage_types=None,
observation_dtype=np.uint8,
terminal_dtype=np.uint8,
action_shape=(),
action_dtype=np.int32,
reward_shape=(),
reward_dtype=np.float32):
"""Initializes WrappedReplayBuffer.
Args:
observation_shape: tuple of ints.
stack_size: int, number of frames to use in state stack.
use_staging: bool, when True it would use a staging area to prefetch
the next sampling batch.
replay_capacity: int, number of transitions to keep in memory.
batch_size: int.
update_horizon: int, length of update ('n' in n-step update).
      gamma: float, the discount factor.
wrapped_memory: The 'inner' memory data structure. If None,
it creates the standard DQN replay memory.
max_sample_attempts: int, the maximum number of attempts allowed to
get a sample.
extra_storage_types: list of ReplayElements defining the type of the extra
contents that will be stored and returned by sample_transition_batch.
observation_dtype: np.dtype, type of the observations. Defaults to
np.uint8 for Atari 2600.
terminal_dtype: np.dtype, type of the terminals. Defaults to np.uint8 for
Atari 2600.
action_shape: tuple of ints, the shape for the action vector. Empty tuple
means the action is a scalar.
action_dtype: np.dtype, type of elements in the action.
reward_shape: tuple of ints, the shape of the reward vector. Empty tuple
means the reward is a scalar.
reward_dtype: np.dtype, type of elements in the reward.
Raises:
ValueError: If update_horizon is not positive.
ValueError: If discount factor is not in [0, 1].
"""
if replay_capacity < update_horizon + 1:
raise ValueError(
'Update horizon ({}) should be significantly smaller '
'than replay capacity ({}).'.format(update_horizon, replay_capacity))
if not update_horizon >= 1:
raise ValueError('Update horizon must be positive.')
if not 0.0 <= gamma <= 1.0:
raise ValueError('Discount factor (gamma) must be in [0, 1].')
self.batch_size = batch_size
# Mainly used to allow subclasses to pass self.memory.
if wrapped_memory is not None:
self.memory = wrapped_memory
else:
self.memory = OutOfGraphReplayBuffer(
observation_shape,
stack_size,
replay_capacity,
batch_size,
update_horizon,
gamma,
max_sample_attempts,
observation_dtype=observation_dtype,
terminal_dtype=terminal_dtype,
extra_storage_types=extra_storage_types,
action_shape=action_shape,
action_dtype=action_dtype,
reward_shape=reward_shape,
reward_dtype=reward_dtype)
self.create_sampling_ops(use_staging)
def add(self, observation, action, reward, terminal, *args):
"""Adds a transition to the replay memory.
Since the next_observation in the transition will be the observation added
next there is no need to pass it.
If the replay memory is at capacity the oldest transition will be discarded.
Args:
observation: np.array with shape observation_shape.
action: int, the action in the transition.
reward: float, the reward received in the transition.
terminal: np.dtype, acts as a boolean indicating whether the transition
was terminal (1) or not (0).
*args: extra contents with shapes and dtypes according to
extra_storage_types.
"""
self.memory.add(observation, action, reward, terminal, *args)
def create_sampling_ops(self, use_staging):
"""Creates the ops necessary to sample from the replay buffer.
Creates the transition dictionary containing the sampling tensors.
Args:
use_staging: bool, when True it would use a staging area to prefetch
the next sampling batch.
"""
with tf.name_scope('sample_replay'):
with tf.device('/cpu:*'):
transition_type = self.memory.get_transition_elements()
transition_tensors = tf.py_func(
self.memory.sample_transition_batch, [],
[return_entry.type for return_entry in transition_type],
name='replay_sample_py_func')
self._set_transition_shape(transition_tensors, transition_type)
if use_staging:
transition_tensors = self._set_up_staging(transition_tensors)
self._set_transition_shape(transition_tensors, transition_type)
# Unpack sample transition into member variables.
self.unpack_transition(transition_tensors, transition_type)
def _set_transition_shape(self, transition, transition_type):
"""Set shape for each element in the transition.
Args:
transition: tuple of tf.Tensors.
      transition_type: tuple of ReplayElements describing the shapes of the
respective tensors.
"""
for element, element_type in zip(transition, transition_type):
element.set_shape(element_type.shape)
def _set_up_staging(self, transition):
"""Sets up staging ops for prefetching the next transition.
This allows us to hide the py_func latency. To do so we use a staging area
to pre-fetch the next batch of transitions.
Args:
transition: tuple of tf.Tensors with shape
memory.get_transition_elements().
Returns:
prefetched_transition: tuple of tf.Tensors with shape
memory.get_transition_elements() that have been previously prefetched.
"""
transition_type = self.memory.get_transition_elements()
# Create the staging area in CPU.
prefetch_area = contrib_staging.StagingArea(
[shape_with_type.type for shape_with_type in transition_type])
# Store prefetch op for tests, but keep it private -- users should not be
# calling _prefetch_batch.
self._prefetch_batch = prefetch_area.put(transition)
initial_prefetch = tf.cond(
tf.equal(prefetch_area.size(), 0),
lambda: prefetch_area.put(transition), tf.no_op)
# Every time a transition is sampled self.prefetch_batch will be
# called. If the staging area is empty, two put ops will be called.
with tf.control_dependencies([self._prefetch_batch, initial_prefetch]):
prefetched_transition = prefetch_area.get()
return prefetched_transition
def unpack_transition(self, transition_tensors, transition_type):
"""Unpacks the given transition into member variables.
Args:
transition_tensors: tuple of tf.Tensors.
transition_type: tuple of ReplayElements matching transition_tensors.
"""
self.transition = collections.OrderedDict()
for element, element_type in zip(transition_tensors, transition_type):
self.transition[element_type.name] = element
# TODO(bellemare): These are legacy and should probably be removed in
# future versions.
self.states = self.transition['state']
self.actions = self.transition['action']
self.rewards = self.transition['reward']
self.next_states = self.transition['next_state']
self.next_actions = self.transition['next_action']
self.next_rewards = self.transition['next_reward']
self.terminals = self.transition['terminal']
self.indices = self.transition['indices']
def save(self, checkpoint_dir, iteration_number):
"""Save the underlying replay buffer's contents in a file.
Args:
checkpoint_dir: str, the directory where to read the numpy checkpointed
files from.
iteration_number: int, the iteration_number to use as a suffix in naming
numpy checkpoint files.
"""
self.memory.save(checkpoint_dir, iteration_number)
def load(self, checkpoint_dir, suffix):
"""Loads the replay buffer's state from a saved file.
Args:
checkpoint_dir: str, the directory where to read the numpy checkpointed
files from.
suffix: str, the suffix to use in numpy checkpoint files.
"""
self.memory.load(checkpoint_dir, suffix)
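# Illustrative sketch (added for exposition, not part of Dopamine): the basic
# add/sample cycle of OutOfGraphReplayBuffer with a 3-step update horizon,
# matching the n-step reward accumulation described in the module docstring.
# All constants are arbitrary example values and the helper is never called.
def _example_out_of_graph_usage():
  replay = OutOfGraphReplayBuffer(
      observation_shape=(84, 84),
      stack_size=4,
      replay_capacity=1000,
      batch_size=8,
      update_horizon=3,
      gamma=0.99)
  observation = np.zeros((84, 84), dtype=np.uint8)
  for step in range(200):
    terminal = 1 if (step + 1) % 50 == 0 else 0
    replay.add(observation, step % 4, 1.0, terminal)
  # Each sampled 'reward' is the discounted sum r_t + gamma*r_{t+1} +
  # gamma^2*r_{t+2}, truncated early if a terminal is reached.
  state, action, reward, next_state, _, _, terminal, indices = (
      replay.sample_transition_batch())
  assert state.shape == (8, 84, 84, 4)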
|
the-stack_0_12534 | import numpy as np
from grabscreen import grab_screen
import cv2
import time
from getkeys import key_check
import os
def keys_to_output(keys):
'''
    Convert pressed keys to a one-hot array over [A, W, D]:
    exactly one of left, straight, right is set to 1.
'''
output = [0, 0, 0]
if 'A' in keys:
output[0] = 1
elif 'D' in keys:
output[2] = 1
else:
output[1] = 1
return output
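# Quick illustrative check of the mapping above (left / straight / right); the
# helper exists only for exposition and is never called by the script.
def _example_keys_to_output():
    assert keys_to_output(['A']) == [1, 0, 0]
    assert keys_to_output(['D']) == [0, 0, 1]
    assert keys_to_output(['W']) == [0, 1, 0]  # anything that is not A or D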
datafile_name = 'training_data-'
datafile_no = 1
extension = '.npy'
training_data = []
cwd = os.getcwd()
for file_name in os.listdir(cwd):
if file_name == (datafile_name + str(datafile_no) + extension):
        print('{} exists, loading previous data!'.format(file_name))
datafile_no += 1
        # Extend (not append) so earlier samples stay as individual entries.
        training_data.extend(list(np.load(file_name, allow_pickle=True)))
if __name__ == "__main__":
for i in list(range(4))[::-1]:
print(i + 1)
time.sleep(1)
paused = False
while True:
if not paused:
# 800x600 windowed mode
screen = grab_screen(region=(0, 40, 800, 640))
last_time = time.time()
screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
keys = key_check()
output = keys_to_output(keys)
cv2.imshow('screen', screen)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
# resize to something a bit more acceptable for a CNN
input_screen = cv2.resize(screen, (160, 120))
training_data.append([input_screen, output])
if len(training_data) % 1000 == 0:
print(len(training_data))
np.save(datafile_name + str(datafile_no), training_data)
datafile_no += 1
keys = key_check()
if 'T' in keys:
if paused:
paused = False
print('Resume!')
time.sleep(1)
else:
paused = True
print('Paused!')
time.sleep(1)
|
the-stack_0_12535 | import json
import asyncio
from typing import Optional
import websockets
from websockets.http import Headers
import asyncclick as click
import pprint
from .console import console
from .settings import get_settings
import pkg_resources
class WebsocketClient(object):
def __init__(self, host: str, token: str, proto: str, api_version: str = "v3.0"):
self.host = host
self.proto = proto
self.api_version = api_version
self.websocket = None
self.do_listen = True
self.token = token
self.queue = asyncio.Queue()
self._connected = False
@property
def user_agent(self):
return {
"user-agent":
f"threedi-ws-client/{pkg_resources.get_distribution('threedi_ws_client').version}"
}
def get_queue(self):
return self.queue
async def is_connected(self):
while self._connected is False:
await asyncio.sleep(0.5)
async def listen(self, endpoint_uri: str):
uri = f"{self.proto}://{self.host}/{self.api_version}/{endpoint_uri}"
console.print(f"Trying to connect to {uri} now...")
headers = Headers(authorization=f"{self.token}")
headers.update(**self.user_agent)
sim_time: Optional[int] = None
async with websockets.connect(uri, extra_headers=headers) as websocket:
console.print(f"Connected to {uri}")
self._connected = True
async for message in websocket:
try:
message = json.loads(message)
content = message["data"]
try:
sim_time = content["time"]
except (KeyError, TypeError):
pass
if sim_time is not None:
message["sim_time"] = sim_time
await self.queue.put(message)
except websockets.exceptions.ConnectionClosedOK:
self.do_listen = False
console.print("Websocket connection closed")
async def close(self):
self.do_listen = False
if self.websocket:
await self.websocket.close()
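# Minimal usage sketch (added for exposition): run listen() as a background
# task and consume messages from the queue. The endpoint path
# "active-simulations/" is an assumption; substitute the real websocket
# endpoint exposed by your API.
async def _example_run(host: str, token: str):
    client = WebsocketClient(host=host, token=token, proto="wss")
    listener = asyncio.ensure_future(client.listen("active-simulations/"))
    await client.is_connected()
    queue = client.get_queue()
    for _ in range(10):  # consume a handful of messages, then stop
        message = await queue.get()
        console.print(message)
    await client.close()
    listener.cancel()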
|
the-stack_0_12536 | #
# Copyright (C) 2016 UAVCAN Development Team <uavcan.org>
#
# This software is distributed under the terms of the MIT License.
#
# Author: Pavel Kirienko <[email protected]>
#
import time
import logging
from functools import partial
from PyQt5.QtWidgets import QMainWindow, QWidget, QVBoxLayout, QAction
from PyQt5.QtCore import QTimer, Qt
from PyQt5.QtGui import QKeySequence
from .. import get_app_icon, get_icon
from .plot_areas import PLOT_AREAS
from .plot_container import PlotContainerWidget
logger = logging.getLogger(__name__)
class PlotterWindow(QMainWindow):
def __init__(self, get_transfer_callback):
super(PlotterWindow, self).__init__()
self.setWindowTitle('UAVCAN Plotter')
self.setWindowIcon(get_app_icon())
self._active_data_types = set()
self._get_transfer = get_transfer_callback
self._update_timer = QTimer(self)
self._update_timer.setSingleShot(False)
self._update_timer.timeout.connect(self._update)
self._update_timer.start(50)
self._base_time = time.monotonic()
self._plot_containers = []
#
# Control menu
#
control_menu = self.menuBar().addMenu('&Control')
self._stop_action = QAction(get_icon('stop'), '&Stop Updates', self)
self._stop_action.setStatusTip('While stopped, all new data will be discarded')
self._stop_action.setShortcut(QKeySequence('Ctrl+Shift+S'))
self._stop_action.setCheckable(True)
self._stop_action.toggled.connect(self._on_stop_toggled)
control_menu.addAction(self._stop_action)
self._pause_action = QAction(get_icon('pause'), '&Pause Updates', self)
self._pause_action.setStatusTip('While paused, new data will be accumulated in memory '
'to be processed once un-paused')
self._pause_action.setShortcut(QKeySequence('Ctrl+Shift+P'))
self._pause_action.setCheckable(True)
self._pause_action.toggled.connect(self._on_pause_toggled)
control_menu.addAction(self._pause_action)
control_menu.addSeparator()
self._reset_time_action = QAction(get_icon('history'), '&Reset', self)
self._reset_time_action.setStatusTip('Base time will be reset; all plots will be reset')
self._reset_time_action.setShortcut(QKeySequence('Ctrl+Shift+R'))
self._reset_time_action.triggered.connect(self._do_reset)
control_menu.addAction(self._reset_time_action)
#
# New Plot menu
#
plot_menu = self.menuBar().addMenu('&New Plot')
for idx, pl_name in enumerate(PLOT_AREAS.keys()):
new_plot_action = QAction('Add ' + pl_name, self)
new_plot_action.setStatusTip('Add new plot window')
new_plot_action.setShortcut(QKeySequence('Ctrl+Alt+' + str(idx)))
new_plot_action.triggered.connect(partial(self._do_add_new_plot, pl_name))
plot_menu.addAction(new_plot_action)
#
# Window stuff
#
self.statusBar().showMessage('Use the "New Plot" menu to add plots')
self.setCentralWidget(None)
self.resize(600, 400)
def _on_stop_toggled(self, checked):
self._pause_action.setChecked(False)
self.statusBar().showMessage('Stopped' if checked else 'Un-stopped')
def _on_pause_toggled(self, checked):
self.statusBar().showMessage('Paused' if checked else 'Un-paused')
def _do_add_new_plot(self, plot_area_name):
def remove():
self._plot_containers.remove(plc)
plc = PlotContainerWidget(self, PLOT_AREAS[plot_area_name], self._active_data_types)
plc.on_close = remove
self._plot_containers.append(plc)
docks = [
Qt.LeftDockWidgetArea,
Qt.LeftDockWidgetArea,
Qt.RightDockWidgetArea,
Qt.RightDockWidgetArea,
]
dock_to = docks[(len(self._plot_containers) - 1) % len(docks)]
self.addDockWidget(dock_to, plc)
if len(self._plot_containers) > 1:
self.statusBar().showMessage('Drag plots by the header to rearrange or detach them')
def _do_reset(self):
self._base_time = time.monotonic()
for plc in self._plot_containers:
try:
plc.reset()
except Exception:
logger.error('Failed to reset plot container', exc_info=True)
logger.info('Reset done, new time base %r', self._base_time)
def _update(self):
if self._stop_action.isChecked():
while self._get_transfer() is not None: # Discarding everything
pass
return
if not self._pause_action.isChecked():
while True:
tr = self._get_transfer()
if not tr:
break
self._active_data_types.add(tr.data_type_name)
for plc in self._plot_containers:
try:
plc.process_transfer(tr.ts_mono - self._base_time, tr)
except Exception:
logger.error('Plot container failed to process a transfer', exc_info=True)
for plc in self._plot_containers:
try:
plc.update()
except Exception:
logger.error('Plot container failed to update', exc_info=True)
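# Illustrative wiring sketch (added for exposition, not part of the tool): the
# window polls a callable that returns the next received transfer (an object
# with ``ts_mono`` and ``data_type_name`` attributes) or None when nothing is
# pending. Assumes a QApplication event loop is already running; the queue and
# helper names are assumptions.
def _example_plotter_wiring():
    import queue
    rx_queue = queue.Queue()
    def get_transfer():
        try:
            return rx_queue.get_nowait()
        except queue.Empty:
            return None
    window = PlotterWindow(get_transfer)
    window.show()
    return window, rx_queue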
|
the-stack_0_12537 | import datetime
import os
import shutil
import sys
import tempfile
from invoke import task
from invoke.exceptions import Exit
from .build_tags import get_default_build_tags
from .utils import (
REPO_PATH,
bin_name,
get_build_flags,
get_git_branch_name,
get_git_commit,
get_go_version,
get_version,
get_version_numeric_only,
)
BIN_DIR = os.path.join(".", "bin", "process-agent")
BIN_PATH = os.path.join(BIN_DIR, bin_name("process-agent", android=False))
GIMME_ENV_VARS = ['GOROOT', 'PATH']
@task
def build(
ctx,
race=False,
go_version=None,
incremental_build=False,
major_version='7',
python_runtimes='3',
arch="x64",
go_mod="vendor",
):
"""
Build the process agent
"""
ldflags, gcflags, env = get_build_flags(
ctx, arch=arch, major_version=major_version, python_runtimes=python_runtimes
)
# generate windows resources
if sys.platform == 'win32':
windres_target = "pe-x86-64"
if arch == "x86":
env["GOARCH"] = "386"
windres_target = "pe-i386"
ver = get_version_numeric_only(ctx, env, major_version=major_version)
maj_ver, min_ver, patch_ver = ver.split(".")
resdir = os.path.join(".", "cmd", "process-agent", "windows_resources")
ctx.run(
"windmc --target {target_arch} -r {resdir} {resdir}/process-agent-msg.mc".format(
resdir=resdir, target_arch=windres_target
)
)
ctx.run(
"windres --define MAJ_VER={maj_ver} --define MIN_VER={min_ver} --define PATCH_VER={patch_ver} -i cmd/process-agent/windows_resources/process-agent.rc --target {target_arch} -O coff -o cmd/process-agent/rsrc.syso".format(
maj_ver=maj_ver, min_ver=min_ver, patch_ver=patch_ver, target_arch=windres_target
)
)
# TODO use pkg/version for this
main = "main."
ld_vars = {
"Version": get_version(ctx, major_version=major_version),
"GoVersion": get_go_version(),
"GitBranch": get_git_branch_name(),
"GitCommit": get_git_commit(),
"BuildDate": datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
}
goenv = {}
if go_version:
lines = ctx.run("gimme {version}".format(version=go_version)).stdout.split("\n")
for line in lines:
for env_var in GIMME_ENV_VARS:
if env_var in line:
goenv[env_var] = line[line.find(env_var) + len(env_var) + 1 : -1].strip('\'\"')
ld_vars["GoVersion"] = go_version
# extend PATH from gimme with the one from get_build_flags
if "PATH" in os.environ and "PATH" in goenv:
goenv["PATH"] += ":" + os.environ["PATH"]
env.update(goenv)
ldflags += ' '.join(["-X '{name}={value}'".format(name=main + key, value=value) for key, value in ld_vars.items()])
build_tags = get_default_build_tags(build="process-agent", arch=arch)
## secrets is not supported on windows because the process agent still runs as
## root. No matter what `get_default_build_tags()` returns, take secrets out.
if sys.platform == 'win32' and "secrets" in build_tags:
build_tags.remove("secrets")
# TODO static option
cmd = 'go build -mod={go_mod} {race_opt} {build_type} -tags "{go_build_tags}" '
cmd += '-o {agent_bin} -gcflags="{gcflags}" -ldflags="{ldflags}" {REPO_PATH}/cmd/process-agent'
args = {
"go_mod": go_mod,
"race_opt": "-race" if race else "",
"build_type": "" if incremental_build else "-a",
"go_build_tags": " ".join(build_tags),
"agent_bin": BIN_PATH,
"gcflags": gcflags,
"ldflags": ldflags,
"REPO_PATH": REPO_PATH,
}
ctx.run(cmd.format(**args), env=env)
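# Example invocation (illustrative; the exact task namespace depends on how this
# module is registered in the invoke collection):
#
#   inv process-agent.build --race --major-version 7 --arch x64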
@task
def build_dev_image(ctx, image=None, push=False, base_image="datadog/agent:latest"):
"""
Build a dev image of the process-agent based off an existing datadog-agent image
image: the image name used to tag the image
push: if true, run a docker push on the image
    base_image: base the docker image on this already built image (default: datadog/agent:latest)
"""
if image is None:
raise Exit(message="image was not specified")
with TempDir() as docker_context:
ctx.run("cp tools/ebpf/Dockerfiles/Dockerfile-process-agent-dev {to}".format(to=docker_context + "/Dockerfile"))
ctx.run("cp bin/process-agent/process-agent {to}".format(to=docker_context + "/process-agent"))
ctx.run("cp bin/system-probe/system-probe {to}".format(to=docker_context + "/system-probe"))
ctx.run("cp pkg/ebpf/bytecode/build/*.o {to}".format(to=docker_context))
with ctx.cd(docker_context):
ctx.run(
"docker build --tag {image} --build-arg AGENT_BASE={base_image} .".format(
image=image, base_image=base_image
)
)
if push:
ctx.run("docker push {image}".format(image=image))
class TempDir:
def __enter__(self):
self.fname = tempfile.mkdtemp()
print("created tempdir: {name}".format(name=self.fname))
return self.fname
def __exit__(self, exception_type, exception_value, traceback):
print("deleting tempdir: {name}".format(name=self.fname))
shutil.rmtree(self.fname)
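# Minimal usage sketch (illustrative only): TempDir deletes its directory when the
# with-block exits, which is how build_dev_image stages its docker build context.
def _example_tempdir_usage():
    with TempDir() as workdir:
        print("staging files in", workdir)
    # the temporary directory has already been removed at this point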
|
the-stack_0_12539 | #!/usr/bin/env python3
"""
This module is used to dump out the SQL/DDL that was used to create any object
(of any type).
End users typically will not use this module directly. Instead, they will use
wrapper scripts that utilize this module. These include
- yb_ddl_table.py
- yb_ddl_view.py
- yb_ddl_sequence.py
"""
import os
import re
import sys
import copy
# fix for deepcopy in python 2.7
copy._deepcopy_dispatch[type(re.compile(''))] = lambda r, _: r
from yb_common import Common, Text, Util
from yb_get_table_names import get_table_names
from yb_get_view_names import get_view_names
from yb_get_sequence_names import get_sequence_names
class ddl_object(Util):
"""Issue the command used to dump out the SQL/DDL that was used to create a
given object.
"""
stored_proc_describe_query = """WITH
stored_proc_describe AS (
SELECT
ROW_NUMBER() OVER (ORDER BY LOWER(n.nspname), LOWER(p.proname)) AS ordinal
, n.nspname AS schema
, p.proname AS stored_proc
, pg_catalog.pg_get_userbyid(p.proowner) AS owner
, pg_catalog.pg_get_functiondef(p.oid) AS raw_ddl
, CASE
WHEN p.proisagg THEN 'agg'
WHEN p.proiswindow THEN 'window'
WHEN p.prosp THEN 'stored procedure'
WHEN p.prorettype = 'pg_catalog.trigger'::pg_catalog.regtype THEN 'trigger'
ELSE 'normal'
END AS type
, '-- Schema: ' || schema
|| CHR(10) || 'CREATE PROCEDURE '
|| stored_proc || REPLACE(REGEXP_REPLACE(raw_ddl, '[^(]*', ''), '$function$', '$CODE$') AS ddl
FROM
{database}.pg_catalog.pg_proc AS p
LEFT JOIN {database}.pg_catalog.pg_namespace AS n
ON n.oid = p.pronamespace
WHERE
n.nspname NOT IN ('sys', 'pg_catalog', 'information_schema')
AND type = 'stored procedure'
AND {filter_clause}
)
SELECT
DECODE(ordinal, 1, '', ', ')
|| '{{' || '"ordinal": ' || ordinal::VARCHAR || ''
|| ',"owner":""\" ' || owner || ' ""\"'
|| ',"database":""\" ' || '{database}' || ' ""\"'
|| ',"schema":""\" ' || schema || ' ""\"'
|| ',"stored_proc":""\" ' || stored_proc || ' ""\"'
|| ',"ddl":""\" ' || ddl || ' ""\"' || '}}' AS data
FROM
stored_proc_describe
ORDER BY LOWER(schema), LOWER(stored_proc)
"""
config = {
'optional_args_single': ['database']
, 'output_tmplt_default': '{ddl}{^M}' }
def init_config(self, object_type):
"""Initialize config dict.
"""
cmd_line_args = {
'sequence' : "@$HOME/conn.args --current_schema dev --sequence_like '%id%' --"
, 'stored_proc' : "@$HOME/conn.args --current_schema dev --stored_proc_like '%id%' --"
, 'table' : "@$HOME/conn.args --current_schema dev --table_like 'sale_%' --"
, 'view' : "@$HOME/conn.args --schema_in dev Prod --with_db --view_like '%sale%' --"
}
self.config['description'] = ('Return the {type}/s DDL for the requested'
' database. Use {type} filters to limit the set'
            ' of {type}s returned.').format(type = object_type)
self.config['optional_args_multi'] = ['owner', 'schema', object_type]
self.config['usage_example'] = {
'cmd_line_args': cmd_line_args[object_type]
, 'file_args': [Util.conn_args_file] }
self.config['output_tmplt_vars'] = []
if object_type == 'table':
self.config['output_tmplt_vars'].append('rowcount')
self.config['output_tmplt_vars'].extend(['%s_path' % object_type
, 'schema_path', 'ddl', 'ordinal'
, object_type, 'schema', 'database', 'owner'])
self.object_type = object_type
def init(self, object_type, db_conn=None, args_handler=None):
"""Initialize ddl_object class.
This initialization performs argument parsing and login verification.
It also provides access to functions such as logging and command
execution.
"""
self.init_config(object_type)
self.init_default(db_conn, args_handler)
def additional_args(self):
args_ddl_grp = self.args_handler.args_parser.add_argument_group('optional DDL arguments')
args_ddl_grp.add_argument("--with_schema"
, action='store_true', help="add the schema name to the %s DDL" % self.object_type)
args_ddl_grp.add_argument("--with_db"
, action='store_true', help="add the database name to the %s DDL" % self.object_type)
args_ddl_grp.add_argument("--schema_name"
, help="set a new schema name to the %s DDL" % self.object_type)
args_ddl_grp.add_argument("--db_name"
, help="set a new database name to the %s DDL" % self.object_type)
if self.object_type in ('stored_proc', 'view'):
args_ddl_grp.add_argument("--or_replace"
, action="store_true", help="add the 'OR REPLACE' clause to the %s DDL" % self.object_type)
def additional_args_process(self):
if self.args_handler.args.schema_name:
self.args_handler.args.with_schema = True
if self.args_handler.args.db_name:
self.args_handler.args.with_db = True
def execute(self):
describe_sql = self.get_describe_sql()
output = self.exec_query_and_apply_template(describe_sql)
if output != '':
output = self.ddl_modifications(
output, self.args_handler.args)
if self.args_handler.args.exec_output:
self.cmd_result = self.db_conn.ybsql_query(output)
self.cmd_result.on_error_exit()
output = self.cmd_result.stdout
return output
def object_meta_data_to_ybsql_py_dict(self, meta_data):
# 'object_path|ordinal|owner|database|schema|object'
ybsql_py_key_values = []
ybsql_py_key_values.append(self.sql_to_ybsql_py_key_value('ddl'
, 'DESCRIBE %s ONLY DDL;' % meta_data[0] ) )
if (self.object_type == 'table'
and re.search(r'\{rowcount[\}\:]', self.args_handler.args.template) ):
ybsql_py_key_values.append(self.sql_to_ybsql_py_key_value('rowcount'
, 'SELECT COUNT(*) FROM %s;' % meta_data[0] ) )
ybsql_py_key_values.extend(
self.dict_to_ybsql_py_key_values(
{ 'ordinal': meta_data[1]
, 'owner': meta_data[2]
, 'database': meta_data[3]
, 'schema': meta_data[4]
, self.object_type: meta_data[5] } ) )
py_dict = self.ybsql_py_key_values_to_py_dict(ybsql_py_key_values)
return py_dict
def get_describe_sql(self):
"""Build up SQL DESCRIBE statement/s.
:return: A string containing the SQL DESCRIBE statement
"""
if self.object_type == 'stored_proc':
self.db_filter_args.schema_set_all_if_none()
filter_clause = self.db_filter_args.build_sql_filter(
{'schema':'schema', 'stored_proc':'stored_proc', 'owner':'owner'} )
describe_sql = ddl_object.stored_proc_describe_query.format(
filter_clause = filter_clause
, database = self.db_conn.database)
else:
args_handler = copy.deepcopy(self.args_handler)
args_handler.args.exec_output = False
args_handler.args.template = ('{%s_path}|{ordinal}|{owner}|{database}|{schema}|{%s}'
% (self.object_type, self.object_type))
code = ('get_{object_type}_names'
'(db_conn=self.db_conn, args_handler=args_handler)').format(
object_type=self.object_type)
gons = eval(code)
object_meta_data_rows = gons.execute()
describe_objects = []
if object_meta_data_rows.strip() != '':
for object_meta_data in object_meta_data_rows.strip().split('\n'):
describe_clause = self.object_meta_data_to_ybsql_py_dict(object_meta_data.split('|'))
describe_objects.append(describe_clause)
describe_sql = '\echo ,\n'.join(describe_objects)
return describe_sql
def ddl_modifications(self, ddl, args):
"""
Modify a given DDL statement by optionally adding db/schema name to a
CREATE statement and transforming all SQL reserved words to uppercase.
:param ddl: The DDL statement to modify
:param args: The command line args after being processed
:return: A string containing the modified DDL statement
"""
new_ddl = []
ddl_schema = ''
for line in ddl.split('\n'):
token = line.split(':')
if token[0] == '-- Schema':
ddl_schema = token[1].strip()
#add schema and database to object name and quote name where needed
matches = re.match(r"\s*CREATE\s*([^\s]*)\s*([^\s(]*)(.*)"
, line, re.MULTILINE)
if matches:
tablepath = matches.group(2)
if args.with_schema or args.with_db:
tablepath = (
( args.schema_name
if args.schema_name
else ddl_schema)
+ '.' + tablepath
)
if args.with_db:
tablepath = (
( args.db_name
if args.db_name
else self.db_conn.database)
+ '.' + tablepath
)
tablepath = Common.quote_object_paths(tablepath)
line = 'CREATE %s %s%s' % (matches.group(1), tablepath, matches.group(3))
#change all data type key words to upper case
d_types = [
'bigint', 'integer', 'smallint', 'numeric', 'real'
, 'double precision', 'uuid', 'character varying', 'character'
, 'date', 'time without time zone'
, 'timestamp without time zone', 'timestamp with time zone'
, 'ipv4', 'ipv6', 'macaddr', 'macaddr8'
, 'boolean'
]
for data_type in d_types:
line = re.sub(r"( )" + data_type + r"(,?$|\()",
r"\1%s\2" % data_type.upper(), line)
new_ddl.append(line)
new_ddl = '\n'.join(new_ddl).strip() + '\n'
if self.object_type in('stored_proc', 'view') and self.args_handler.args.or_replace:
typ = {'view':'VIEW','stored_proc':'PROCEDURE'}[self.object_type]
new_ddl = new_ddl.replace('CREATE %s'%typ, 'CREATE OR REPLACE %s'%typ)
#remove DDL comments at the beginning of each object definition
new_ddl = re.sub(r"--( |-).*?\n", "", new_ddl)
#correct trailing ';' at end of each definition to be consistent
new_ddl = re.sub(r"(\s*);", ";", new_ddl)
return new_ddl
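# Illustrative wrapper sketch (the import name below is hypothetical): the
# yb_ddl_table.py / yb_ddl_view.py / yb_ddl_sequence.py wrappers mentioned in the
# module docstring only need to call main() with their utility name, e.g.:
#
#     from yb_ddl_object import main
#     main('ddl_table')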
def main(util_name):
ddlo = ddl_object(util_name=util_name, init_default=False)
ddlo.init(object_type=util_name[4:])
print(ddlo.execute())
    exit(ddlo.cmd_result.exit_code)
|
the-stack_0_12541 | """
create_label_files.py
---------------
This script produces:
1. A reoriented version of the processed registered_scene mesh whose AABB is centered at
the origin and has the same dimensions as the OBB, saved as foldername.ply
2. Label files with class labels and projections of the 3D bounding boxes in the format
singleshotpose requires, saved under labels
3. Pixel-wise masks, saved under mask
4. The homogeneous transform of the object with respect to foldername.ply in each
frame
"""
import numpy as np
from pykdtree.kdtree import KDTree
import trimesh
import cv2
import glob
import os
import sys
from tqdm import trange
from scipy.optimize import minimize
from config.registrationParameters import *
import json
def get_camera_intrinsic(folder):
with open(folder+'intrinsics.json', 'r') as f:
camera_intrinsics = json.load(f)
K = np.zeros((3, 3), dtype='float64')
K[0, 0], K[0, 2] = float(camera_intrinsics['fx']), float(camera_intrinsics['ppx'])
K[1, 1], K[1, 2] = float(camera_intrinsics['fy']), float(camera_intrinsics['ppy'])
K[2, 2] = 1.
return (camera_intrinsics, K)
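# For reference, intrinsics.json is expected to provide at least these keys
# (the values below are illustrative, not taken from any real camera):
#
#   {"fx": 615.0, "fy": 615.0, "ppx": 320.0, "ppy": 240.0,
#    "width": 640, "height": 480}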
def compute_projection(points_3D,internal_calibration):
points_3D = points_3D.T
projections_2d = np.zeros((2, points_3D.shape[1]), dtype='float32')
camera_projection = (internal_calibration).dot(points_3D)
projections_2d[0, :] = camera_projection[0, :]/camera_projection[2, :]
projections_2d[1, :] = camera_projection[1, :]/camera_projection[2, :]
return projections_2d
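# Tiny worked example (toy intrinsics, not from the dataset) of the pinhole
# projection performed by compute_projection: u = fx*X/Z + ppx, v = fy*Y/Z + ppy.
def _example_projection():
    K_demo = np.array([[600.0,   0.0, 320.0],
                       [  0.0, 600.0, 240.0],
                       [  0.0,   0.0,   1.0]])
    point = np.array([[0.1, -0.05, 0.5]])  # one 3D point, 0.5 m in front of the camera
    # expected pixel: (600*0.1/0.5 + 320, 600*(-0.05)/0.5 + 240) = (440.0, 180.0)
    return compute_projection(point, K_demo)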
def print_usage():
print("Usage: create_label_files.py <path>")
print("path: all or name of the folder")
print("e.g., create_label_files.py all, create_label_files.py LINEMOD/Cheezit")
if __name__ == "__main__":
try:
if sys.argv[1] == "all":
folders = glob.glob("LINEMOD/*/")
elif sys.argv[1]+"/" in glob.glob("LINEMOD/*/"):
folders = [sys.argv[1]+"/"]
else:
print_usage()
exit()
except:
print_usage()
exit()
for classlabel,folder in enumerate(folders):
# print(folder[8:-1], "is assigned class label:", classlabel)
print("%s is assigned class label %d." % (folder[8:-1],classlabel))
camera_intrinsics, K = get_camera_intrinsic(folder)
path_label = folder + "labels"
if not os.path.exists(path_label):
os.makedirs(path_label)
path_mask = folder + "mask"
if not os.path.exists(path_mask):
os.makedirs(path_mask)
path_transforms = folder + "transforms"
if not os.path.exists(path_transforms):
os.makedirs(path_transforms)
transforms_file = folder + 'transforms.npy'
try:
transforms = np.load(transforms_file)
except:
print("transforms not computed, run compute_gt_poses.py first")
continue
mesh = trimesh.load(folder + "registeredScene.ply")
Tform = mesh.apply_obb()
mesh.export(file_obj = folder + folder[8:-1] +".ply")
points = mesh.bounding_box.vertices
center = mesh.centroid
min_x = np.min(points[:,0])
min_y = np.min(points[:,1])
min_z = np.min(points[:,2])
max_x = np.max(points[:,0])
max_y = np.max(points[:,1])
max_z = np.max(points[:,2])
points = np.array([[min_x, min_y, min_z], [min_x, min_y, max_z], [min_x, max_y, min_z],
[min_x, max_y, max_z], [max_x, min_y, min_z], [max_x, min_y, max_z],
[max_x, max_y, min_z], [max_x, max_y, max_z]])
points_original = np.concatenate((np.array([[center[0],center[1],center[2]]]), points))
points_original = trimesh.transformations.transform_points(points_original,
np.linalg.inv(Tform))
projections = [[],[]]
for i in trange(len(transforms)):
mesh_copy = mesh.copy()
img = cv2.imread(folder+"JPEGImages/" + str(i*LABEL_INTERVAL) + ".jpg")
transform = np.linalg.inv(transforms[i])
transformed = trimesh.transformations.transform_points(points_original, transform)
corners = compute_projection(transformed,K)
corners = corners.T
corners[:,0] = corners[:,0]/int(camera_intrinsics['width'])
corners[:,1] = corners[:,1]/int(camera_intrinsics['height'])
T = np.dot(transform, np.linalg.inv(Tform))
mesh_copy.apply_transform(T)
filename = path_transforms + "/"+ str(i*LABEL_INTERVAL)+".npy"
np.save(filename, T)
sample_points = mesh_copy.sample(10000)
masks = compute_projection(sample_points,K)
masks = masks.T
min_x = np.min(masks[:,0])
min_y = np.min(masks[:,1])
max_x = np.max(masks[:,0])
max_y = np.max(masks[:,1])
image_mask = np.zeros(img.shape[:2],dtype = np.uint8)
for pixel in masks:
cv2.circle(image_mask,(int(pixel[0]),int(pixel[1])), 5, 255, -1)
thresh = cv2.threshold(image_mask, 30, 255, cv2.THRESH_BINARY)[1]
_, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnt = max(contours, key=cv2.contourArea)
image_mask = np.zeros(img.shape[:2],dtype = np.uint8)
cv2.drawContours(image_mask, [cnt], -1, 255, -1)
mask_path = path_mask+"/"+ str(i*LABEL_INTERVAL)+".png"
cv2.imwrite(mask_path, image_mask)
file = open(path_label+"/"+ str(i*LABEL_INTERVAL)+".txt","w")
message = str(classlabel)[:8] + " "
file.write(message)
for pixel in corners:
for digit in pixel:
message = str(digit)[:8] + " "
file.write(message)
message = str((max_x-min_x)/float(camera_intrinsics['width']))[:8] + " "
file.write(message)
message = str((max_y-min_y)/float(camera_intrinsics['height']))[:8]
file.write(message)
file.close()
|
the-stack_0_12543 | from __future__ import print_function, division
import numpy as np
from PyAstronomy.pyaC import pyaErrors as PE
import six.moves as smo
def expCorrRN(n, tau, mean=0.0, std=1.0, rnos=None, fullOut=False):
"""
Generate exponentially correlated random numbers.
This procedure implements the prescription given by
Deserno 2002 ("How to generate exponentially correlated Gaussian
random numbers"). The autocorrelation function of the resulting
numbers decays with the predefined "decay time", tau.
The correlation coefficient of the resulting numbers is given
by exp(-1/tau).
Parameters
----------
n : int
Number of numbers to be generated.
tau : float
Decay time
mean : float, optional
Mean of the numbers to be generated.
Default is 0.0.
std : float, optional
Standard deviation of the generated numbers.
Default is 1.0.
rnos : array, optional
Uncorrelated Gaussian random numbers with mean 0.0
and standard deviation 1.0 to be used to generate
correlated random numbers. If not given, Gaussian
random numbers will be obtained using numpy.random.normal.
fullOut : boolean, optional
If False (default), only the correlated random numbers
will be returned.
Returns
-------
Correlated RNs : array
Correlated Gaussian random numbers.
Uncorrelated RNs : array, optional
The uncorrelated random numbers used to generate
        the correlated numbers (only if `fullOut` is True).
Correlation coefficient : float
        The correlation coefficient (exp(-1/tau), only if
`fullOut` is True).
"""
if rnos is None:
        # Draw n independent standard-normal (mean 0, std 1) random numbers
g = np.random.normal(0.0, 1.0, n)
else:
g = rnos
if len(g) != n:
raise(PE.PyAValError("The length of `rnos` must be n.",
where="expCorrRN",
solution=["Adjust `rnos`.", \
"Remove rnos argument to use internally generated Gaussian random numbers."]))
# Prepare result
result = np.zeros(n)
result[0] = g[0]
# Correlation coefficient
f = np.exp(-1.0/tau)
# Obtain correlated numbers
somf = np.sqrt(1.0 - f**2)
for i in smo.range(1,n):
result[i] = f*result[i-1] + somf*g[i]
result = mean + std*result
if fullOut:
return result, g, f
    return result
|
the-stack_0_12544 | #import xppcall as pxc
from xppcall import xpprun,read_pars_values_from_file,read_init_values_from_file
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
def get_tone_evoked_FR(t,u,v1,v2,tonelist):
"""
brute force method for extracting peaks following tone-evoked activity.
loop over each tone-evoked time frame.
"""
maxes_u = np.zeros((len(tonelist),2))
maxes_v1 = np.zeros((len(tonelist),2))
maxes_v2 = np.zeros((len(tonelist),2))
i = 0
for toneOn,toneOff in tonelist:
# get start/end time index
idx_start = np.argmin(np.abs(t-toneOn))+1
idx_end = np.argmin(np.abs(t-toneOff))-1
#print idx_start,idx_end,toneOn,toneOff
utemp = u[idx_start:idx_end]
v1temp = v1[idx_start:idx_end]
v2temp = v2[idx_start:idx_end]
ttemp = t[idx_start:idx_end]
# https://stackoverflow.com/questions/4624970/finding-local-maxima-minima-with-numpy-in-a-1d-numpy-array
maxes_utemp = np.r_[True, utemp[1:] > utemp[:-1]] & np.r_[utemp[:-1] > utemp[1:], True]
maxes_v1temp = np.r_[True, v1temp[1:] > v1temp[:-1]] & np.r_[v1temp[:-1] > v1temp[1:], True]
maxes_v2temp = np.r_[True, v2temp[1:] > v2temp[:-1]] & np.r_[v2temp[:-1] > v2temp[1:], True]
if False:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(ttemp,utemp)
ax.plot(ttemp,v1temp)
ax.plot(ttemp,v2temp)
ax.scatter(ttemp[maxes_utemp],utemp[maxes_utemp])
ax.scatter(ttemp[maxes_v1temp],v1temp[maxes_v1temp])
ax.scatter(ttemp[maxes_v2temp],v2temp[maxes_v2temp])
plt.show()
#assert(np.sum(maxes_utemp)==1)
#assert(np.sum(maxes_v1temp)==1)
#assert(np.sum(maxes_v2temp)==1)
# take the first max for now.
#print ttemp,maxes_utemp,np.shape(ttemp),np.shape(maxes_utemp)
if np.sum(ttemp[maxes_utemp]) > 1:
maxes_u[i,:] = [ttemp[maxes_utemp][0],utemp[maxes_utemp][0]]
elif np.sum(ttemp[maxes_utemp]) < 1:
pass
else:
maxes_u[i,:] = [ttemp[maxes_utemp],utemp[maxes_utemp]]
if np.sum(ttemp[maxes_v1temp]) > 1:
maxes_v1[i,:] = [ttemp[maxes_v1temp][0],v1temp[maxes_v1temp][0]]
elif np.sum(ttemp[maxes_v1temp]) < 1:
pass
else:
maxes_v1[i,:] = [ttemp[maxes_v1temp],v1temp[maxes_v1temp]]
if np.sum(ttemp[maxes_v2temp]) > 1:
maxes_v2[i,:] = [ttemp[maxes_v2temp][0],v2temp[maxes_v2temp][0]]
elif np.sum(ttemp[maxes_v2temp]) < 1:
pass
else:
            print([ttemp[maxes_v2temp], v2temp[maxes_v2temp]])
maxes_v2[i,:] = [ttemp[maxes_v2temp],v2temp[maxes_v2temp]]
i += 1
#print maxes_u,np.shape(maxes_u)
return maxes_u,maxes_v1,maxes_v2
#print maxes_u
#print maxes_v1
#print maxes_v2
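# Small illustration (made-up array, not simulation output) of the local-maxima
# trick used above: a sample is a peak when it is greater than both neighbours
# (an endpoint only has to beat its single neighbour).
def _example_local_maxima():
    y = np.array([0., 1., 3., 2., 5., 4.])
    peaks = np.r_[True, y[1:] > y[:-1]] & np.r_[y[:-1] > y[1:], True]
    return np.where(peaks)[0]  # -> array([2, 4])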
def run_experiment(fname,pars,inits,return_all=False):
npa, vn = xpprun(fname,
xppname='xppaut',
inits=inits,
parameters=pars,
clean_after=True)
t = npa[:,0]
sv = npa[:,1:]
total_time = t[-1]
u = sv[:,vn.index('u')]
v1 = sv[:,vn.index('v1')]
v2 = sv[:,vn.index('v2')]
ia = sv[:,vn.index('ia')]
g = sv[:,vn.index('g')]
if return_all:
tonelist = [(float(pars['tone1on']),float(pars['tone1off'])),
(float(pars['tone2on']),float(pars['tone2off'])),
(float(pars['tone3on']),float(pars['tone3off'])),
(float(pars['tone4on']),float(pars['tone4off'])),
(float(pars['tone5on']),float(pars['tone5off']))
]
# implement parameter return dict.
return {'t':t,'u':u,'v1':v1,'v2':v2,'inits':inits,'parameters':pars,'tonelist':tonelist,'sv':sv,'vn':vn,'ia':ia,'g':g}
else:
return {'t':t,'u':u,'v1':v1,'v2':v2}
def main():
fname = 'xpp/natan2015_simple_linear.ode'
pars = read_pars_values_from_file(fname)
inits = read_init_values_from_file(fname)
#pars['taud2']=7
# Pyr input weights
#pars['aee']=1.5
#pars['aie1']=1 # PV to pyr weight
#pars['aie2']=1 # SOM to pyr weight
#pars['aei1']=3;pars['aii1']=1;pars['asom2pv']=1 # pv input weights
#pars['aei2']=5;pars['aii2']=2;pars['apv2som']=1 # som input weights
#pars['taud1s2p']=200;pars['tads2p']=5 # som->pn depression
#pars['taud1s2v']=150;pars['tads2v']=5 # som->pv depression
# returns tuple (t,u,v1,v2,inits,parameters,tonelist)
control = run_experiment(
fname,
pars,inits,
return_all=True)
pars['pv_offall']=2
pv_off = run_experiment(
fname,
pars,inits,
return_all=True)
pars['pv_offall']=0
pars['som_offall']=1
som_off = run_experiment(
fname,
pars,inits,
return_all=True)
maxes_u_control,maxes_v1_control,maxes_v2_control = get_tone_evoked_FR(
control['t'],
control['u'],
control['v1'],
control['v2'],
control['tonelist'])
maxes_u_pv_off,maxes_v1_pv_off,maxes_v2_pv_off = get_tone_evoked_FR(
pv_off['t'],
pv_off['u'],
pv_off['v1'],
pv_off['v2'],
pv_off['tonelist'])
maxes_u_som_off,maxes_v1_som_off,maxes_v2_som_off = get_tone_evoked_FR(
som_off['t'],
som_off['u'],
som_off['v1'],
som_off['v2'],
som_off['tonelist'])
gs = gridspec.GridSpec(3, 3)
ax11 = plt.subplot(gs[0, 0])
ax11.set_title('control')
ax11.plot(control['t'],control['u'],label='pyr',color='blue')
ax11.plot(control['t'],control['v1'],label='PV',color='green')
ax11.plot(control['t'],control['v2'],label='SOM',color='red')
# plot detected peaks
ax11.scatter(maxes_u_control[:,0],maxes_u_control[:,1],color='blue')
ax11.legend()
ax21 = plt.subplot(gs[1,0])
ax21.set_title('PV off')
ax21.plot(pv_off['t'],pv_off['u'],label='pyr',color='blue')
ax21.plot(pv_off['t'],pv_off['v1'],label='PV',color='green')
ax21.plot(pv_off['t'],pv_off['v2'],label='SOM',color='red')
ax21.scatter(maxes_u_pv_off[:,0],maxes_u_pv_off[:,1],color='blue')
ax31 = plt.subplot(gs[2,0])
ax31.set_title('SOM off')
ax31.plot(som_off['t'],som_off['u'],label='pyr',color='blue')
ax31.plot(som_off['t'],som_off['v1'],label='PV',color='green')
ax31.plot(som_off['t'],som_off['v2'],label='SOM',color='red')
ax31.scatter(maxes_u_som_off[:,0],maxes_u_som_off[:,1],color='blue')
# plot relative firing rates
ax12 = plt.subplot(gs[:,1])
tone_number = np.array([0,1,2,3,4])
adapted_fr = maxes_u_control[-1,1]
bar_width = 0.2
ax12.set_title('Mean FR')
ax12.bar(tone_number,maxes_u_control[:,1]/adapted_fr,width=bar_width,label='control',color='blue')
ax12.bar(tone_number+bar_width,maxes_u_pv_off[:,1]/adapted_fr,width=bar_width,label='pv_off',color='green')
ax12.bar(tone_number+2*bar_width,maxes_u_som_off[:,1]/adapted_fr,width=bar_width,label='som_off',color='red')
ax12.plot([0,4],[1,1],ls='--',color='gray')
ax12.legend()
plt.tight_layout()
# plot diff in firing rates
ax13 = plt.subplot(gs[:,2])
ax13.set_title('Diff from Control')
ax13.bar(tone_number,np.abs(maxes_u_control[:,1]-maxes_u_pv_off[:,1])/adapted_fr,width=bar_width,label='control-pv_off',color='green')
ax13.bar(tone_number+bar_width,np.abs(maxes_u_control[:,1]-maxes_u_som_off[:,1])/adapted_fr,width=bar_width,label='control-som_off',color='red')
#ax13.plot([0,4],[1,1],ls='--',color='gray')
ax13.legend()
# plot synapses
if False:
sv = control['sv']
vn = control['vn']
aie2 = float(control['parameters']['aie2']) # som to pn
asom2pv = float(control['parameters']['asom2pv']) # som to pv
ws2p = sv[:,vn.index('ws2p')] # som to pn
ws2v = sv[:,vn.index('ws2v')] # som to pv
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.plot(control['t'],aie2*ws2p,label='som to pn')
ax2.plot(control['t'],asom2pv*ws2v,label='som to pv')
plt.show()
if __name__ == "__main__":
main()
|
the-stack_0_12546 | #!/usr/bin/env python3
"""Cyberjunky's 3Commas bot helpers."""
import argparse
import configparser
import json
import os
import ssl
import sys
import time
import uuid
from pathlib import Path
from aiohttp import web
from helpers.logging import Logger, NotificationHandler
from helpers.misc import format_pair
from helpers.threecommas import (
close_threecommas_deal,
control_threecommas_bots,
get_threecommas_account_marketcode,
get_threecommas_deals,
init_threecommas_api,
load_blacklist,
trigger_threecommas_bot_deal,
)
def load_config():
"""Create default or load existing config file."""
cfg = configparser.ConfigParser(allow_no_value=True)
if cfg.read(f"{datadir}/{program}.ini"):
return cfg
cfg["settings"] = {
"timezone": "Europe/Amsterdam",
"debug": False,
"logrotate": 7,
"3c-apikey": "Your 3Commas API Key",
"3c-apisecret": "Your 3Commas API Secret",
"notifications": False,
"notify-urls": ["notify-url1"],
}
cfg["webserver"] = {
"baseurl": uuid.uuid4(),
"port": 8090,
"; Use ssl certificates when connected to the internet!": None,
"ssl": False,
"certfile": "Full path to your fullchain.pem",
"privkey": "Full path to your privkey.pem",
}
cfg[f"webhook_{uuid.uuid4()}"] = {
"control-botids": [12345, 67890],
"usdt-botids": [],
"btc-botids": [],
"comment": "Just a description of this section",
}
with open(f"{datadir}/{program}.ini", "w") as cfgfile:
cfg.write(cfgfile)
return None
# Start application
program = Path(__file__).stem
# Parse and interpret options.
parser = argparse.ArgumentParser(description="Cyberjunky's 3Commas bot helper.")
parser.add_argument(
"-d", "--datadir", help="directory to use for config and logs files", type=str
)
parser.add_argument(
"-b", "--blacklist", help="local blacklist to use instead of 3Commas's", type=str
)
args = parser.parse_args()
if args.datadir:
datadir = args.datadir
else:
datadir = os.getcwd()
# pylint: disable-msg=C0103
if args.blacklist:
blacklistfile = args.blacklist
else:
blacklistfile = None
# Create or load configuration file
config = load_config()
if not config:
# Initialise temp logging
logger = Logger(datadir, program, None, 7, False, False)
logger.info(
f"Created example config file '{datadir}/{program}.ini', edit it and restart the program"
)
sys.exit(0)
else:
# Handle timezone
if hasattr(time, "tzset"):
os.environ["TZ"] = config.get(
"settings", "timezone", fallback="Europe/Amsterdam"
)
time.tzset()
# Init notification handler
notification = NotificationHandler(
program,
config.getboolean("settings", "notifications"),
config.get("settings", "notify-urls"),
)
# Initialise logging
logger = Logger(
datadir,
program,
notification,
int(config.get("settings", "logrotate", fallback=7)),
config.getboolean("settings", "debug"),
config.getboolean("settings", "notifications"),
)
logger.info(f"Loaded configuration from '{datadir}/{program}.ini'")
def webhook_deal(thebot, coin, trade):
"""Check pair and trigger the bot deal."""
# Gather some bot values
base = thebot["pairs"][0].split("_")[0]
exchange = thebot["account_name"]
minvolume = thebot["min_volume_btc_24h"]
logger.debug("Base coin for this bot: %s" % base)
logger.debug("Minimal 24h volume in BTC for this bot: %s" % minvolume)
# Get marketcode (exchange) from account
marketcode = get_threecommas_account_marketcode(logger, api, thebot["account_id"])
if not marketcode:
return
logger.info("Bot exchange: %s (%s)" % (exchange, marketcode))
skipchecks = False
if blacklistfile:
skipchecks = True
# Construct pair based on bot settings and marketcode (BTC stays BTC, but USDT can become BUSD)
pair = format_pair(logger, marketcode, base, coin)
# Check if pair is on 3Commas blacklist
if pair in blacklist:
logger.debug(
"This pair is on your 3Commas blacklist and was skipped: %s" % pair, True
)
return
# Check if pair is in bot's pairlist
if pair not in thebot["pairs"]:
logger.debug(
"This pair is not in bot's pairlist, and was skipped: %s" % pair,
True,
)
return
if trade == "buy":
# We have valid pair for our bot so we trigger an open asap action
logger.info("Triggering your 3Commas bot for buy")
trigger_threecommas_bot_deal(logger, api, thebot, pair, skipchecks)
else:
# Find active deal(s) for this bot so we can close deal(s) for pair
deals = get_threecommas_deals(logger, api, thebot["id"], "active")
if deals:
for deal in deals:
if deal["pair"] == pair:
close_threecommas_deal(logger, api, deal["id"], pair)
return
logger.info(
"No active deal(s) found for bot '%s' and pair '%s'"
% (thebot["name"], pair)
)
else:
logger.info("No active deal(s) found for bot '%s'" % thebot["name"])
# Initialize 3Commas API
api = init_threecommas_api(config)
blacklist = load_blacklist(logger, api, blacklistfile)
# Webserver app
app = web.Application(logger=logger)
# Webserver settings
baseurl = config.get("webserver", "baseurl")
httpport = config.get("webserver", "port")
# SSL
sslenabled = config.getboolean("webserver", "ssl")
certfile = config.get("webserver", "certfile")
privkey = config.get("webserver", "privkey")
# Fetch configured hooks
tokens = list()
for section in config.sections():
if section.startswith("webhook_"):
# Add token to list
tokens.append(section.replace("webhook_", ""))
# Process webhook calls
async def handle(request):
"""Handle web requests."""
data = await request.json()
logger.debug("Webhook alert received: %s" % data)
token = data.get("token")
if token in tokens:
logger.debug("Webhook alert token acknowledged")
# Get and verify actions
actiontype = data.get("action").lower()
# Bot actions
if actiontype in ["enable", "disable"]:
logger.debug(f"Webhook bot command received: {actiontype}")
botids = json.loads(config.get(f"webhook_{token}", "control-botids"))
# Walk through the configured bot(s)
for botid in botids:
error, data = api.request(
entity="bots",
action="show",
action_id=str(botid),
)
if data:
logger.debug(f"Webhook '{actiontype}' bot with id '{botid}'")
control_threecommas_bots(logger, api, data, actiontype)
else:
if error and "msg" in error:
logger.error("Error occurred updating bots: %s" % error["msg"])
else:
logger.error("Error occurred updating bots")
# Deal actions
elif actiontype in ["buy", "sell"]:
logger.debug(f"Webhook deal command received: {actiontype}")
pair = data.get("pair")
base = pair.split("_")[0]
coin = pair.split("_")[1]
logger.debug("Pair: %s" % pair)
logger.debug("Base: %s" % base)
logger.debug("Coin: %s" % coin)
logger.debug("Trade type: %s" % actiontype)
if base == "USDT":
botids = json.loads(config.get(f"webhook_{token}", "usdt-botids"))
if len(botids) == 0:
logger.debug(
"No valid usdt-botids configured for '%s', cannot execute %s"
% (base, actiontype)
)
return web.Response()
elif base == "BTC":
botids = json.loads(config.get(f"webhook_{token}", "btc-botids"))
if len(botids) == 0:
logger.debug(
"No valid btc-botids configured for '%s', cannot execute %s"
% (base, actiontype)
)
return
else:
logger.error("Error the base of pair '%s' is not supported yet!" % pair)
return web.Response()
for botid in botids:
if botid == 0:
logger.debug("No valid botid configured, skipping")
continue
error, data = api.request(
entity="bots",
action="show",
action_id=str(botid),
)
if data:
logger.debug(f"Webhook '{actiontype}' bot with id '{botid}'")
webhook_deal(data, coin, actiontype)
else:
if error and "msg" in error:
logger.error(
"Error occurred triggering bots: %s" % error["msg"]
)
else:
logger.error("Error occurred triggering bots")
else:
logger.error(
f"Webhook alert received ignored, unsupported type '{actiontype}'"
)
return web.Response()
logger.error("Webhook alert received denied, token '%s' invalid" % token)
return web.Response(status=403)
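# Example request bodies this endpoint accepts (illustrative values; the token must
# match one of the webhook_<uuid> sections in the ini file):
#
#   {"token": "<webhook-uuid>", "action": "buy", "pair": "USDT_BTC"}
#   {"token": "<webhook-uuid>", "action": "sell", "pair": "USDT_BTC"}
#   {"token": "<webhook-uuid>", "action": "enable"}   # or "disable", for bot control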
# Prepare webhook webserver
app.router.add_post(f"/{baseurl}", handle)
logger.info(f"Starting webserver listening to '/{baseurl}'")
# https
if sslenabled:
# Build ssl context
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(certfile, privkey)
web.run_app(
app, host="0.0.0.0", port=httpport, ssl_context=context, access_log=None
)
# http
web.run_app(app, host="0.0.0.0", port=httpport, access_log=None)
|
the-stack_0_12547 | ##############################################################################
# Copyright 2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import pytest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
import dfpipeline as dfp
df = pd.DataFrame({
'col1': ['A', 'B', 'C'],
'col2': ['L', 'M', 'N'],
'col3': ['X', 'Y', 'Z'],
})
selected_df = pd.DataFrame({
'col1': ['A', 'B', 'C'],
'col2': ['L', 'M', 'N'],
})
dropped_df = pd.DataFrame({
'col2': ['L', 'M', 'N'],
'col3': ['X', 'Y', 'Z'],
})
def test_select():
select = dfp.ColumnSelector(columns=['col1', 'col2'])
out = select.fit_transform(df.copy())
assert_frame_equal(out, selected_df)
def test_drop():
select = dfp.ColumnSelector(columns=['col1'], drop=True)
out = select.fit_transform(df.copy())
assert_frame_equal(out, dropped_df)
|
the-stack_0_12549 | import shutil
import glob
import os
from collections import OrderedDict
import functools
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import _LRScheduler
#from utility import *
class SegmentationMetric(object):
def __init__(self, num_class):
self.num_class = num_class
self.confusion_matrix = np.zeros((self.num_class,)*2)
def Pixel_Accuracy(self):
Acc = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()
return Acc
def Pixel_Accuracy_Class(self):
Acc = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)
Acc = np.nanmean(Acc)
return Acc
def Mean_Intersection_over_Union(self):
MIoU = np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
MIoU = np.nanmean(MIoU)
return MIoU
def Frequency_Weighted_Intersection_over_Union(self):
freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)
iu = np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
return FWIoU
def _generate_matrix(self, gt_image, pre_image):
mask = (gt_image >= 0) & (gt_image < self.num_class)
label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]
count = np.bincount(label, minlength=self.num_class**2)
confusion_matrix = count.reshape(self.num_class, self.num_class)
return confusion_matrix
def add_batch(self, gt_image, pre_image):
assert gt_image.shape == pre_image.shape
self.confusion_matrix += self._generate_matrix(gt_image, pre_image)
def reset(self):
self.confusion_matrix = np.zeros((self.num_class,) * 2)
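# Quick usage sketch (toy 2x2 labels, illustrative only): accumulate one batch and
# read the metrics off the accumulated confusion matrix.
def _example_segmentation_metric():
    metric = SegmentationMetric(num_class=2)
    gt = np.array([[0, 0], [1, 1]])
    pred = np.array([[0, 1], [1, 1]])
    metric.add_batch(gt, pred)
    # 3 of the 4 pixels match, so Pixel_Accuracy() returns 0.75
    return metric.Pixel_Accuracy(), metric.Mean_Intersection_over_Union()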
class Evaluator(object):
def __init__(self,num_class=2):
self.eval = SegmentationMetric(num_class)
self.reset()
def reset(self):
self.met = {}
def __call__(self, gt, pre):
self.eval.reset()
self.eval.add_batch(gt.data.cpu().numpy(), pre.data.cpu().numpy())
self.met['pa'] = self.eval.Pixel_Accuracy()
self.met['mpa'] = self.eval.Pixel_Accuracy_Class()
self.met['miou'] = self.eval.Mean_Intersection_over_Union()
self.met['fwiou'] = self.eval.Frequency_Weighted_Intersection_over_Union()
return self.met
####
class Reseroir(object):
'''Reservoir: function --> info logging, model checkpoint, config storage'''
def __init__(self, opt):
self.opt = opt
self.best_params = {}
self.init_folder()
self.metric = []
self.clear("pa","mpa","miou","fwiou")
def init_folder(self):
def _sanity_check(folder):
if not os.path.exists(folder):
os.makedirs(folder)
return folder
folder = "/disk/data"
# log info folder : ../loginfo/(time dependent)/loging/
self.log_folder = os.path.join(folder,"loginfo",self.opt.configure_name,"loging")
print(self.log_folder)
_sanity_check(self.log_folder)
# model checkpoint folder : ../loginfo/(time dependent)/models/
self.model_folder = os.path.join(folder,"loginfo",self.opt.configure_name,"models")
_sanity_check(self.model_folder)
# config folder : ../loginfo/(time dependent)/config
self.config_folder = os.path.join(folder,"loginfo",self.opt.configure_name,"config")
_sanity_check(self.config_folder)
logging.basicConfig(level=logging.DEBUG,\
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\
datefmt = '%a, %d %b %Y %H:%M:%S',\
filename = os.path.join(self.config_folder, "loging.txt"),\
filemode='w')
def set_metrics(self, *args):
#print("args --> ", args)
for arg in args:
#print("arg -- > ", arg)
self.best_params[arg] = 0
def reset(self):
for arg in self.best_params.keys():
self.best_params[arg] = -1.0
def clear(self, *args):
self.best_params = {}
#print("clear --> ",args)
self.set_metrics(*args)
def save_checkpoint(self, state_dict, scores, epoch, filename="checkpoint.pth.tar"):
if not isinstance(scores, dict):
raise ValueError("scores for checkpoint must be dict ")
self.metric.append(scores)
print("scores --> ", scores)
for key, value in scores.items():
if key in self.best_params.keys() and value > self.best_params[key]:
self.best_params[key] = value
model_name = os.path.join(self.model_folder, key+'_'+filename)
torch.save(state_dict, model_name)
self.save_configure()
def save_configure(self):
config_name = os.path.join(self.config_folder,"best_configure.yml")
with open(config_name, "w") as f:
p = vars(self.opt)
for key, value in p.items():
f.write(key+": "+str(value)+"\n")
def save_metrics(self):
metric_name = os.path.join(self.model_folder,"metrics.txt")
with open(metric_name, "w") as f:
for data in self.metric:
                f.write(str(data) + "\n")  # scores are dicts; write one per line
if __name__ == "__main__":
#opt = parse_opts()
#s = Saver(opt)
    e = Evaluator(num_class=2)
    x = np.zeros((5, 5), dtype=int)
    x[2:4, 1] = 1
    y = np.zeros((5, 5), dtype=int)
    y[2:3, 1] = 1
    # add_batch and the metric getters live on the wrapped SegmentationMetric
    e.eval.add_batch(x, y)
    print(e.eval.Pixel_Accuracy())
|
the-stack_0_12550 | # Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import collections
import html
import logging
import types
import urllib
from http import HTTPStatus
from inspect import isawaitable
from typing import (
Any,
Awaitable,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Pattern,
Tuple,
Union,
)
import jinja2
from canonicaljson import encode_canonical_json
from typing_extensions import Protocol
from zope.interface import implementer
from twisted.internet import defer, interfaces
from twisted.python import failure
from twisted.web import resource
from twisted.web.server import NOT_DONE_YET, Request
from twisted.web.static import File
from twisted.web.util import redirectTo
from synapse.api.errors import (
CodeMessageException,
Codes,
RedirectException,
SynapseError,
UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background
from synapse.logging.opentracing import trace_servlet
from synapse.util import json_encoder
from synapse.util.caches import intern_dict
from synapse.util.iterutils import chunk_seq
logger = logging.getLogger(__name__)
HTML_ERROR_TEMPLATE = """<!DOCTYPE html>
<html lang=en>
<head>
<meta charset="utf-8">
<title>Error {code}</title>
</head>
<body>
<p>{msg}</p>
</body>
</html>
"""
def return_json_error(f: failure.Failure, request: SynapseRequest) -> None:
"""Sends a JSON error response to clients."""
if f.check(SynapseError):
# mypy doesn't understand that f.check asserts the type.
exc: SynapseError = f.value # type: ignore
error_code = exc.code
error_dict = exc.error_dict()
logger.info("%s SynapseError: %s - %s", request, error_code, exc.msg)
else:
error_code = 500
error_dict = {"error": "Internal server error", "errcode": Codes.UNKNOWN}
logger.error(
"Failed handle request via %r: %r",
request.request_metrics.name,
request,
exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore
)
# Only respond with an error response if we haven't already started writing,
# otherwise lets just kill the connection
if request.startedWriting:
if request.transport:
try:
request.transport.abortConnection()
except Exception:
# abortConnection throws if the connection is already closed
pass
else:
respond_with_json(
request,
error_code,
error_dict,
send_cors=True,
)
def return_html_error(
f: failure.Failure,
request: Request,
error_template: Union[str, jinja2.Template],
) -> None:
"""Sends an HTML error page corresponding to the given failure.
Handles RedirectException and other CodeMessageExceptions (such as SynapseError)
Args:
f: the error to report
request: the failing request
error_template: the HTML template. Can be either a string (with `{code}`,
`{msg}` placeholders), or a jinja2 template
"""
if f.check(CodeMessageException):
# mypy doesn't understand that f.check asserts the type.
cme: CodeMessageException = f.value # type: ignore
code = cme.code
msg = cme.msg
if isinstance(cme, RedirectException):
logger.info("%s redirect to %s", request, cme.location)
request.setHeader(b"location", cme.location)
request.cookies.extend(cme.cookies)
elif isinstance(cme, SynapseError):
logger.info("%s SynapseError: %s - %s", request, code, msg)
else:
logger.error(
"Failed handle request %r",
request,
exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore
)
else:
code = HTTPStatus.INTERNAL_SERVER_ERROR
msg = "Internal server error"
logger.error(
"Failed handle request %r",
request,
exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore
)
if isinstance(error_template, str):
body = error_template.format(code=code, msg=html.escape(msg))
else:
body = error_template.render(code=code, msg=msg)
respond_with_html(request, code, body)
def wrap_async_request_handler(h):
"""Wraps an async request handler so that it calls request.processing.
This helps ensure that work done by the request handler after the request is completed
is correctly recorded against the request metrics/logs.
The handler method must have a signature of "handle_foo(self, request)",
where "request" must be a SynapseRequest.
The handler may return a deferred, in which case the completion of the request isn't
logged until the deferred completes.
"""
async def wrapped_async_request_handler(self, request):
with request.processing():
await h(self, request)
# we need to preserve_fn here, because the synchronous render method won't yield for
# us (obviously)
return preserve_fn(wrapped_async_request_handler)
# Type of a callback method for processing requests
# it is actually called with a SynapseRequest and a kwargs dict for the params,
# but I can't figure out how to represent that.
ServletCallback = Callable[
..., Union[None, Awaitable[None], Tuple[int, Any], Awaitable[Tuple[int, Any]]]
]
class HttpServer(Protocol):
"""Interface for registering callbacks on a HTTP server"""
def register_paths(
self,
method: str,
path_patterns: Iterable[Pattern],
callback: ServletCallback,
servlet_classname: str,
) -> None:
"""Register a callback that gets fired if we receive a http request
with the given method for a path that matches the given regex.
        If the regex contains groups these get passed to the callback via
an unpacked tuple.
Args:
method: The HTTP method to listen to.
path_patterns: The regex used to match requests.
callback: The function to fire if we receive a matched
request. The first argument will be the request object and
subsequent arguments will be any matched groups from the regex.
This should return either tuple of (code, response), or None.
servlet_classname (str): The name of the handler to be used in prometheus
and opentracing logs.
"""
pass
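# Illustrative sketch (hypothetical servlet, not part of this module) of how a
# handler is wired up against a JsonResource via register_paths: the callback is
# passed the request plus the named regex groups and returns (code, json_object).
def _example_register_paths(json_resource: "JsonResource") -> None:
    import re
    async def _on_GET(request, thing_id):
        return 200, {"thing_id": thing_id}
    json_resource.register_paths(
        "GET",
        [re.compile(r"^/_example/things/(?P<thing_id>[^/]*)$")],
        _on_GET,
        "ExampleServlet",
    )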
class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
"""Base class for resources that have async handlers.
Sub classes can either implement `_async_render_<METHOD>` to handle
requests by method, or override `_async_render` to handle all requests.
Args:
extract_context: Whether to attempt to extract the opentracing
context from the request the servlet is handling.
"""
def __init__(self, extract_context=False):
super().__init__()
self._extract_context = extract_context
def render(self, request):
"""This gets called by twisted every time someone sends us a request."""
defer.ensureDeferred(self._async_render_wrapper(request))
return NOT_DONE_YET
@wrap_async_request_handler
async def _async_render_wrapper(self, request: SynapseRequest):
"""This is a wrapper that delegates to `_async_render` and handles
exceptions, return values, metrics, etc.
"""
try:
request.request_metrics.name = self.__class__.__name__
with trace_servlet(request, self._extract_context):
callback_return = await self._async_render(request)
if callback_return is not None:
code, response = callback_return
self._send_response(request, code, response)
except Exception:
# failure.Failure() fishes the original Failure out
# of our stack, and thus gives us a sensible stack
# trace.
f = failure.Failure()
self._send_error_response(f, request)
async def _async_render(self, request: Request):
"""Delegates to `_async_render_<METHOD>` methods, or returns a 400 if
no appropriate method exists. Can be overridden in sub classes for
different routing.
"""
# Treat HEAD requests as GET requests.
request_method = request.method.decode("ascii")
if request_method == "HEAD":
request_method = "GET"
method_handler = getattr(self, "_async_render_%s" % (request_method,), None)
if method_handler:
raw_callback_return = method_handler(request)
# Is it synchronous? We'll allow this for now.
if isawaitable(raw_callback_return):
callback_return = await raw_callback_return
else:
callback_return = raw_callback_return # type: ignore
return callback_return
_unrecognised_request_handler(request)
@abc.abstractmethod
def _send_response(
self,
request: SynapseRequest,
code: int,
response_object: Any,
) -> None:
raise NotImplementedError()
@abc.abstractmethod
def _send_error_response(
self,
f: failure.Failure,
request: SynapseRequest,
) -> None:
raise NotImplementedError()
class DirectServeJsonResource(_AsyncResource):
"""A resource that will call `self._async_on_<METHOD>` on new requests,
formatting responses and errors as JSON.
"""
def __init__(self, canonical_json=False, extract_context=False):
super().__init__(extract_context)
self.canonical_json = canonical_json
def _send_response(
self,
request: SynapseRequest,
code: int,
response_object: Any,
):
"""Implements _AsyncResource._send_response"""
# TODO: Only enable CORS for the requests that need it.
respond_with_json(
request,
code,
response_object,
send_cors=True,
canonical_json=self.canonical_json,
)
def _send_error_response(
self,
f: failure.Failure,
request: SynapseRequest,
) -> None:
"""Implements _AsyncResource._send_error_response"""
return_json_error(f, request)
class JsonResource(DirectServeJsonResource):
"""This implements the HttpServer interface and provides JSON support for
Resources.
Register callbacks via register_paths()
    Callbacks can return a tuple of status code and a dict in which case
the dict will automatically be sent to the client as a JSON object.
The JsonResource is primarily intended for returning JSON, but callbacks
may send something other than JSON, they may do so by using the methods
on the request object and instead returning None.
"""
isLeaf = True
_PathEntry = collections.namedtuple(
"_PathEntry", ["pattern", "callback", "servlet_classname"]
)
def __init__(self, hs, canonical_json=True, extract_context=False):
super().__init__(canonical_json, extract_context)
self.clock = hs.get_clock()
self.path_regexs = {}
self.hs = hs
def register_paths(self, method, path_patterns, callback, servlet_classname):
"""
Registers a request handler against a regular expression. Later request URLs are
checked against these regular expressions in order to identify an appropriate
handler for that request.
Args:
method (str): GET, POST etc
path_patterns (Iterable[str]): A list of regular expressions to which
the request URLs are compared.
callback (function): The handler for the request. Usually a Servlet
servlet_classname (str): The name of the handler to be used in prometheus
and opentracing logs.
"""
method = method.encode("utf-8") # method is bytes on py3
for path_pattern in path_patterns:
logger.debug("Registering for %s %s", method, path_pattern.pattern)
self.path_regexs.setdefault(method, []).append(
self._PathEntry(path_pattern, callback, servlet_classname)
)
def _get_handler_for_request(
self, request: SynapseRequest
) -> Tuple[ServletCallback, str, Dict[str, str]]:
"""Finds a callback method to handle the given request.
Returns:
A tuple of the callback to use, the name of the servlet, and the
key word arguments to pass to the callback
"""
# At this point the path must be bytes.
request_path_bytes: bytes = request.path # type: ignore
request_path = request_path_bytes.decode("ascii")
# Treat HEAD requests as GET requests.
request_method = request.method
if request_method == b"HEAD":
request_method = b"GET"
# Loop through all the registered callbacks to check if the method
# and path regex match
for path_entry in self.path_regexs.get(request_method, []):
m = path_entry.pattern.match(request_path)
if m:
# We found a match!
return path_entry.callback, path_entry.servlet_classname, m.groupdict()
# Huh. No one wanted to handle that? Fiiiiiine. Send 400.
return _unrecognised_request_handler, "unrecognised_request_handler", {}
async def _async_render(self, request):
callback, servlet_classname, group_dict = self._get_handler_for_request(request)
# Make sure we have an appropriate name for this handler in prometheus
# (rather than the default of JsonResource).
request.request_metrics.name = servlet_classname
# Now trigger the callback. If it returns a response, we send it
# here. If it throws an exception, that is handled by the wrapper
# installed by @request_handler.
kwargs = intern_dict(
{
name: urllib.parse.unquote(value) if value else value
for name, value in group_dict.items()
}
)
raw_callback_return = callback(request, **kwargs)
# Is it synchronous? We'll allow this for now.
if isinstance(raw_callback_return, (defer.Deferred, types.CoroutineType)):
callback_return = await raw_callback_return
else:
callback_return = raw_callback_return # type: ignore
return callback_return
class DirectServeHtmlResource(_AsyncResource):
"""A resource that will call `self._async_on_<METHOD>` on new requests,
formatting responses and errors as HTML.
"""
# The error template to use for this resource
ERROR_TEMPLATE = HTML_ERROR_TEMPLATE
def _send_response(
self,
request: SynapseRequest,
code: int,
response_object: Any,
):
"""Implements _AsyncResource._send_response"""
# We expect to get bytes for us to write
assert isinstance(response_object, bytes)
html_bytes = response_object
        respond_with_html_bytes(request, code, html_bytes)
def _send_error_response(
self,
f: failure.Failure,
request: SynapseRequest,
) -> None:
"""Implements _AsyncResource._send_error_response"""
return_html_error(f, request, self.ERROR_TEMPLATE)
class StaticResource(File):
"""
A resource that represents a plain non-interpreted file or directory.
Differs from the File resource by adding clickjacking protection.
"""
def render_GET(self, request: Request):
set_clickjacking_protection_headers(request)
return super().render_GET(request)
def _unrecognised_request_handler(request):
"""Request handler for unrecognised requests
This is a request handler suitable for return from
_get_handler_for_request. It actually just raises an
UnrecognizedRequestError.
Args:
request (twisted.web.http.Request):
"""
raise UnrecognizedRequestError()
class RootRedirect(resource.Resource):
"""Redirects the root '/' path to another path."""
def __init__(self, path):
resource.Resource.__init__(self)
self.url = path
def render_GET(self, request):
return redirectTo(self.url.encode("ascii"), request)
def getChild(self, name, request):
if len(name) == 0:
return self # select ourselves as the child to render
return resource.Resource.getChild(self, name, request)
class OptionsResource(resource.Resource):
"""Responds to OPTION requests for itself and all children."""
def render_OPTIONS(self, request):
request.setResponseCode(204)
request.setHeader(b"Content-Length", b"0")
set_cors_headers(request)
return b""
def getChildWithDefault(self, path, request):
if request.method == b"OPTIONS":
return self # select ourselves as the child to render
return resource.Resource.getChildWithDefault(self, path, request)
class RootOptionsRedirectResource(OptionsResource, RootRedirect):
pass
@implementer(interfaces.IPushProducer)
class _ByteProducer:
"""
Iteratively write bytes to the request.
"""
# The minimum number of bytes for each chunk. Note that the last chunk will
# usually be smaller than this.
min_chunk_size = 1024
def __init__(
self,
request: Request,
iterator: Iterator[bytes],
):
self._request: Optional[Request] = request
self._iterator = iterator
self._paused = False
# Register the producer and start producing data.
self._request.registerProducer(self, True)
self.resumeProducing()
def _send_data(self, data: List[bytes]) -> None:
"""
Send a list of bytes as a chunk of a response.
"""
if not data or not self._request:
return
self._request.write(b"".join(data))
def pauseProducing(self) -> None:
self._paused = True
def resumeProducing(self) -> None:
# We've stopped producing in the meantime (note that this might be
# re-entrant after calling write).
if not self._request:
return
self._paused = False
# Write until there's backpressure telling us to stop.
while not self._paused:
# Get the next chunk and write it to the request.
#
# The output of the JSON encoder is buffered and coalesced until
# min_chunk_size is reached. This is because JSON encoders produce
# very small output per iteration and the Request object converts
# each call to write() to a separate chunk. Without this there would
# be an explosion in bytes written (e.g. b"{" becoming "1\r\n{\r\n").
#
# Note that buffer stores a list of bytes (instead of appending to
# bytes) to hopefully avoid many allocations.
buffer = []
buffered_bytes = 0
while buffered_bytes < self.min_chunk_size:
try:
data = next(self._iterator)
buffer.append(data)
buffered_bytes += len(data)
except StopIteration:
# The entire JSON object has been serialized, write any
# remaining data, finalize the producer and the request, and
# clean-up any references.
self._send_data(buffer)
self._request.unregisterProducer()
self._request.finish()
self.stopProducing()
return
self._send_data(buffer)
def stopProducing(self) -> None:
# Clear a circular reference.
self._request = None
def _encode_json_bytes(json_object: Any) -> bytes:
"""
    Encode an object into JSON and return the encoded bytes.
"""
return json_encoder.encode(json_object).encode("utf-8")
def respond_with_json(
request: SynapseRequest,
code: int,
json_object: Any,
send_cors: bool = False,
canonical_json: bool = True,
):
"""Sends encoded JSON in response to the given request.
Args:
request: The http request to respond to.
code: The HTTP response code.
json_object: The object to serialize to JSON.
send_cors: Whether to send Cross-Origin Resource Sharing headers
https://fetch.spec.whatwg.org/#http-cors-protocol
canonical_json: Whether to use the canonicaljson algorithm when encoding
the JSON bytes.
Returns:
twisted.web.server.NOT_DONE_YET if the request is still active.
"""
# could alternatively use request.notifyFinish() and flip a flag when
# the Deferred fires, but since the flag is RIGHT THERE it seems like
# a waste.
if request._disconnected:
logger.warning(
"Not sending response to request %s, already disconnected.", request
)
return None
if canonical_json:
encoder = encode_canonical_json
else:
encoder = _encode_json_bytes
request.setResponseCode(code)
request.setHeader(b"Content-Type", b"application/json")
request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")
if send_cors:
set_cors_headers(request)
run_in_background(
_async_write_json_to_request_in_thread, request, encoder, json_object
)
return NOT_DONE_YET
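# Usage sketch (editorial, not from the original module): a handler with access
# to a SynapseRequest would typically end its render method with something like
#   return respond_with_json(request, 200, {"ok": True}, send_cors=True)
# The encoding then happens off the reactor thread, and NOT_DONE_YET tells
# Twisted to keep the connection open until the producer finishes the request.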
def respond_with_json_bytes(
request: Request,
code: int,
json_bytes: bytes,
send_cors: bool = False,
):
"""Sends encoded JSON in response to the given request.
Args:
request: The http request to respond to.
code: The HTTP response code.
json_bytes: The json bytes to use as the response body.
send_cors: Whether to send Cross-Origin Resource Sharing headers
https://fetch.spec.whatwg.org/#http-cors-protocol
Returns:
twisted.web.server.NOT_DONE_YET if the request is still active.
"""
if request._disconnected:
logger.warning(
"Not sending response to request %s, already disconnected.", request
)
return
request.setResponseCode(code)
request.setHeader(b"Content-Type", b"application/json")
request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),))
request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")
if send_cors:
set_cors_headers(request)
_write_bytes_to_request(request, json_bytes)
return NOT_DONE_YET
async def _async_write_json_to_request_in_thread(
request: SynapseRequest,
json_encoder: Callable[[Any], bytes],
json_object: Any,
):
"""Encodes the given JSON object on a thread and then writes it to the
request.
This is done so that encoding large JSON objects doesn't block the reactor
thread.
Note: We don't use JsonEncoder.iterencode here as that falls back to the
Python implementation (rather than the C backend), which is *much* more
expensive.
"""
json_str = await defer_to_thread(request.reactor, json_encoder, json_object)
_write_bytes_to_request(request, json_str)
def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None:
"""Writes the bytes to the request using an appropriate producer.
Note: This should be used instead of `Request.write` to correctly handle
large response bodies.
"""
# The problem with dumping all of the response into the `Request` object at
# once (via `Request.write`) is that doing so starts the timeout for the
# next request to be received: so if it takes longer than 60s to stream back
# the response to the client, the client never gets it.
#
# The correct solution is to use a Producer; then the timeout is only
# started once all of the content is sent over the TCP connection.
# To make sure we don't write all of the bytes at once we split it up into
# chunks.
chunk_size = 4096
bytes_generator = chunk_seq(bytes_to_write, chunk_size)
# We use a `_ByteProducer` here rather than `NoRangeStaticProducer` as the
# unit tests can't cope with being given a pull producer.
_ByteProducer(request, bytes_generator)
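# Editorial note: chunk_seq (imported elsewhere) is assumed to slice the payload
# lazily, conceptually similar to this hypothetical helper:
#   def _chunks(data: bytes, size: int) -> Iterator[bytes]:
#       for i in range(0, len(data), size):
#           yield data[i : i + size]
# so the producer above only buffers a few KiB of the response at any one time.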
def set_cors_headers(request: Request):
"""Set the CORS headers so that javascript running in a web browsers can
use this API
Args:
request: The http request to add CORS to.
"""
request.setHeader(b"Access-Control-Allow-Origin", b"*")
request.setHeader(
b"Access-Control-Allow-Methods", b"GET, HEAD, POST, PUT, DELETE, OPTIONS"
)
request.setHeader(
b"Access-Control-Allow-Headers",
b"X-Requested-With, Content-Type, Authorization, Date",
)
def respond_with_html(request: Request, code: int, html: str):
"""
Wraps `respond_with_html_bytes` by first encoding HTML from a str to UTF-8 bytes.
"""
respond_with_html_bytes(request, code, html.encode("utf-8"))
def respond_with_html_bytes(request: Request, code: int, html_bytes: bytes):
"""
Sends HTML (encoded as UTF-8 bytes) as the response to the given request.
Note that this adds clickjacking protection headers and finishes the request.
Args:
request: The http request to respond to.
code: The HTTP response code.
html_bytes: The HTML bytes to use as the response body.
"""
# could alternatively use request.notifyFinish() and flip a flag when
# the Deferred fires, but since the flag is RIGHT THERE it seems like
# a waste.
if request._disconnected:
logger.warning(
"Not sending response to request %s, already disconnected.", request
)
return
request.setResponseCode(code)
request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),))
# Ensure this content cannot be embedded.
set_clickjacking_protection_headers(request)
request.write(html_bytes)
finish_request(request)
def set_clickjacking_protection_headers(request: Request):
"""
Set headers to guard against clickjacking of embedded content.
This sets the X-Frame-Options and Content-Security-Policy headers which instructs
browsers to not allow the HTML of the response to be embedded onto another
page.
Args:
request: The http request to add the headers to.
"""
request.setHeader(b"X-Frame-Options", b"DENY")
request.setHeader(b"Content-Security-Policy", b"frame-ancestors 'none';")
def respond_with_redirect(request: Request, url: bytes) -> None:
"""Write a 302 response to the request, if it is still alive."""
logger.debug("Redirect to %s", url.decode("utf-8"))
request.redirect(url)
finish_request(request)
def finish_request(request: Request):
"""Finish writing the response to the request.
Twisted throws a RuntimeException if the connection closed before the
response was written but doesn't provide a convenient or reliable way to
determine if the connection was closed. So we catch and log the RuntimeException
You might think that ``request.notifyFinish`` could be used to tell if the
request was finished. However the deferred it returns won't fire if the
connection was already closed, meaning we'd have to have called the method
right at the start of the request. By the time we want to write the response
it will already be too late.
"""
try:
request.finish()
except RuntimeError as e:
logger.info("Connection disconnected before response was written: %r", e)
|
the-stack_0_12556 | from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
out_path):
import os
from genomicode import filelib
from genomicode import parallel
from genomicode import alignlib
from Betsy import module_utils
bam_node, ref_node, target_node = antecedents
bam_filenames = module_utils.find_bam_files(bam_node.identifier)
assert bam_filenames, "No .bam files."
target_filenames = filelib.list_files_in_path(
target_node.identifier, endswith=".intervals")
assert target_filenames, "No .intervals files."
ref = alignlib.create_reference_genome(ref_node.identifier)
filelib.safe_mkdir(out_path)
assert len(bam_filenames) == len(target_filenames), \
"Should have an .intervals file for each bam file."
sample2bamfilename = {}
for filename in bam_filenames:
p, f = os.path.split(filename)
sample, ext = os.path.splitext(f)
assert sample not in sample2bamfilename
sample2bamfilename[sample] = filename
sample2targetfilename = {}
for filename in target_filenames:
p, f = os.path.split(filename)
sample, ext = os.path.splitext(f)
assert sample not in sample2targetfilename
sample2targetfilename[sample] = filename
assert len(sample2bamfilename) == len(sample2targetfilename)
missing = [
x for x in sample2bamfilename if x not in sample2targetfilename]
assert not missing, "Missing interval files for %d bam files." % \
len(missing)
# list of (bam_filename, target_filename, log_filename, out_filename)
jobs = []
for sample in sample2bamfilename:
bam_filename = sample2bamfilename[sample]
target_filename = sample2targetfilename[sample]
p, f = os.path.split(bam_filename)
sample, ext = os.path.splitext(f)
out_filename = os.path.join(out_path, "%s.bam" % sample)
log_filename = os.path.join(out_path, "%s.log" % sample)
x = bam_filename, target_filename, log_filename, out_filename
jobs.append(x)
known_sites = []
x1 = module_utils.get_user_option(
user_options, "realign_known_sites1", check_file=True)
x2 = module_utils.get_user_option(
user_options, "realign_known_sites2", check_file=True)
x3 = module_utils.get_user_option(
user_options, "realign_known_sites3", check_file=True)
x = [x1, x2, x3]
x = [x for x in x if x]
known_sites = x
assert known_sites
# java -Xmx5g -jar /usr/local/bin/GATK/GenomeAnalysisTK.jar \
# -T IndelRealigner -R <ref.fa> \
# -I <bam_file> -targetIntervals <target_file> -o <bam_file>
# Make a list of commands.
commands = []
for x in jobs:
bam_filename, target_filename, log_filename, out_filename = x
x = [("known", x) for x in known_sites]
x = alignlib.make_GATK_command(
T="IndelRealigner", R=ref.fasta_file_full,
I=bam_filename, targetIntervals=target_filename,
o=out_filename, _UNHASHABLE=x)
x = "%s >& %s" % (x, log_filename)
commands.append(x)
#for x in commands:
# print x
#import sys; sys.exit(0)
parallel.pshell(commands, max_procs=num_cores)
# Make sure the analysis completed successfully.
out_filenames = [x[-1] for x in jobs]
filelib.assert_exists_nz_many(out_filenames)
def name_outfile(self, antecedents, user_options):
return "realigned.bam"
|
the-stack_0_12557 | from censys_ml.field_transforms import utils
def encode_certificate_fields(input_field, output_field):
lines = []
parsed_versions = [1, 2]
sha_strs = ["SHA1"]
validity_lengths = ["days", "weeks", "months"]
    # 8192 shows up in some Censys records, but it is not included in the options here
rsa_lengths = [512, 768, 1024, 2048, 3072, 4096]
    # 8192 has been considered but left out of the options, even though some Censys records have it
dh_prime_lengths = [512, 768, 1024, 2048, 3072, 4096]
# 521 has been left out
ecdsa_lengths = [256, 384]
# common exponents, 65537 is left out
rsa_exponents = [2, 3, 5, 17, 257]
    if 'rsa_public_key.length' in input_field or 'rsa_params.length' in input_field:
lines.extend(utils.one_hot_encode(input_field, output_field, rsa_lengths, "encode_key_length", "is_"))
lines.extend(utils.one_hot_encode(input_field, output_field, sha_strs, "encode_SHA_support", "supports_"))
elif 'dh_params.prime.length' in input_field:
lines.extend(utils.one_hot_encode(input_field, output_field, dh_prime_lengths, "encode_key_length", "is_"))
elif 'parsed.version' in input_field:
lines.extend(utils.one_hot_encode(input_field, output_field, parsed_versions, "encode_certificate_version"))
elif 'ecdsa_public_key.length' in input_field:
lines.extend(utils.one_hot_encode(input_field, output_field, ecdsa_lengths, "encode_key_length", 'is_'))
elif 'rsa_public_key.exponent' in input_field or 'rsa_params.exponent' in input_field:
lines.extend(utils.one_hot_encode(input_field, output_field, rsa_exponents, "encode_RSA_exponent", "is_"))
elif 'validity.length' in input_field:
lines.extend(utils.one_hot_encode(input_field, output_field, validity_lengths, "encode_validity_length", "in_"))
elif 'basic_constraints.max_path_len' in input_field:
lines.extend(utils.generate_check_lines(input_field, output_field[:-13], 'has_no_sub_CA', 'has_no_subordinate_CA'))
return lines
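# Editorial note (assumption about utils.one_hot_encode, which lives in
# censys_ml.field_transforms.utils and is not shown here): each call above is
# expected to expand one input field into a set of derived indicator columns,
# e.g. an RSA key-length field into "is_512" ... "is_4096" style flags plus a
# "supports_SHA1" flag for the SHA check.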
def encode_service_version_fields(input_field, output_field):
lines = []
# smb major version groups 3 has been left out
smb_versions = [1, 2]
# 1 is left out
amqp_versions = [0]
# rdp major versions, 10 is left out
rdp_versions = [1, 2, 3, 4, 5, 6, 7, 8, 9]
if 'smb_version.major' in input_field:
lines.extend(utils.one_hot_encode(input_field, output_field, smb_versions, "encode_version_major", "is_"))
lines.extend(utils.generate_relating_lines(input_field, output_field[:-6], 'version_number', 'version'))
elif 'amqp.banner.version.major' in input_field:
lines.extend(utils.one_hot_encode(input_field, output_field, amqp_versions, "encode_version_major", "is_"))
lines.extend(utils.generate_relating_lines(input_field, output_field[:-6], 'version_number', 'version'))
elif 'rdp.banner.version.major' in input_field:
lines.extend(utils.one_hot_encode(input_field, output_field, rdp_versions, "encode_version_major", "is_"))
lines.extend(utils.generate_relating_lines(input_field, output_field[:-6], 'version_number', 'version'))
elif 'bacnet.device_id.vendor.id' in input_field:
lines.extend(utils.generate_check_lines(input_field, output_field, 'is_ashrae', 'is_ashrae'))
return lines
def encode_http_fields(input_field, output_field):
lines = []
status_code_strs = ['success', 'redirection', 'client_error', 'server_error']
# same one_hot_encode function was used but functionality is different
session_time_ranges = ["minutes", "hours", "days"]
if 'session_ticket.lifetime_hint' in input_field:
lines.extend(utils.one_hot_encode(input_field, output_field, session_time_ranges, "encode_validity_length", "in_"))
elif 'status_code' in input_field:
lines.extend(utils.one_hot_encode(input_field, output_field, status_code_strs, "encode_status_code", "is_"))
return lines
def transform_common_fields(input_field, output_field):
lines = []
lines.extend(encode_certificate_fields(input_field, output_field))
lines.extend(encode_service_version_fields(input_field, output_field))
lines.extend(encode_http_fields(input_field, output_field))
return lines
def generate_numeric_lines(input_field, output_field, field_data=None):
lines = []
lines.extend(transform_common_fields(input_field, output_field))
if input_field[0] == 'p':
lines.extend(transform_common_fields(input_field[1:], output_field[1:]))
lines.extend(utils.handle_general_case(input_field=input_field,
output_field=output_field,
line_generator=utils.generate_numeric_line))
return lines
def main():
lines = []
strings = [
'p993.imaps.tls.tls.certificate.parsed.version',
'p993.imaps.tls.tls.chain.parsed.subject_key_info.rsa_public_key.length',
'p80.http.open_proxy.connect.status_code',
'p25.smtp.starttls.tls.certificate.parsed.subject_key_info.rsa_public_key.exponent',
'p443.https.tls.chain.parsed.extensions.basic_constraints.max_path_len',
'p445.smb.banner.smb_version.major',
'p5672.amqp.banner.version.major',
'p3389.rdp.banner.version.major',
'p47808.bacnet.device_id.vendor.id',
'p443.https.tls.server_key_exchange.rsa_params.length',
'p443.https.tls.server_key_exchange.dh_params.prime.length',
'p110.pop3.ssl_2.certificate.parsed.subject_key_info.ecdsa_public_key.length',
'p110.pop3.starttls.tls.session_ticket.lifetime_hint',
'p1433.mssql.banner.tls.certificate.parsed.validity.length',
'p1433.mssql.banner.tls.server_key_exchange.rsa_params.exponent'
]
for string in strings:
output_field = string.replace('.', '__')
lines.extend(generate_numeric_lines(string, output_field))
print('\n'.join(lines))
if __name__ == '__main__':
main()
|
the-stack_0_12561 | #! /usr/bin/env python
# -*-coding: utf-8 -*-
__author__ = 'dracarysX'
from collections import deque
from peewee import Expression, ForeignKeyField
from rest_query.operator import Operator, operator_list
from rest_query.query import QueryBuilder
from rest_query.parser import BaseParamsParser, cache_property
from rest_query.models import ModelExtra
from rest_query.serializer import BaseSerializer
class PeeweeModelExtraMixin(ModelExtra):
"""
peewee model mixin
"""
all_field = '*'
def push_join_model(self, field, model):
if model not in self.join_model:
self.join_model[model] = (field == model.id)
def field_by_model(self, model, field_name):
if field_name == self.all_field:
return model
return model._meta.fields[field_name]
def is_field_exist(self, model, field_name):
if field_name == self.all_field:
return True
return field_name in model._meta.fields
def foreign_model(self, field):
return field.rel_model
class PeeweeOperator(Operator):
"""
operator for peewee orm
:field: field must be peewee Field instance
"""
def __init__(self, field, value):
super(PeeweeOperator, self).__init__(field, value)
def __getattribute__(self, value, *args, **kwargs):
if value in operator_list or value == 'iin':
result = super(PeeweeOperator, self).__getattribute__(value, *args, **kwargs)()
def _(*args, **kwargs):
return Expression(result['field'], result['op'], result['value'])
return _
return super(PeeweeOperator, self).__getattribute__(value, *args, **kwargs)
def between(self):
low, high = self._split_value()
from peewee import Clause, R
return self.format('between', value=Clause(low, R('AND'), high))
class PeeweeParamsParser(PeeweeModelExtraMixin, BaseParamsParser):
operator_engine = PeeweeOperator
def __init__(self, params_args, model=None, **kwargs):
super(PeeweeParamsParser, self).__init__(params_args, **kwargs)
self.model = model
self.foreign_key = ForeignKeyField
def parse_select(self):
selects = super(PeeweeParamsParser, self).parse_select()
self.select_list = list(filter(self.check_field_exist, selects))
return [self.get_field(select) for select in self.select_list]
def split_where(self):
_wheres = []
for field, values in self.where_args.items():
try:
_value = values.split('.')
operator, value = _value[0], '.'.join(_value[1:])
except AttributeError:
operator, value = '=', values
if self.check_field_exist(field):
if operator not in self.operator_list:
_wheres.append(self.operator_engine(self.get_field(field), values).eq())
else:
_wheres.append(
getattr(self.operator_engine(self.get_field(field), value), operator)()
)
return _wheres
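    # Note (editorial): split_where() expects each where value to be either a raw
    # value (treated as equality) or an "<operator>.<value>" string, e.g.
    #   {'age': 'gte.18', 'name': 'python'}
    # would yield a greater-or-equal expression for age and an equality for name,
    # assuming 'gte' is present in operator_list and both fields exist on the model.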
def parse_order(self):
orders = super(PeeweeParamsParser, self).parse_order()
_order = []
for order in orders:
for k, v in order.items():
if self.check_field_exist(k):
_order.append(getattr(self.get_field(k), v)())
return _order
def parse_paginate(self):
paginate = super(PeeweeParamsParser, self).parse_paginate()
return (paginate['page'], paginate['limit'])
class PeeweeQueryBuilder(QueryBuilder):
"""
query builder for peewee orm
"""
parser_engine = PeeweeParamsParser
def __init__(self, model, params, **kwargs):
super(PeeweeQueryBuilder, self).__init__(model, params, **kwargs)
def build(self):
query = self.model.select(*self.select)
if self.where:
query = query.where(*self.where)
if self.order:
query = query.order_by(*self.order)
for model, condition in self.parser.join_model.items():
query = query.join(model, on=condition)
return query.paginate(*self.paginate)
class PeeweeSerializer(BaseSerializer):
"""
serializer for peewee object instance.
>>> serializer = PeeweeSerializer(obj=book, select_args=['id', 'name', 'author.id', 'author.name'])
>>> serializer.data()
{
'id': xxx,
'name': 'xxx',
'author': {
'id': xxx,
'name': 'xxx'
}
}
>>> serializer = PeeweeSerializer(object_list=book_list, select_args=['id', 'name', 'author.id', 'author.name'])
>>> serializer.data()
[
{
'id': xxx,
'name': 'xxx',
'author': {
'id': xxx,
'name': 'xxx'
}
},
{
'id': xxx,
'name': 'xxx',
'author': {
'id': xxx,
'name': 'xxx'
}
}
]
"""
def _obj_update(self, o1, o2):
"""
        Update o1 with values from o2. Keys already present in o1 are kept,
        unless their current value is a plain int (e.g. a raw foreign-key id),
        in which case they are overridden.
"""
for key, value in o2.items():
if key not in o1 or (key in o1 and isinstance(o1[key], int)):
o1[key] = value
return o1
def obj_serializer(self, obj):
return {k: getattr(obj, k if not isinstance(v, ForeignKeyField) else '%s_id' % k)
for k, v in obj.__class__._meta.fields.items()}
def _getattr(self, obj, field):
value = getattr(obj, field)
if hasattr(value, 'DoesNotExist'):
return getattr(obj, '{}_id'.format(field))
return value
def serializer(self, obj):
if not self.select_args:
return self.obj_serializer(obj)
data = {}
def _serializer(_data, select, obj):
args = select.split('.')
if len(args) == 1:
if select == '*':
_data = self._obj_update(_data, self.obj_serializer(obj))
# _data.update(self.obj_serializer(obj))
else:
if select not in _data:
_data[select] = self._getattr(obj, select)
else:
if not isinstance(_data[select], dict):
_data[select] = self._getattr(obj, select)
else:
prefix = args[0]
if prefix not in _data:
_data[prefix] = {}
_serializer(_data[prefix], '.'.join(args[1:]), getattr(obj, prefix))
for i in self.select_args:
_serializer(data, i, obj)
return data
# def serializer(self, obj):
# """
# single obj serializer.
# """
# if self.select_args is None:
# return self.obj_serializer(obj)
# d = {}
# def _get_instance(_obj, _key):
# """
# >>> _get_instance(book, 'author.school')
# author
# """
# k = _key.split('.')
# k.reverse()
# instance = _obj
# while len(k) > 0:
# v = k.pop()
# if instance is not None and v in instance._meta.fields:
# instance = getattr(instance, v)
# else:
# instance = None
# return instance
# def _serializer(args):
# for arg in args:
# if not isinstance(arg, list):
# v = arg.rsplit('.', 1)
# if len(v) == 1:
# if v[0] in obj._meta.fields:
# if isinstance(obj._meta.fields[v[0]], ForeignKeyField):
# d[v[0]] = getattr(obj, '{}_id'.format(v[0]))
# else:
# d[v[0]] = getattr(obj, v[0])
# else:
# if v[1] == '*':
# set_dict(d, v[0], self.obj_serializer(_get_instance(obj, v[0])))
# else:
# _model = _get_instance(obj, v[0])
# if v[1] in _model._meta.fields:
# set_dict(d, arg, getattr(_model, v[1]))
# else:
# _serializer(arg)
# if len(self.select_args) == 0:
# return self.obj_serializer(obj)
# _serializer(self.select_args)
# return d
|
the-stack_0_12564 | from . import *
class KernelVersionCommand(Command):
name = "kernel_version"
shell = True
command = "/bin/sh -c 'printf \"`uname -a`\\n`uname -v`\"'"
desc = "analyze changes in kernel version"
def parse(output):
lines = output.splitlines()
if len(lines) != 2:
return None
kernel_version = lines[1]
line = lines[0].split(kernel_version)[0].split()
kernel_name, hostname, kernel_release = line
return (kernel_name, hostname, kernel_release, kernel_version)
def compare(prev, cur):
ret = []
if not prev:
prev = cur
# a hostname change is something for which we want to see a warning
if prev[1] != cur[1]:
ret.append(W("hostname changed from %s to %s" % (prev[1], cur[1])))
else:
ret.append(D("kernel version check (hostname) yields %s" % cur[1]))
        # Count the changes: if anything in the kernel's name, release or
        # version information changed, the kernel was most likely updated,
        # so output a warning for that as well.
c = 0
if prev[0] != cur[0]:
ret.append(C("kernel name changed from %s to %s" % (prev[0], cur[0])))
c = c + 1
else:
ret.append(D("kernel version check (kernel name) yields %s" % cur[0]))
if prev[2] != cur[2]:
ret.append(C("kernel release changed from %s to %s" % (prev[2], cur[2])))
c = c + 1
else:
ret.append(D("kernel version check (kernel release) yields %s" % cur[2]))
if prev[3] != cur[3]:
ret.append(C("kernel version changed from %s to %s" % (prev[3], cur[3])))
c = c + 1
else:
ret.append(D("kernel version check (kernel version) yields %s" % cur[3]))
# if we see a count of > 0 it means something in the kernel has
# changed so output a warning
if c > 0:
ret.append(W("kernel seems to have changed from %s to %s" % (" ".join(prev), " ".join(cur))))
return ret
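    # Illustration (editorial, sample values): for command output such as
    #   "Linux host1 5.15.0-91-generic #101-Ubuntu SMP ... x86_64 GNU/Linux\n#101-Ubuntu SMP ..."
    # parse() returns ("Linux", "host1", "5.15.0-91-generic", "#101-Ubuntu SMP ..."),
    # and compare() only reports C()/W() findings when those fields differ between runs.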
class LSBVersionCommand(Command):
name = "lsb_version"
shell = False
command = "/usr/bin/lsb_release -idcr"
desc = "analyze changes in Linux Standard Base release settings"
def parse(output):
lines = output.splitlines()
if len(lines) != 4:
return {}
ret = {}
for line in lines:
lf = line.strip().find(":")
prop = line[0:lf].strip()
val = line[lf+1:].strip()
ret[prop] = val
return ret
def compare(prev, cur):
anomalies = []
entries = merge_keys_to_list(prev, cur)
for entry in entries:
p = prev[entry] if entry in prev else ""
c = cur[entry] if entry in cur else ""
if entry not in ["Description", "Distributor ID", "Codename", "Release"]:
anomalies.append(W("unknown entry '%s' returned by lsb_release prev: '%s', cur: '%s'" % (entry, p, c)))
elif p == "":
anomalies.append(C("LSB '%s' added with value '%s'" % (entry, c)))
elif c == "":
anomalies.append(W("LSB '%s' removed somehow (had value '%s')" % (entry, p)))
elif p != c:
anomalies.append(C("LSB %s changed from '%s' to '%s'" % (entry, p, c)))
else:
anomalies.append(D("LSB %s = %s" % (entry, c)))
return anomalies
|
the-stack_0_12565 | import numpy as np
from logging import getLogger
import kit_timetable_rebuilder as kitrb
class TimeTable(object):
def __init__(self, image: np.ndarray, logger=None):
self.logger = logger or getLogger(__name__)
self.image = image
bin_img = kitrb.convert_to_bin(image)
        # Detect the grid frame lines
verticals = kitrb.find_vertical_frames(bin_img, num_vertical=8)
sides = kitrb.find_side_frames(bin_img, num_side=6)
        # Trim along the frame lines and keep each cell as a Period object
periods = list()
for d in range(3):
days = list()
for p in range(6):
dst_img = image[
sides[d + 2]["y_seg"]:sides[d + 3]["y_seg"],
verticals[p + 1]["x_seg"]:verticals[p + 2]["x_seg"]
]
period = kitrb.Period(dst_img)
days.append(period)
            periods.append(days)
        # Keep the parsed grid on the instance so callers can access it.
        self.periods = periods
|
the-stack_0_12566 | # Copyright (c) 2019 Andres Gomez Ramirez.
# All Rights Reserved.
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
from arhuaco.analysis.features.data_helpers\
import DataHelpers
from tqdm import tqdm
import numpy as np
import random
import sys
import string
import os
# This is the main class for RNN-based generative models
# that create synthetic data based on previous examples.
class RnnGen:
def __init__(self, data_helpers, maxlen,
step, num_epochs, num_chars,
samples_per_epoch, weights_file,
model_file, generated_file
, number_generated):
# Parameters
self.maxlen = maxlen
self.step = step
self.num_epochs = num_epochs
self.num_chars = 0
self.model = None
self.data_helpers = data_helpers
self.data_generator = None
self.char_indices = None
self.indices_char = None
self.samples_per_epoch = samples_per_epoch
self.weights_file = weights_file
self.model_file = model_file
self.generated_file = generated_file
self.number_generated = number_generated
def get_data(self):
self.data_generator = self.data_helpers.generator_from_file(
self.data_helpers.data_source[1],
self.data_helpers.samples_per_batch)
# Initialize character set
chars = sorted(list(set(string.printable+"\n")))
print('total chars:', len(chars))
self.num_chars = len(chars)
self.char_indices = dict((c, i) for i, c in enumerate(chars))
self.indices_char = dict((i, c) for i, c in enumerate(chars))
def format_text(self, text):
# cut the text in semi-redundant
# sequences of maxlen characters
sentences = []
next_chars = []
for i in range(0, len(text) - self.maxlen, self.step):
sentences.append(text[i: i + self.maxlen])
next_chars.append(text[i + self.maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
X = np.zeros((len(sentences), self.maxlen,
self.num_chars), dtype=np.bool)
y = np.zeros((len(sentences), self.num_chars),
dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, self.char_indices[char]] = 1
y[i, self.char_indices[next_chars[i]]] = 1
return (X,y)
def build_model(self):
# build the model: a single LSTM
print('Build model...')
self.model = Sequential()
self.model.add(LSTM(128,
input_shape=(self.maxlen,
self.num_chars)))
self.model.add(Dense(self.num_chars))
self.model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
self.model.compile(loss='categorical_crossentropy',
optimizer=optimizer)
def sample(self, preds, temperature=1.0):
# helper function to sample an index
# from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
def train_model(self):
# train the model, output generated text
# after each iteration
if os.path.exists(self.weights_file):
self.model.load_weights(self.weights_file)
print("Model loaded from disk.")
x_train = next(self.data_generator)
text = self.data_helpers.get_text_from_list(
x_train)
else:
for iteration in range(1):
x_train = next(self.data_generator)
text = self.data_helpers.get_text_from_list(
x_train)
print('total chars in text:', len(text))
X, y = self.format_text(text)
print('-' * 50)
print('Iteration', iteration)
self.model.fit(X, y,
batch_size=self.samples_per_epoch,
nb_epoch=self.num_epochs)
# Save model
print("dumping weights to file...")
# serialize model to JSON
model_json = self.model.to_json()
with open(self.model_file, "w") as json_file:
json_file.write(model_json)
self.model.save_weights(self.weights_file,
overwrite=True)
self.test_model(text)
def test_model(self, text):
# Generate new data
print("Size of text:"+str(len(text)))
for diversity in [0.2, 0.5, 1.0, 1.2]:
start_index = random.randint(0, len(text)\
- self.maxlen - 1)
with open(self.generated_file+"-"+str(diversity),
"a") as gen_file:
print()
print('----- diversity:', diversity)
# Create a seed for generating data
generated = ''
sentence = text[start_index: start_index + self.maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
for i in tqdm(range(self.number_generated)):
x = np.zeros((1, self.maxlen, self.num_chars))
for t, char in enumerate(sentence):
x[0, t, self.char_indices[char]] = 1.
preds = self.model.predict(x, verbose=0)[0]
next_index = self.sample(preds, diversity)
next_char = self.indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
gen_file.write(generated)
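# Editorial sketch (not part of the original class): how the temperature argument
# used in RnnGen.sample() reshapes a probability distribution; the values below
# are purely illustrative.
if __name__ == "__main__":
    demo_preds = np.asarray([0.7, 0.2, 0.1])
    for temperature in (0.2, 1.0, 1.2):
        scaled = np.exp(np.log(demo_preds) / temperature)
        scaled = scaled / np.sum(scaled)
        # Low temperature sharpens the distribution, high temperature flattens it.
        print(temperature, scaled.round(3))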
|
the-stack_0_12567 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import httplib
from flask import request, session, make_response
from flask_restful import Resource
from flask_restful_swagger import swagger
from cairis.daemon.CairisHTTPError import ObjectNotFoundHTTPError
from cairis.data.DimensionDAO import DimensionDAO
from cairis.tools.JsonConverter import json_serialize
__author__ = 'Robin Quetin, Shamal Faily'
class DimensionsAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get all dimensions of a specific table',
nickname='dimensions-table-get',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
},
{
"name": "constraint_id",
"description": "The ID of the constraint used when obtaining the data",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
},
{
"code": httplib.CONFLICT,
"message": "Database conflict"
}
]
)
# endregion
def get(self, table):
session_id = request.args.get('session_id', None)
id = request.args.get('constraint_id', -1)
dao = DimensionDAO(session_id)
dimension_names = dao.getDimensions(table,id)
dao.close()
resp = make_response(json_serialize(dimension_names, session_id=session_id), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
class DimensionNamesAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get all dimensions of a specific table in a specific environment',
nickname='dimensions-table-environment-get',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, table, environment):
session_id = request.args.get('session_id', None)
dao = DimensionDAO(session_id)
dimension_names = dao.getDimensionNames(table,environment)
dao.close()
resp = make_response(json_serialize(dimension_names, session_id=session_id), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
|
the-stack_0_12568 | from __future__ import division # Use floating point for math calculations
from CTFd.plugins import register_plugin_assets_directory
from CTFd.plugins.flags import get_flag_class
from CTFd.utils.user import get_current_user
from CTFd import utils
from CTFd.models import (
db,
Solves,
Fails,
Flags,
Challenges,
ChallengeFiles,
Tags,
Hints,
Users,
Notifications
)
from flask import render_template, request, jsonify, Blueprint, current_app
from CTFd.utils.user import get_ip
from CTFd.utils.uploads import delete_file
from CTFd.utils.decorators import admins_only, authed_only, during_ctf_time_only
from CTFd.utils.modes import get_model
from CTFd.utils import user as current_user
from .models import GlowwormChallenge, ADAChallenge, GlowwormContainers, GlowwormAttacks
from CTFd.plugins.challenges import CHALLENGE_CLASSES
from .db_utils import DBUtils
from .control_utils import ControlUtil
import datetime, fcntl
import logging, logging.handlers, os, sys, uuid
from .extensions import get_mode
def load(app):
# upgrade()
app.db.create_all()
CHALLENGE_CLASSES["ada_challenge"] = GlowwormChallenge
register_plugin_assets_directory(
app, base_path="/plugins/ctfd_glowworm/assets/"
)
glowworm_blueprint = Blueprint(
"ctfd-glowworm",
__name__,
template_folder="templates",
static_folder="assets",
url_prefix="/plugins/ctfd-glowworm"
)
log_dir = app.config["LOG_FOLDER"]
logger_glowworm = logging.getLogger("glowworm")
logger_glowworm.setLevel(logging.INFO)
logs = {
"glowworm": os.path.join(log_dir, "glowworm.log"),
}
try:
for log in logs.values():
if not os.path.exists(log):
open(log, "a").close()
container_log = logging.handlers.RotatingFileHandler(
logs["glowworm"], maxBytes=10000
)
logger_glowworm.addHandler(container_log)
except IOError:
pass
stdout = logging.StreamHandler(stream=sys.stdout)
logger_glowworm.addHandler(stdout)
logger_glowworm.propagate = 0
@glowworm_blueprint.route("/flag", methods=['POST'])
    # TODO: fix different time bug
# @during_ctf_time_only
def update_flag():
try:
req = request.get_json()
print(req)
key = GlowwormContainers.query.filter_by(docker_id=req['name']).first().key
if req['key'] != key:
return jsonify({'success': False})
else:
flag = uuid.uuid3(uuid.UUID(req['uuid']), req['name'] + req['time'] + key)
if DBUtils.update_flag(req['name'], 'flag{' + str(flag) + '}'):
return jsonify({'success': True})
else:
return jsonify({'success': False})
except Exception as e:
print(e)
return jsonify({'success': False})
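    # Example payload (editorial) POSTed as JSON to /plugins/ctfd-glowworm/flag by
    # the per-container flag updater:
    #   {"name": "<docker_id>", "key": "<container key>",
    #    "uuid": "<namespace uuid>", "time": "<round timestamp>"}
    # The new flag is uuid3(namespace, name + time + key) wrapped in flag{...}.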
@glowworm_blueprint.route("/challenge/<challenge_id>", methods=['GET'])
def get_targets(challenge_id):
try:
datas = {'success': True, 'data':[]}
containers = GlowwormContainers.query.filter_by(challenge_id=challenge_id).all()
print(challenge_id,containers)
for container in containers:
datas['data'].append({"target":"{}:{}".format(container.ip, container.service_port)})
datas['length'] = len(datas['data'])
return jsonify(datas)
except Exception as e:
print(e)
return jsonify({'success': False})
@glowworm_blueprint.route('/admin/settings', methods=['GET'])
@admins_only
# list plugin settings
def admin_list_configs():
configs = DBUtils.get_all_configs()
return render_template('glowworm_configs.html', configs=configs)
@glowworm_blueprint.route('/admin/settings', methods=['PATCH'])
@admins_only
# modify plugin settings
def admin_save_configs():
req = request.get_json()
DBUtils.save_all_configs(req.items())
return jsonify({'success': True})
@glowworm_blueprint.route("/admin/containers", methods=['GET'])
@admins_only
# list alive containers
def admin_list_containers():
configs = DBUtils.get_all_configs()
page = abs(request.args.get("page", 1, type=int))
results_per_page = 50
page_start = results_per_page * (page - 1)
page_end = results_per_page * (page - 1) + results_per_page
count = DBUtils.get_all_alive_container_count()
containers = DBUtils.get_all_alive_container_page(page_start, page_end)
pages = int(count / results_per_page) + (count % results_per_page > 0)
return render_template("glowworm_containers.html", containers=containers, pages=pages, curr_page=page,
curr_page_start=page_start, configs=configs)
@glowworm_blueprint.route('/admin/containers', methods=['PATCH'])
@admins_only
def renew_admin_container():
user_id = request.args.get('user_id')
challenge_id = request.args.get('challenge_id')
if ControlUtil.frequency_limit():
return jsonify({'success': False, 'msg': 'Frequency limit, You should wait at least 1 min.'})
try:
ControlUtil.renew_container(user_id, challenge_id)
return jsonify({'success': True})
except Exception as e:
print(e)
return jsonify({'success': False})
@glowworm_blueprint.route("/admin/environments", methods=['GET'])
@admins_only
# list alive containers
def admin_list_environments():
configs = DBUtils.get_all_configs()
page = abs(request.args.get("page", 1, type=int))
results_per_page = 50
page_start = results_per_page * (page - 1)
page_end = results_per_page * (page - 1) + results_per_page
count = DBUtils.get_all_alive_environment_count()
environments = DBUtils.get_all_alive_environment_page(page_start, page_end)
pages = int(count / results_per_page) + (count % results_per_page > 0)
return render_template("glowworm_environments.html", environments=environments, pages=pages, curr_page=page,
curr_page_start=page_start)
@glowworm_blueprint.route("/admin/init", methods=['PATCH'])
@admins_only
def admin_init_competitions():
try:
from .schedule import scheduler
start_time = int(utils.get_config("start"))
interval = DBUtils.get_all_configs().get("per_round")
interval = str(int(int(interval) / 60))
if ControlUtil.init_competition():
job = scheduler.add_job(id='time_base', func=ControlUtil.check_env, args=["init"], trigger='cron', minute="*/{}".format(interval))
# job = scheduler.add_job(id='time_base', func=ControlUtil.check_env, args=["init"], trigger='interval', seconds=5)
print(job)
return jsonify({'success': True})
else:
return jsonify({'success': False})
except Exception as e:
return jsonify({'success': False, 'msg': str(e)})
@glowworm_blueprint.route("/admin/remove", methods=['PATCH'])
@admins_only
def admin_remove_competitions():
if ControlUtil.remove_competition():
return jsonify({'success': True})
else:
return jsonify({'success': False})
@glowworm_blueprint.route("/live/attacks", methods=['GET', 'POST'])
@admins_only
def attacks():
if request.method == "GET":
attacks = GlowwormAttacks.query.order_by(GlowwormAttacks.time.desc()).all()
print(attacks)
return jsonify({'success': True})
elif request.method == "POST":
req = request.get_json()
print(req)
return jsonify({'success': True})
return jsonify({'success': False})
@glowworm_blueprint.route("/admin/env", methods=['PATCH'])
@admins_only
def admin_env():
req = request.get_json()
print(req)
if req["type"] == "init":
result = ControlUtil.check_env()
elif req["type"] == "check":
result = ControlUtil.check_env(req['type'], req['challenge_id'])
elif req["type"] == "build":
result = ControlUtil.build_env(req['challenge_id'])
elif req["type"] == "run":
result = ControlUtil.start_env(req['challenge_id'])
elif req["type"] == "remove":
result = ControlUtil.remove_env(req['challenge_id'])
if result:
return jsonify({'success': True})
else:
return jsonify({'success': False, 'msg': result})
@glowworm_blueprint.route('/container', methods=['GET'])
@authed_only
def container_info():
user_id = get_mode()
challenge_id = request.args.get('challenge_id')
ControlUtil.check_challenge(challenge_id, user_id)
data = ControlUtil.get_container(user_id=user_id, challenge_id=challenge_id)
configs = DBUtils.get_all_configs()
if data is not None:
if int(data.challenge_id) != int(challenge_id):
return jsonify({})
else:
return jsonify({'success': True, 'type': 'direct', 'ip': configs.get('direct_address', ""),
'service_port' : data.service_port, 'ssh_port' : data.ssh_port, 'ssh_key' : data.ssh_key
})
else:
return jsonify({'success': True})
@glowworm_blueprint.route('/container', methods=['PATCH'])
@authed_only
def renew_container():
user_id = get_mode()
challenge_id = request.args.get('challenge_id')
if ControlUtil.frequency_limit():
return jsonify({'success': False, 'msg': 'Frequency limit, You should wait at least 1 min.'})
try:
ControlUtil.renew_container(user_id, challenge_id)
return jsonify({'success': True})
except Exception as e:
print(e)
return jsonify({'success': False})
@glowworm_blueprint.route("/attacks", methods=['GET'])
def list_attacks():
page = abs(request.args.get("page", 1, type=int))
results_per_page = 50
page_start = results_per_page * (page - 1)
page_end = results_per_page * (page - 1) + results_per_page
count = GlowwormAttacks.query.count()
attacks = (
GlowwormAttacks.query.order_by(GlowwormAttacks.time.desc())
.slice(page_start, page_end)
.all()
)
pages = int(count / results_per_page) + (count % results_per_page > 0)
return render_template("glowworm_attacks.html", attacks=attacks, pages=pages, curr_page=page)
app.register_blueprint(glowworm_blueprint)
try:
lock_file = open("/tmp/ctfd_glowworm.lock", "w")
lock_fd = lock_file.fileno()
fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
from .schedule import scheduler, Config
app.config.from_object(Config())
scheduler.init_app(app)
scheduler.start()
except IOError:
pass |
the-stack_0_12569 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 22 17:32:28 2021
@author: galfredo
"""
# dataset settings
dataset_type = 'CocoVideoDataset'
classes = ('1',)  # trailing comma keeps this a one-element tuple rather than a plain string
data_root = 'C:/Users/galfredo/Documents/Challenge Everest/dff test/customized annotations/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadMultiImagesFromFile'),
dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True),
dict(type='SeqResize', img_scale=(1000, 600), keep_ratio=True),
dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5),
dict(type='SeqNormalize', **img_norm_cfg),
dict(type='SeqPad', size_divisor=16),
dict(
type='VideoCollect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_instance_ids']),
dict(type='ConcatVideoReferences'),
dict(type='SeqDefaultFormatBundle', ref_prefix='ref')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1000, 600),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=16),
dict(type='ImageToTensor', keys=['img']),
dict(type='VideoCollect', keys=['img'])
])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=2,
train=[
dict(
type=dataset_type,
ann_file=data_root + 'annotations.json',
img_prefix=data_root + 'VID',
ref_img_sampler=dict(
num_ref_imgs=1,
frame_range=9,
filter_key_img=False,
method='uniform'),
pipeline=train_pipeline)
],
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations.json',
img_prefix=data_root + 'VID',
ref_img_sampler=None,
pipeline=test_pipeline,
test_mode=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations.json',
img_prefix=data_root + 'VID',
ref_img_sampler=None,
pipeline=test_pipeline,
test_mode=True))
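# Editorial note: this file only defines the dataset settings; it is assumed to be
# merged with model/schedule/runtime settings into a full mmtracking config and
# consumed by the usual entry points, e.g. `python tools/train.py <config>.py`.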
|
the-stack_0_12570 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
def _test_eq(test_case, shape, device):
arr1 = np.random.randn(*shape)
arr2 = np.random.randn(*shape)
input = flow.Tensor(arr1, dtype=flow.float32, device=flow.device(device))
other = flow.Tensor(arr2, dtype=flow.float32, device=flow.device(device))
of_out = flow.eq(input, other)
of_out2 = flow.equal(input, other)
np_out = np.equal(arr1, arr2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
test_case.assertTrue(np.array_equal(of_out2.numpy(), np_out))
def _test_eq_int(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.Tensor(arr, dtype=flow.float32, device=flow.device(device))
num = 1
of_out = flow.eq(input, num)
np_out = np.equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_eq_float(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.Tensor(arr, dtype=flow.float32, device=flow.device(device))
    num = 1.0
of_out = flow.eq(input, num)
np_out = np.equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
".numpy() doesn't work in lazy mode",
)
class TestEq(flow.unittest.TestCase):
def test_eq(test_case):
arg_dict = OrderedDict()
arg_dict["test_func"] = [_test_eq, _test_eq_int, _test_eq_float]
arg_dict["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
if __name__ == "__main__":
unittest.main()
|
the-stack_0_12571 | import os
import tensorflow as tf
from tensorflow.python.tools import freeze_graph
from tensorflow.tools.graph_transforms import TransformGraph
import config
import download
import loss
class MSINET:
"""The class representing the MSI-Net based on the VGG16 model. It
implements a definition of the computational graph, as well as
functions related to network training.
"""
def __init__(self):
self._output = None
self._mapping = {}
        # Mirror the configured attention type so _pretraining() maps the right
        # number of variables onto the pretrained VGG16 checkpoint.
        self.attention = config.PARAMS["attention"]
if config.PARAMS["device"] == "gpu":
self._data_format = "channels_first"
self._channel_axis = 1
self._dims_axis = (2, 3)
elif config.PARAMS["device"] == "cpu":
self._data_format = "channels_last"
self._channel_axis = 3
self._dims_axis = (1, 2)
def cbam_block(self, input_feature, name, ratio=8, input_filters=128):
"""Contains the implementation of Convolutional Block Attention Module(CBAM) block.
As described in https://arxiv.org/abs/1807.06521.
"""
with tf.variable_scope(name):
attention_feature = self.channel_attention(input_feature, 'ch_at', ratio, input_filters=input_filters)
attention_feature = self.spatial_attention(attention_feature, 'sp_at', input_filters=input_filters)
return attention_feature
def channel_attention(self, input_feature, name, ratio=8, input_filters=128):
kernel_initializer = tf.contrib.layers.variance_scaling_initializer()
bias_initializer = tf.constant_initializer(value=0.0)
with tf.variable_scope(name):
channel = input_filters
avg_pool = tf.reduce_mean(input_feature, axis=self._dims_axis, keepdims=True)
avg_pool = tf.layers.conv2d(avg_pool, channel // ratio, (1, 1), use_bias=True,
name=name + 'mlp_0', strides=(1, 1),
padding='valid', data_format=self._data_format,
activation=tf.nn.relu,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
reuse=None)
avg_pool = tf.layers.conv2d(avg_pool, channel, (1, 1), use_bias=True,
name=name + 'mlp_1', strides=(1, 1),
padding='valid', data_format=self._data_format,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
reuse=None)
max_pool = tf.reduce_max(input_feature, axis=self._dims_axis, keepdims=True)
max_pool = tf.layers.conv2d(max_pool, channel//ratio, (1, 1), use_bias=True,
name=name + 'mlp_0', strides=(1, 1),
padding='valid', data_format=self._data_format,
activation=tf.nn.relu,
reuse=True)
max_pool = tf.layers.conv2d(max_pool, channel, (1, 1), use_bias=True,
name=name + 'mlp_1', strides=(1, 1),
padding='valid', data_format=self._data_format,
reuse=True)
scale = tf.sigmoid(avg_pool + max_pool, 'sigmoid')
return input_feature * scale
def spatial_attention(self, input_feature, name, input_filters=128):
kernel_size = 7
kernel_initializer = tf.contrib.layers.variance_scaling_initializer()
with tf.variable_scope(name):
avg_pool = tf.reduce_mean(input_feature, axis=[3], keepdims=True)
max_pool = tf.reduce_max(input_feature, axis=[3], keepdims=True)
concat = tf.concat([avg_pool,max_pool], 3)
concat = tf.layers.conv2d(concat,
filters=1,
kernel_size=[kernel_size,kernel_size],
strides=[1,1],
padding="same",
activation=None,
kernel_initializer=kernel_initializer,
use_bias=False,
name='conv')
concat = tf.sigmoid(concat, 'sigmoid')
return input_feature * concat
def se_block(self, residual, name, ratio=8, input_filters=128):
with tf.variable_scope(name):
# Global average pooling
kernel_initializer = tf.contrib.layers.variance_scaling_initializer()
bias_initializer = tf.constant_initializer(value=0.0)
squeeze = tf.reduce_mean(residual, axis=self._dims_axis, keepdims=True)
excitation = tf.layers.conv2d(squeeze, input_filters // ratio, (1, 1), use_bias=True,
name=name + '_1x1_down', strides=(1, 1),
padding='valid', data_format=self._data_format,
activation=tf.nn.relu,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer)
excitation = tf.layers.conv2d(excitation, input_filters, (1, 1), use_bias=True,
name=name + '_1x1_up', strides=(1, 1),
padding='valid', data_format=self._data_format,
activation=tf.nn.sigmoid,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer)
scale = residual * excitation
return scale
def _encoder(self, images):
"""The encoder of the model consists of a pretrained VGG16 architecture
with 13 convolutional layers. All dense layers are discarded and the
last 3 layers are dilated at a rate of 2 to account for the omitted
downsampling. Finally, the activations from 3 layers are combined.
Args:
images (tensor, float32): A 4D tensor that holds the RGB image
batches used as input to the network.
"""
imagenet_mean = tf.constant([103.939, 116.779, 123.68])
imagenet_mean = tf.reshape(imagenet_mean, [1, 1, 1, 3])
images -= imagenet_mean
if self._data_format == "channels_first":
images = tf.transpose(images, (0, 3, 1, 2))
layer01 = tf.layers.conv2d(images, 64, 3,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="conv1/conv1_1")
layer02 = tf.layers.conv2d(layer01, 64, 3,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="conv1/conv1_2")
layer03 = tf.layers.max_pooling2d(layer02, 2, 2,
data_format=self._data_format)
layer04 = tf.layers.conv2d(layer03, 128, 3,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="conv2/conv2_1")
layer05 = tf.layers.conv2d(layer04, 128, 3,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="conv2/conv2_2")
layer06 = tf.layers.max_pooling2d(layer05, 2, 2,
data_format=self._data_format)
layer07 = tf.layers.conv2d(layer06, 256, 3,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="conv3/conv3_1")
layer08 = tf.layers.conv2d(layer07, 256, 3,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="conv3/conv3_2")
layer09 = tf.layers.conv2d(layer08, 256, 3,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="conv3/conv3_3")
layer10 = tf.layers.max_pooling2d(layer09, 2, 2,
data_format=self._data_format)
layer11 = tf.layers.conv2d(layer10, 512, 3,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="conv4/conv4_1")
layer12 = tf.layers.conv2d(layer11, 512, 3,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="conv4/conv4_2")
layer13 = tf.layers.conv2d(layer12, 512, 3,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="conv4/conv4_3")
layer14 = tf.layers.max_pooling2d(layer13, 2, 1,
padding="same",
data_format=self._data_format)
layer15 = tf.layers.conv2d(layer14, 512, 3,
padding="same",
activation=tf.nn.relu,
dilation_rate=2,
data_format=self._data_format,
name="conv5/conv5_1")
layer16 = tf.layers.conv2d(layer15, 512, 3,
padding="same",
activation=tf.nn.relu,
dilation_rate=2,
data_format=self._data_format,
name="conv5/conv5_2")
layer17 = tf.layers.conv2d(layer16, 512, 3,
padding="same",
activation=tf.nn.relu,
dilation_rate=2,
data_format=self._data_format,
name="conv5/conv5_3")
layer18 = tf.layers.max_pooling2d(layer17, 2, 1,
padding="same",
data_format=self._data_format)
        encoder_output = tf.concat([layer10, layer14, layer18], axis=self._channel_axis)
        if config.PARAMS["attention"] is None:
            self._output = encoder_output
        elif config.PARAMS["attention"] == 'se':
            self._output = self.se_block(encoder_output, name="00_se", input_filters=1280)
        elif config.PARAMS["attention"] == 'cbam':
            self._output = self.cbam_block(input_feature=encoder_output, name="00_cbam", ratio=8, input_filters=1280)
def _aspp(self, features):
"""The ASPP module samples information at multiple spatial scales in
parallel via convolutional layers with different dilation factors.
The activations are then combined with global scene context and
represented as a common tensor.
Args:
features (tensor, float32): A 4D tensor that holds the features
produced by the encoder module.
"""
branch1 = tf.layers.conv2d(features, 256, 1,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="aspp/conv1_1")
branch2 = tf.layers.conv2d(features, 256, 3,
padding="same",
activation=tf.nn.relu,
dilation_rate=4,
data_format=self._data_format,
name="aspp/conv1_2")
branch3 = tf.layers.conv2d(features, 256, 3,
padding="same",
activation=tf.nn.relu,
dilation_rate=8,
data_format=self._data_format,
name="aspp/conv1_3")
branch4 = tf.layers.conv2d(features, 256, 3,
padding="same",
activation=tf.nn.relu,
dilation_rate=12,
data_format=self._data_format,
name="aspp/conv1_4")
branch5 = tf.reduce_mean(features,
axis=self._dims_axis,
keepdims=True)
branch5 = tf.layers.conv2d(branch5, 256, 1,
padding="valid",
activation=tf.nn.relu,
data_format=self._data_format,
name="aspp/conv1_5")
shape = tf.shape(features)
branch5 = self._upsample(branch5, shape, 1)
context = tf.concat([branch1, branch2, branch3, branch4, branch5],
axis=self._channel_axis)
aspp_output = tf.layers.conv2d(context, 256, 1,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="aspp/conv2")
self._output = aspp_output
def _decoder(self, features):
"""The decoder model applies a series of 3 upsampling blocks that each
performs bilinear upsampling followed by a 3x3 convolution to avoid
checkerboard artifacts in the image space. Unlike all other layers,
the output of the model is not modified by a ReLU.
Args:
features (tensor, float32): A 4D tensor that holds the features
produced by the ASPP module.
"""
shape = tf.shape(features)
layer1 = self._upsample(features, shape, 2)
layer2 = tf.layers.conv2d(layer1, 128, 3,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="decoder/conv1")
shape = tf.shape(layer2)
layer3 = self._upsample(layer2, shape, 2)
layer4 = tf.layers.conv2d(layer3, 64, 3,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="decoder/conv2")
shape = tf.shape(layer4)
layer5 = self._upsample(layer4, shape, 2)
layer6 = tf.layers.conv2d(layer5, 32, 3,
padding="same",
activation=tf.nn.relu,
data_format=self._data_format,
name="decoder/conv3")
decoder_output = tf.layers.conv2d(layer6, 1, 3,
padding="same",
data_format=self._data_format,
name="decoder/conv4")
if self._data_format == "channels_first":
decoder_output = tf.transpose(decoder_output, (0, 2, 3, 1))
self._output = decoder_output
def _upsample(self, stack, shape, factor):
"""This function resizes the input to a desired shape via the
bilinear upsampling method.
Args:
stack (tensor, float32): A 4D tensor with the function input.
shape (tensor, int32): A 1D tensor with the reference shape.
factor (scalar, int): An integer denoting the upsampling factor.
Returns:
tensor, float32: A 4D tensor that holds the activations after
bilinear upsampling of the input.
"""
if self._data_format == "channels_first":
stack = tf.transpose(stack, (0, 2, 3, 1))
stack = tf.image.resize_bilinear(stack, (shape[self._dims_axis[0]] * factor,
shape[self._dims_axis[1]] * factor))
if self._data_format == "channels_first":
stack = tf.transpose(stack, (0, 3, 1, 2))
return stack
def _normalize(self, maps, eps=1e-7):
"""This function normalizes the output values to a range
between 0 and 1 per saliency map.
Args:
maps (tensor, float32): A 4D tensor that holds the model output.
eps (scalar, float, optional): A small factor to avoid numerical
instabilities. Defaults to 1e-7.
"""
min_per_image = tf.reduce_min(maps, axis=(1, 2, 3), keep_dims=True)
maps -= min_per_image
max_per_image = tf.reduce_max(maps, axis=(1, 2, 3), keep_dims=True)
maps = tf.divide(maps, eps + max_per_image, name="output")
self._output = maps
def _pretraining(self):
"""The first 26 variables of the model here are based on the VGG16
network. Therefore, their names are matched to the ones of the
pretrained VGG16 checkpoint for correct initialization.
"""
if self.attention == 'se':
index = 38 #70
else:
index = 26
for var in tf.global_variables()[:index]:
if 'se' not in var.name.split("/", 1)[1]:
#print(var.name.split("/", 1)[1])
key = var.name.split("/", 1)[1]
key = key.replace("kernel:0", "weights")
key = key.replace("bias:0", "biases")
self._mapping[key] = var
def forward(self, images):
"""Public method to forward RGB images through the whole network
architecture and retrieve the resulting output.
Args:
images (tensor, float32): A 4D tensor that holds the values of the
raw input images.
Returns:
tensor, float32: A 4D tensor that holds the values of the
predicted saliency maps.
"""
self._encoder(images)
self._aspp(self._output)
self._decoder(self._output)
self._normalize(self._output)
return self._output
def train(self, ground_truth, predicted_maps, learning_rate):
"""Public method to define the loss function and optimization
algorithm for training the model.
Args:
ground_truth (tensor, float32): A 4D tensor with the ground truth.
predicted_maps (tensor, float32): A 4D tensor with the predictions.
learning_rate (scalar, float): Defines the learning rate.
Returns:
object: The optimizer element used to train the model.
tensor, float32: A 0D tensor that holds the averaged error.
"""
error = loss.kld(ground_truth, predicted_maps)
optimizer = tf.train.AdamOptimizer(learning_rate)
optimizer = optimizer.minimize(error)
return optimizer, error
def save(self, saver, sess, dataset, path, device):
"""This saves a model checkpoint to disk and creates
the folder if it doesn't exist yet.
Args:
saver (object): An object for saving the model.
sess (object): The current TF training session.
path (str): The path used for saving the model.
device (str): Represents either "cpu" or "gpu".
"""
os.makedirs(path, exist_ok=True)
saver.save(sess, path + "model_%s_%s.ckpt" % (dataset, device),
write_meta_graph=False, write_state=False)
def restore(self, sess, dataset, paths, device):
"""This function allows continued training from a prior checkpoint and
training from scratch with the pretrained VGG16 weights. In case the
dataset is either CAT2000 or MIT1003, a prior checkpoint based on
the SALICON dataset is required.
Args:
sess (object): The current TF training session.
            dataset (str): The dataset used for training.
paths (dict, str): A dictionary with all path elements.
device (str): Represents either "cpu" or "gpu".
Returns:
object: A saver object for saving the model.
"""
model_name = "model_%s_%s" % (dataset, device)
salicon_name = "model_salicon_%s" % device
vgg16_name = "vgg16_hybrid"
ext1 = ".ckpt.data-00000-of-00001"
ext2 = ".ckpt.index"
saver = tf.train.Saver()
if os.path.isfile(paths["latest"] + model_name + ext1) and \
os.path.isfile(paths["latest"] + model_name + ext2):
saver.restore(sess, paths["latest"] + model_name + ".ckpt")
elif dataset in ("mit1003", "dutomron", "cat2000",
"pascals", "osie", "fiwi"):
if os.path.isfile(paths["best"] + salicon_name + ext1) and \
os.path.isfile(paths["best"] + salicon_name + ext2):
saver.restore(sess, paths["best"] + salicon_name + ".ckpt")
else:
raise FileNotFoundError("Train model on SALICON first")
else:
if not (os.path.isfile(paths["weights"] + vgg16_name + ext1) or
os.path.isfile(paths["weights"] + vgg16_name + ext2)):
download.download_pretrained_weights(paths["weights"],
"vgg16_hybrid")
self._pretraining()
loader = tf.train.Saver(self._mapping)
loader.restore(sess, paths["weights"] + vgg16_name + ".ckpt")
return saver
def optimize(self, sess, dataset, path, device):
"""The best performing model is frozen, optimized for inference
by removing unneeded training operations, and written to disk.
Args:
sess (object): The current TF training session.
path (str): The path used for saving the model.
device (str): Represents either "cpu" or "gpu".
.. seealso:: https://bit.ly/2VBBdqQ and https://bit.ly/2W7YqBa
"""
model_name = "model_%s_%s" % (dataset, device)
model_path = path + model_name
tf.train.write_graph(sess.graph.as_graph_def(),
path, model_name + ".pbtxt")
freeze_graph.freeze_graph(model_path + ".pbtxt", "", False,
model_path + ".ckpt", "output",
"save/restore_all", "save/Const:0",
model_path + ".pb", True, "")
os.remove(model_path + ".pbtxt")
graph_def = tf.GraphDef()
with tf.gfile.Open(model_path + ".pb", "rb") as file:
graph_def.ParseFromString(file.read())
transforms = ["remove_nodes(op=Identity)",
"merge_duplicate_nodes",
"strip_unused_nodes",
"fold_constants(ignore_errors=true)"]
optimized_graph_def = TransformGraph(graph_def,
["input"],
["output"],
transforms)
tf.train.write_graph(optimized_graph_def,
logdir=path,
as_text=False,
name=model_name + ".pb")
|
the-stack_0_12572 | from dal import autocomplete, forward
from django import forms
from .models import TModel
class TForm(forms.ModelForm):
class Meta:
model = TModel
fields = ('name', 'owner', 'test')
widgets = {
'test': autocomplete.ModelSelect2(
url='linked_data_rf',
forward=(forward.Field(src="owner", dst="possessor"),
forward.Const(val=42, dst="secret"))
)
}
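        # Assumed django-autocomplete-light behaviour: forward.Field submits the
        # form's current "owner" value to the autocomplete view as "possessor",
        # and forward.Const always submits the constant 42 under the name "secret".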
class Media:
js = (
'linked_data.js',
)
|
the-stack_0_12573 | # coding: utf-8
import sys
import re
from natsort import natsorted
INPUT = sys.argv[ 1 ]
# NOTE: This script is for Kontakt 6!
# KSP Reference Manual.txt was created by Acrobat DC (Windows version, locale cp932) and re-saved in VS Code with UTF-8 encoding.
# If the file was created with a different locale, change the encoding name.
ENCODING = 'utf-8'
REGEX = r"(\s*[a-z|A-Z|_]+\(\)\s*)+"
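# Matches runs of zero-argument calls in the manual text; e.g. a line containing
# "play_note()  stop_wait()" is captured and later reduced to the bare function
# names "play_note" and "stop_wait" (illustrative example, not from the manual).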
wordList = []
IGNORE_WORD_LIST = [
"select",
"while",
"ui_waveform",
    # words that only appear in explanations or example snippets
"array",
"by_mark", # miss?
"change_xxx",
"get_keyrange_xxx",
"if",
"it",
"low_group",
"ray_idx",
"set_condition", #lower
"range", # not exist
    # word fragments produced by line breaks in the extracted text
"group",
"idx",
"par_str_arr",
"tach_zone", # line separated (-> attach_zone() )
"ui_control", # It is callback
"trol_par_str_arr", # extract miss (expexted: control_par_str_arr)
]
def appendWord( word, targetList ):
if( len( word ) > 0 and not word in IGNORE_WORD_LIST and not word in targetList ) :
targetList.append( word )
f = open( INPUT, 'r', encoding = ENCODING )
while( True ):
line = f.readline()
if( not line ):
break
m = re.findall( REGEX, line )
if( m == None ):
continue
for i in m:
word = i.strip()
word = re.sub( r".*?\s+.*", "", word )
word = re.sub( r"\s*\(\s*", "", word )
word = re.sub( r"\s*\)\s*", "", word )
if word.find( "-" ) >= 0:
continue
appendWord( word.lower(), wordList )
f.close()
for i in natsorted( wordList ):
print( i )
|
the-stack_0_12574 | import re
import tensorflow.compat.v1 as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.nets import resnet_v1
from dataset.pose_dataset import Batch
from nnet import losses
net_funcs = {'resnet_50': resnet_v1.resnet_v1_50,
'resnet_101': resnet_v1.resnet_v1_101}
def prediction_layer(cfg, input, name, num_outputs):
with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], padding='SAME',
activation_fn=None, normalizer_fn=None,
weights_regularizer=slim.l2_regularizer(cfg.weight_decay)):
with tf.variable_scope(name):
pred = slim.conv2d_transpose(input, num_outputs,
kernel_size=[3, 3], stride=2,
scope='block4')
return pred
def get_batch_spec(cfg):
num_joints = cfg.num_joints
batch_size = cfg.batch_size
batch_spec = {
Batch.inputs: [batch_size, None, None, 3],
Batch.part_score_targets: [batch_size, None, None, num_joints],
Batch.part_score_weights: [batch_size, None, None, num_joints]
}
if cfg.location_refinement:
batch_spec[Batch.locref_targets] = [batch_size, None, None, num_joints * 2]
batch_spec[Batch.locref_mask] = [batch_size, None, None, num_joints * 2]
if cfg.pairwise_predict:
batch_spec[Batch.pairwise_targets] = [batch_size, None, None, num_joints * (num_joints - 1) * 2]
batch_spec[Batch.pairwise_mask] = [batch_size, None, None, num_joints * (num_joints - 1) * 2]
return batch_spec
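# Illustrative example (hypothetical cfg with batch_size=1, num_joints=14 and only
# location_refinement enabled): get_batch_spec(cfg) would return
#   {Batch.inputs:             [1, None, None, 3],
#    Batch.part_score_targets: [1, None, None, 14],
#    Batch.part_score_weights: [1, None, None, 14],
#    Batch.locref_targets:     [1, None, None, 28],
#    Batch.locref_mask:        [1, None, None, 28]}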
class PoseNet:
def __init__(self, cfg):
self.cfg = cfg
def extract_features(self, inputs):
net_fun = net_funcs[self.cfg.net_type]
mean = tf.constant(self.cfg.mean_pixel,
dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
im_centered = inputs - mean
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = net_fun(im_centered, global_pool=False, output_stride=16, is_training=False)
return net, end_points
def prediction_layers(self, features, end_points, reuse=None, no_interm=False, scope='pose'):
cfg = self.cfg
num_layers = re.findall("resnet_([0-9]*)", cfg.net_type)[0]
layer_name = 'resnet_v1_{}'.format(num_layers) + '/block{}/unit_{}/bottleneck_v1'
out = {}
with tf.variable_scope(scope, reuse=reuse):
out['part_pred'] = prediction_layer(cfg, features, 'part_pred',
cfg.num_joints)
if cfg.location_refinement:
out['locref'] = prediction_layer(cfg, features, 'locref_pred',
cfg.num_joints * 2)
if cfg.pairwise_predict:
out['pairwise_pred'] = prediction_layer(cfg, features, 'pairwise_pred',
cfg.num_joints * (cfg.num_joints - 1) * 2)
if cfg.intermediate_supervision and not no_interm:
interm_name = layer_name.format(3, cfg.intermediate_supervision_layer)
block_interm_out = end_points[interm_name]
out['part_pred_interm'] = prediction_layer(cfg, block_interm_out,
'intermediate_supervision',
cfg.num_joints)
return out
def get_net(self, inputs):
net, end_points = self.extract_features(inputs)
return self.prediction_layers(net, end_points)
def test(self, inputs):
heads = self.get_net(inputs)
return self.add_test_layers(heads)
def add_test_layers(self, heads):
prob = tf.sigmoid(heads['part_pred'])
outputs = {'part_prob': prob}
if self.cfg.location_refinement:
outputs['locref'] = heads['locref']
if self.cfg.pairwise_predict:
outputs['pairwise_pred'] = heads['pairwise_pred']
return outputs
def part_detection_loss(self, heads, batch, locref, pairwise, intermediate):
cfg = self.cfg
weigh_part_predictions = cfg.weigh_part_predictions
part_score_weights = batch[Batch.part_score_weights] if weigh_part_predictions else 1.0
def add_part_loss(pred_layer):
return tf.losses.sigmoid_cross_entropy(batch[Batch.part_score_targets],
heads[pred_layer],
part_score_weights)
loss = {}
loss['part_loss'] = add_part_loss('part_pred')
total_loss = loss['part_loss']
if intermediate:
loss['part_loss_interm'] = add_part_loss('part_pred_interm')
total_loss = total_loss + loss['part_loss_interm']
if locref:
locref_pred = heads['locref']
locref_targets = batch[Batch.locref_targets]
locref_weights = batch[Batch.locref_mask]
loss_func = losses.huber_loss if cfg.locref_huber_loss else tf.losses.mean_squared_error
loss['locref_loss'] = cfg.locref_loss_weight * loss_func(locref_targets, locref_pred, locref_weights)
total_loss = total_loss + loss['locref_loss']
if pairwise:
pairwise_pred = heads['pairwise_pred']
pairwise_targets = batch[Batch.pairwise_targets]
pairwise_weights = batch[Batch.pairwise_mask]
loss_func = losses.huber_loss if cfg.pairwise_huber_loss else tf.losses.mean_squared_error
loss['pairwise_loss'] = cfg.pairwise_loss_weight * loss_func(pairwise_targets, pairwise_pred,
pairwise_weights)
total_loss = total_loss + loss['pairwise_loss']
# loss['total_loss'] = slim.losses.get_total_loss(add_regularization_losses=params.regularize)
loss['total_loss'] = total_loss
return loss
def train(self, batch):
cfg = self.cfg
intermediate = cfg.intermediate_supervision
locref = cfg.location_refinement
pairwise = cfg.pairwise_predict
heads = self.get_net(batch[Batch.inputs])
return self.part_detection_loss(heads, batch, locref, pairwise, intermediate)
|
the-stack_0_12575 | import unittest
from dmLibrary import create_app
from dmLibrary.external.googleBook import GoogleBook
import time
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class TestBookClass(unittest.TestCase):
def setUp(self):
app = create_app()
self.ctx = app.app_context()
self.ctx.push()
self.client = app.test_client()
self.gb = GoogleBook()
logger.debug('logged from test_something')
def tearDown(self):
"""Do the testing """
pass
def test_postBook(self):
"""
        Post a new book, update its title, and finally delete it.
"""
data = {
"title":"เรียนรู้วิถีชีวิต ประเพณี พิธีกรรม และความเชื่อแบบบูรณาการ",
"isbn":"9748846709"
}
response = self.client.post('/api/v1/books',json=data)
logger.info(f"rawResp: {response.data}")
self.assertEqual(response.status_code, 201)
respData = response.get_json()
self.assertEqual(respData["data"]['title'], data['title'])
self.assertEqual(respData["data"]['isbn'], data['isbn'])
pageCount = self.gb.getPageCount(data["isbn"])
self.assertEqual(respData["data"]['pageCount'], 159)
logger.info(f"respData: {respData}")
book_id = respData["data"]["id"]
# try update the title
data = {
"title":"test update Book"
}
response = self.client.patch(f'/api/v1/books/{book_id}',json=data)
logger.info(f"rawResp: {response.data}")
self.assertEqual(response.status_code, 200)
respData = response.get_json()
self.assertEqual(respData["data"]['title'], data['title'])
# delete it
response = self.client.delete(f'/api/v1/books/{book_id}')
logger.info(f"rawResp: {response.data}")
self.assertEqual(response.status_code, 200)
|
the-stack_0_12576 | #coding=utf-8
'''
use the NTFS USN journal to iterate file paths
requires Admin
'''
import os
import sys
curpath = os.path.dirname(os.path.realpath(__file__))
#sys.path.append(os.path.join(curpath,'msvc','Debug'))
sys.path.append(os.path.join(curpath,'build','lib.win32-2.7'))
def entry():
import libfileiterusn
count = 0
it = libfileiterusn.iter_drive(u'c:\\')
if it:
for v in it:
count +=1
sys.stdout.write('{}\n'.format(v.encode('utf-8')))
print('count {}'.format(count))
if __name__ == '__main__':
entry() |
the-stack_0_12580 | import cv2
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
p = "./test/11.jpg"
oriImg = cv2.imread(p)
oriPoints = np.float32([[896.75,145.0],[1246.5,336.0], [796.0,407.25]])
canvasPoints = np.float32([[797.25,143.5],[1151.25,297.5],[710.75,415.25]])
rotationMatrix = cv2.getAffineTransform(np.array(oriPoints),np.array(canvasPoints))
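# getAffineTransform returns the 2x3 matrix M that maps each of the three source
# points onto its corresponding canvas point (applied as M @ [x, y, 1]^T below).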
dstImg = cv2.warpAffine(oriImg,rotationMatrix,(1664,2352))
# cv2.imshow("perspectiveImg", dstImg)
plt.imsave("./test/11out.jpg", dstImg)
plt.figure('image')
plt.imshow(dstImg)
plt.show() |
the-stack_0_12581 | from django.conf.urls import include
from django.urls import re_path
from django.views.decorators.cache import never_cache
from . import views
from .utils import render_xml
services_patterns = [
re_path(r'^monitor\.json$', never_cache(views.monitor),
name='amo.monitor'),
re_path(r'^loaded$', never_cache(views.loaded), name='amo.loaded'),
re_path(r'^403', views.handler403),
re_path(r'^404', views.handler404),
re_path(r'^500', views.handler500),
]
api_patterns = [
re_path(r'^site/$', views.SiteStatusView.as_view(),
name='amo-site-status'),
]
urlpatterns = [
re_path(r'^robots\.txt$', views.robots, name='robots.txt'),
re_path(r'^contribute\.json$', views.contribute, name='contribute.json'),
re_path(r'^services/', include(services_patterns)),
re_path(r'^__version__$', views.version, name='version.json'),
re_path(r'^opensearch\.xml$', render_xml,
{'template': 'amo/opensearch.xml'},
name='amo.opensearch'),
]
|
the-stack_0_12582 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
""" This file is a part of the VeRyPy classical vehicle routing problem
heuristic library and provides an implementation of the Stewart & Golden (1984)
3-opt* heuristic with Lagrangean relaxation.
The script is callable and can be used as a standalone solver for TSPLIB
formatted CVRPs. It has moderate dependencies: a TSP solver (the built-in one
can be used) and the internal 3-opt* implementation. Also, numpy is needed for
by the algorithm for few convenience functions, as well as scipy for reading
and preparing the problem instance.
"""
###############################################################################
import numpy as np
from logging import log, DEBUG
from random import shuffle
from functools import partial
from tsp_solvers.tsp_solver_lkh import solve_tsp_lkh as tsp_opt_algo
from cvrp_ops import fast_constraint_check, calculate_objective, normalize_solution
from local_search.solution_operators import do_3optstar_move
from local_search import LSOPT
from util import without_empty_routes, sol2routes, routes2sol
from config import COST_EPSILON as S_EPS
from config import CAPACITY_EPSILON as C_EPS
__author__ = "Jussi Rasku"
__copyright__ = "Copyright 2018, Jussi Rasku"
__credits__ = ["Jussi Rasku"]
__license__ = "MIT"
__maintainer__ = "Jussi Rasku"
__email__ = "[email protected]"
__status__ = "Development"
def _make_checker_function(lambda1, lambda2):
return
def _check_lr3opt_move(D, C, L, removed_weights, best_delta,
edges, end_p, end_n, cum_d, cum_l,
ldepot_12, ldepot_34, lambdas):
""" The Lagrangian constrant violation penalty calculation.
The penalties for each of the 14 possible 3-opt* moves are calculated here.
They are lifted as the edges are removed and recalculated when the new
edges involved in the move are added back. This also updates the delta.
.. note:: a good performance optimization would be to do this in parts when
i,j,k are changed but that would mean duplication of the do_3optstar_move
code, which was not preferable."""
## Calculate the penalties that are lifted
removed_penalties = 0
if C:
route_d = cum_d[0]+cum_d[1]
if ldepot_12:
if route_d-C_EPS>C:
removed_penalties+=(route_d-C)*lambdas[0]
route_d = cum_d[2]+cum_d[3]
else:
route_d+=cum_d[3]
if ldepot_34:
if route_d-C_EPS>C:
removed_penalties+=(route_d-C)*lambdas[0]
route_d = cum_d[4]+cum_d[5]
else:
route_d += cum_d[5]
if route_d-C_EPS>C:
removed_penalties+=(route_d-C)*lambdas[0]
if L:
route_l = cum_l[0]+cum_l[1]+D[end_n[0],end_n[1]]
if ldepot_12:
if route_l-S_EPS>L:
removed_penalties+=(route_l-L)*lambdas[1]
route_l = cum_l[2]+cum_l[3]+D[end_n[2],end_n[3]]
else:
route_l+=cum_l[3]+D[end_n[2],end_n[3]]
if ldepot_34:
if route_l-S_EPS>L:
removed_penalties+=(route_l-L)*lambdas[1]
route_l = cum_l[4]+cum_l[5]+D[end_n[4],end_n[5]]
else:
route_l += cum_l[5]+D[end_n[4],end_n[5]]
if route_l-S_EPS>L:
removed_penalties+=(route_l-L)*lambdas[1]
## Calculate the delta (still missing the move induced penalties)
added_weights = D[end_n[edges[0][0]], end_n[edges[0][1]]]+\
D[end_n[edges[1][0]], end_n[edges[1][1]]]+\
D[end_n[edges[2][0]], end_n[edges[2][1]]]
delta = added_weights-removed_weights-removed_penalties
## Calculate the move induced Lagrangian relaxation penalties
# Note that the move has to have some potential to be an improvement.
if (delta+S_EPS<best_delta):
prev_edge = None
route_d = 0
route_l = 0
# Assume that the edges are in right order and to the right direction
# so that the final solution can be formed by applying them
# consecutively.
for curr_edge in edges:
# Check if there is a visit to the depot on the previous segment,
# that is, between previous END and the current edge START nodes.
if (curr_edge[0]==0 or prev_edge[1]==5 or
(ldepot_12 is not None and (
end_p[prev_edge[1]]<=ldepot_12<=end_p[curr_edge[0]] or
end_p[prev_edge[1]]>=ldepot_12>=end_p[curr_edge[0]])) or
( ldepot_34 is not None and (
end_p[prev_edge[1]]<=ldepot_34<=end_p[curr_edge[0]] or
end_p[prev_edge[1]]>=ldepot_34>=end_p[curr_edge[0]]))):
if C:
if route_d-C_EPS>C:
delta += (route_d-C)*lambdas[0]
route_d = cum_d[curr_edge[0]]
if L:
if route_l-S_EPS>L:
delta += (route_l-L)*lambdas[1]
route_l = cum_l[curr_edge[0]]
# abort poor moves early
if (delta+S_EPS>=best_delta):
return None
if C:
route_d += cum_d[curr_edge[1]]
if L:
e_wt = D[end_n[curr_edge[0]],end_n[curr_edge[1]]]
route_l+=e_wt+cum_l[curr_edge[1]]
prev_edge = curr_edge
# The last edge has been connected, time to do check if the last formed
# route induces any penalties.
if C and route_d-C_EPS>C:
delta += (route_d-C)*lambdas[0]
if L and route_l-S_EPS>L:
delta += (route_l-L)*lambdas[1]
if (delta+S_EPS<best_delta):
return delta
# was not an improving move
return None
def _init_with_random(D,d,C,L):
customers = list(range(1,len(D)))
shuffle(customers)
random_sol = [0]+customers+[0]
return random_sol, calculate_objective(random_sol, D)
def _init_with_tsp(D,d,C,L):
route_tsp_sol, route_f = tsp_opt_algo(D, range(0,len(D)))
return route_tsp_sol+[0], route_f
def _force_feasible(sol, D, d, C, L):
# Force an incomplete solution feasible
feasible_routes = []
routes = sol2routes(sol)
for r in routes:
feasibler,totald,totall = [0], 0.0, 0.0
prevn = 0
for n in r[1:]:
C_violated = (C and totald+d[n]-C_EPS > C)
L_violated = (L and totall+D[prevn,n]+D[n,0]-S_EPS > L)
if (n==0 or C_violated or L_violated) and len(feasibler)>1:
feasibler.append(0)
feasible_routes.append(feasibler)
feasibler,totald,totall = [0], 0.0, 0.0
prevn = 0
if C:
totald+=d[n]
if L:
totall+=D[prevn,n]
if n!=0:
feasibler.append(n)
prevn = n
return routes2sol(feasible_routes)
def _get_max(D, with_sol):
aidx = np.array( np.unique( with_sol ) )
return np.max( D[aidx][:, aidx] )
def lr3opt_init(D, d, C, L,
initial_lambda1_C=None, initial_lambda1_L=None,
initialization_algorithm=_init_with_tsp,
postoptimize_with_3optstar=True,
max_concecutive_lamba_incs=None):
""" An implementation of the Stewart & Golden [1]_ 3-opt* heuristic
with Lagrangean relaxation.
The algorithm starts from a solution that can be either feasible or
infeasible and uses local search to move towards better and feasible
solutions. More specifically, it works by replacing the constraint checks
of the 3-opt* with a penalty that depends on how much the constraint was
violated. The 3-opt* that operates on the entire solution, that is, checks
for both intra and inter route moves on one pass, was used. The penalties
are iteratively doubled as the local search progresses and it is assumed
that this eventually forces the solutions to feasible region.
.. [1] Stewart, W. R. and Golden, B. L. (1984). A lagrangean relaxation
heuristic for vehicle routing. European Journal of Operational
Research, 15(1):84–88.
Parameters
----------
D : numpy.ndarray
is the full 2D distance matrix.
d : list
is a list of demands. d[0] should be 0.0 as it is the depot.
C : float
is the capacity constraint limit for the identical vehicles.
L : float
is the optional constraint for the maximum route length/duration/cost.
initial_lambda1_C : float
        is the initial Lagrange multiplier value for the capacity constraint C.
If left empty (None) the formula ``l1_C=average(d)/(20*max(D))`` is used.
The alternative value suggested by Stewart & Golden (1984) was 0.05.
initial_lambda1_L : float
        is the initial Lagrange multiplier value for the maximum route cost/
duration/length constraint. If left empty (None) the formula
``l1_L=average(distance to nearest neighbor)/(10*max(D))`` is used.
    initialization_algorithm (function): is a function that returns a TSP or VRP
solution and its objective function value. The default is to use LKH TSP
solution, but the function _init_with_random can be used to replicate the
results of Stewart & Golden (1984) where a random solution is used.
Returns
-------
list
The solution as a list of node indices to visit.
.. todo:: due to how the algorithm works, introducing minimize_K would require
balancing between penalizing constraint violations and penalizing new
      routes with an additional multiplier. This was not implemented.
"""
sol = None
try:
## STEP 1: Generate an initial solution
sol, initial_f = initialization_algorithm(D,d,C,L)
max_D = None
lambdas = [initial_lambda1_C,initial_lambda1_L]
if C and lambdas[0]==None:
max_D = _get_max(D, sol)
lambdas[0] = np.average(d)/(20*max_D)
if L and lambdas[1]==None:
# Stewart & Golden (1984) did not propose an extension for the maximum
# route duration/length/cost constraint, but here we have something
            # similar for L to what they used for the C constraint relaxation.
max_D = _get_max(D, sol) if (max_D is None) else max_D
closest_neighbor_D = D.copy()
np.fill_diagonal(closest_neighbor_D, max_D)
lambdas[1] = np.average(closest_neighbor_D.min(axis=0))/(10*max_D)
if __debug__:
log(DEBUG, "Start from initial solution %s (%.2f), and with l1=%.2f, l2=%.2f"%
(sol, calculate_objective(sol, D),
(0 if lambdas[0] is None else lambdas[0]),
(0 if lambdas[1] is None else lambdas[1])))
checker_function = partial(_check_lr3opt_move, lambdas=lambdas)
# STEP 2: Solve the relaxed problem using 3-opt*
c_lambda_incs = 0
while True:
# Make sure there is an empty route (for giving the 3-opt* procedure
# the option of adding vehicles)
while not ( sol[-1]==0 and sol[-2]==0 ):
sol+=[0]
if __debug__:
log(DEBUG-2, "Finding a LR3OPT move for %s (%.2f)"%
(sol, calculate_objective(sol, D)))
new_sol, delta = do_3optstar_move(sol, D, d, C, L,
strategy=LSOPT.FIRST_ACCEPT,
move_checker = checker_function)
# local optima reached, tighten the relaxation
            # TODO: sol==new_sol should not happen, but it does; as a quick fix, check for it.
if delta is None or sol==new_sol:
# return the first feasible solution (note: does not check for covering)
if fast_constraint_check(sol,D,d,C,L):
if __debug__:
log(DEBUG, "Reached feasible solution %s (%.2f)"%
(sol, calculate_objective(sol,D)))
while postoptimize_with_3optstar:
opt_sol, delta = do_3optstar_move(sol, D, d, C, L,
strategy=LSOPT.FIRST_ACCEPT)
if delta is None:
return normalize_solution(sol) # remove any [0,0]'s
else:
sol = opt_sol
#print("REMOVEME improved with post-optimization 3-opt*")
log(DEBUG, "Found improving 3-opt* move leading to %s (%.2f)"%
(sol, calculate_objective(sol,D)))
return normalize_solution(sol) # remove any [0,0]'s
else:
# STEP 3: Update lambdas
lambda_at_inf = False
if lambdas[0] is not None:
lambdas[0] = lambdas[0]*2
lambda_at_inf = lambdas[0]==float('inf')
if lambdas[1] is not None:
lambdas[1] = lambdas[1]*2
lambda_at_inf = lambda_at_inf or lambdas[0]==float('inf')
if __debug__:
log(DEBUG-1, "No improving moves left, increasing lambda to l1=%.2f, l2=%.2f"%
((0 if lambdas[0] is None else lambdas[0]),
(0 if lambdas[1] is None else lambdas[1])))
#print("No improving moves left, increasing lambda to l1=%.2f, l2=%.2f"%
# ((0 if lambdas[0] is None else lambdas[0]),
# (0 if lambdas[1] is None else lambdas[1])))
#TODO: if penalty >> cost, break (stuck on a infeasible region)
# how much bigger can be determined by finding such a
# pathological problem instance?
# safeguard for getting stuck
c_lambda_incs+=1
#print("REMOVEME: c_lambda_incs", c_lambda_incs)
if lambda_at_inf or (max_concecutive_lamba_incs is not None and
c_lambda_incs > max_concecutive_lamba_incs):
return _force_feasible(sol, D, d, C, L)
else:
if __debug__:
log(DEBUG, "Found improving LR3OPT move leading to %s (%.2f)"%
(new_sol, calculate_objective(new_sol,D)))
sol = new_sol
c_lambda_incs = 0
except KeyboardInterrupt: # or SIGINT
        # Pass on the current solution, forced feasible by splitting routes
# according to the constraints.
raise KeyboardInterrupt(_force_feasible(sol, D, d, C, L))
return without_empty_routes(sol)
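# Minimal usage sketch (hypothetical toy instance, not part of the original module):
#
#   import numpy as np
#   D = np.array([[0., 2., 3., 4.],
#                 [2., 0., 2., 3.],
#                 [3., 2., 0., 2.],
#                 [4., 3., 2., 0.]])
#   d = [0.0, 1.0, 1.0, 1.0]              # index 0 is the depot, demand 0.0
#   sol = lr3opt_init(D, d, C=2.0, L=None)
#   # sol is a giant-tour list such as [0, 1, 2, 0, 3, 0]; zeros delimit routes.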
# Wrapper for the command line user interface (CLI)
def get_lr3opt_algorithm():
algo_name = "SG84-LR3OPT"
algo_desc = "Stewart & Golden (1984) Lagrangian relaxed 3-opt* heuristic"
def call_init(points, D, d, C, L, st, wtt, single, minimize_K):
if minimize_K:
raise NotImplementedError("LR3OPT does not support minimizing the number of vehicles")
return lr3opt_init(D, d, C, L)
return (algo_name, algo_desc, call_init)
if __name__=="__main__":
from shared_cli import cli
cli(*get_lr3opt_algorithm())
|
the-stack_0_12584 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetVMInstanceView(Model):
"""The instance view of a virtual machine scale set VM.
Variables are only populated by the server, and will be ignored when
sending a request.
:param platform_update_domain: The Update Domain count.
:type platform_update_domain: int
:param platform_fault_domain: The Fault Domain count.
:type platform_fault_domain: int
:param rdp_thumb_print: The Remote desktop certificate thumbprint.
:type rdp_thumb_print: str
:param vm_agent: The VM Agent running on the virtual machine.
:type vm_agent:
~azure.mgmt.compute.v2018_04_01.models.VirtualMachineAgentInstanceView
:param maintenance_redeploy_status: The Maintenance Operation status on
the virtual machine.
:type maintenance_redeploy_status:
~azure.mgmt.compute.v2018_04_01.models.MaintenanceRedeployStatus
:param disks: The disks information.
:type disks: list[~azure.mgmt.compute.v2018_04_01.models.DiskInstanceView]
:param extensions: The extensions information.
:type extensions:
list[~azure.mgmt.compute.v2018_04_01.models.VirtualMachineExtensionInstanceView]
:ivar vm_health: The health status for the VM.
:vartype vm_health:
~azure.mgmt.compute.v2018_04_01.models.VirtualMachineHealthStatus
:param boot_diagnostics: Boot Diagnostics is a debugging feature which
allows you to view Console Output and Screenshot to diagnose VM status.
<br><br> You can easily view the output of your console log. <br><br>
Azure also enables you to see a screenshot of the VM from the hypervisor.
:type boot_diagnostics:
~azure.mgmt.compute.v2018_04_01.models.BootDiagnosticsInstanceView
:param statuses: The resource status information.
:type statuses:
list[~azure.mgmt.compute.v2018_04_01.models.InstanceViewStatus]
:param placement_group_id: The placement group in which the VM is running.
If the VM is deallocated it will not have a placementGroupId.
:type placement_group_id: str
"""
_validation = {
'vm_health': {'readonly': True},
}
_attribute_map = {
'platform_update_domain': {'key': 'platformUpdateDomain', 'type': 'int'},
'platform_fault_domain': {'key': 'platformFaultDomain', 'type': 'int'},
'rdp_thumb_print': {'key': 'rdpThumbPrint', 'type': 'str'},
'vm_agent': {'key': 'vmAgent', 'type': 'VirtualMachineAgentInstanceView'},
'maintenance_redeploy_status': {'key': 'maintenanceRedeployStatus', 'type': 'MaintenanceRedeployStatus'},
'disks': {'key': 'disks', 'type': '[DiskInstanceView]'},
'extensions': {'key': 'extensions', 'type': '[VirtualMachineExtensionInstanceView]'},
'vm_health': {'key': 'vmHealth', 'type': 'VirtualMachineHealthStatus'},
'boot_diagnostics': {'key': 'bootDiagnostics', 'type': 'BootDiagnosticsInstanceView'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
'placement_group_id': {'key': 'placementGroupId', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualMachineScaleSetVMInstanceView, self).__init__(**kwargs)
self.platform_update_domain = kwargs.get('platform_update_domain', None)
self.platform_fault_domain = kwargs.get('platform_fault_domain', None)
self.rdp_thumb_print = kwargs.get('rdp_thumb_print', None)
self.vm_agent = kwargs.get('vm_agent', None)
self.maintenance_redeploy_status = kwargs.get('maintenance_redeploy_status', None)
self.disks = kwargs.get('disks', None)
self.extensions = kwargs.get('extensions', None)
self.vm_health = None
self.boot_diagnostics = kwargs.get('boot_diagnostics', None)
self.statuses = kwargs.get('statuses', None)
self.placement_group_id = kwargs.get('placement_group_id', None)
|
the-stack_0_12586 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This module contains REST servlets to do with profile: /profile/<paths> """
from twisted.internet import defer
from .base import ClientV1RestServlet, client_path_patterns
from synapse.types import UserID
from synapse.http.servlet import parse_json_object_from_request
class ProfileDisplaynameRestServlet(ClientV1RestServlet):
PATTERNS = client_path_patterns("/profile/(?P<user_id>[^/]*)/displayname")
def __init__(self, hs):
super(ProfileDisplaynameRestServlet, self).__init__(hs)
self.handlers = hs.get_handlers()
@defer.inlineCallbacks
def on_GET(self, request, user_id):
user = UserID.from_string(user_id)
displayname = yield self.handlers.profile_handler.get_displayname(
user,
)
ret = {}
if displayname is not None:
ret["displayname"] = displayname
defer.returnValue((200, ret))
@defer.inlineCallbacks
def on_PUT(self, request, user_id):
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
user = UserID.from_string(user_id)
is_admin = yield self.auth.is_server_admin(requester.user)
content = parse_json_object_from_request(request)
try:
new_name = content["displayname"]
except:
defer.returnValue((400, "Unable to parse name"))
yield self.handlers.profile_handler.set_displayname(
user, requester, new_name, is_admin)
defer.returnValue((200, {}))
def on_OPTIONS(self, request, user_id):
return (200, {})
class ProfileAvatarURLRestServlet(ClientV1RestServlet):
PATTERNS = client_path_patterns("/profile/(?P<user_id>[^/]*)/avatar_url")
def __init__(self, hs):
super(ProfileAvatarURLRestServlet, self).__init__(hs)
self.handlers = hs.get_handlers()
@defer.inlineCallbacks
def on_GET(self, request, user_id):
user = UserID.from_string(user_id)
avatar_url = yield self.handlers.profile_handler.get_avatar_url(
user,
)
ret = {}
if avatar_url is not None:
ret["avatar_url"] = avatar_url
defer.returnValue((200, ret))
@defer.inlineCallbacks
def on_PUT(self, request, user_id):
requester = yield self.auth.get_user_by_req(request)
user = UserID.from_string(user_id)
is_admin = yield self.auth.is_server_admin(requester.user)
content = parse_json_object_from_request(request)
try:
new_name = content["avatar_url"]
except:
defer.returnValue((400, "Unable to parse name"))
yield self.handlers.profile_handler.set_avatar_url(
user, requester, new_name, is_admin)
defer.returnValue((200, {}))
def on_OPTIONS(self, request, user_id):
return (200, {})
class ProfileRestServlet(ClientV1RestServlet):
PATTERNS = client_path_patterns("/profile/(?P<user_id>[^/]*)")
def __init__(self, hs):
super(ProfileRestServlet, self).__init__(hs)
self.handlers = hs.get_handlers()
@defer.inlineCallbacks
def on_GET(self, request, user_id):
user = UserID.from_string(user_id)
displayname = yield self.handlers.profile_handler.get_displayname(
user,
)
avatar_url = yield self.handlers.profile_handler.get_avatar_url(
user,
)
ret = {}
if displayname is not None:
ret["displayname"] = displayname
if avatar_url is not None:
ret["avatar_url"] = avatar_url
defer.returnValue((200, ret))
def register_servlets(hs, http_server):
ProfileDisplaynameRestServlet(hs).register(http_server)
ProfileAvatarURLRestServlet(hs).register(http_server)
ProfileRestServlet(hs).register(http_server)
|
the-stack_0_12588 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from six import text_type
from doommoses.corpus import Perluniprops
from doommoses.corpus import NonbreakingPrefixes
from doommoses.util import is_cjk
perluniprops = Perluniprops()
nonbreaking_prefixes = NonbreakingPrefixes()
class MosesTokenizer(object):
"""
This is a Python port of the Moses Tokenizer from
https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl
"""
# Perl Unicode Properties character sets.
IsN = text_type("".join(perluniprops.chars("IsN")))
IsAlnum = text_type("".join(perluniprops.chars("IsAlnum"))) # + u'्'
IsSc = text_type("".join(perluniprops.chars("IsSc")))
IsSo = text_type("".join(perluniprops.chars("IsSo")))
IsAlpha = text_type("".join(perluniprops.chars("IsAlpha")))
IsLower = text_type("".join(perluniprops.chars("IsLower")))
# Remove ASCII junk.
DEDUPLICATE_SPACE = r"\s+", r" "
ASCII_JUNK = r"[\000-\037]", r""
# Neurotic Perl heading space, multi-space and trailing space chomp.
# These regexes are kept for reference purposes and shouldn't be used!!
MID_STRIP = r" +", r" " # Use DEDUPLICATE_SPACE instead.
LEFT_STRIP = r"^ ", r"" # Uses text.lstrip() instead.
RIGHT_STRIP = r" $", r"" # Uses text.rstrip() instead.
# Pad all "other" special characters not in IsAlnum.
PAD_NOT_ISALNUM = r"([^{}\s\.'’\`\,\-])".format(IsAlnum), r" \1 "
# Splits all hyphens (regardless of circumstances), e.g.
# 'foo-bar' -> 'foo @-@ bar'
AGGRESSIVE_HYPHEN_SPLIT = (
r"([{alphanum}])\-(?=[{alphanum}])".format(alphanum=IsAlnum),
r"\1 @-@ ",
)
# Make multi-dots stay together.
    REPLACE_DOT_WITH_LITERALSTRING_1 = r"\.([\.]+)", r" DOTMULTI\1"
    REPLACE_DOT_WITH_LITERALSTRING_2 = r"DOTMULTI\.([^\.])", r"DOTDOTMULTI \1"
REPLACE_DOT_WITH_LITERALSTRING_3 = r"DOTMULTI\.", "DOTDOTMULTI"
# Separate out "," except if within numbers (5,300)
# e.g. A,B,C,D,E > A , B,C , D,E
# First application uses up B so rule can't see B,C
# two-step version here may create extra spaces but these are removed later
# will also space digit,letter or letter,digit forms (redundant with next section)
COMMA_SEPARATE_1 = r"([^{}])[,]".format(IsN), r"\1 , "
COMMA_SEPARATE_2 = r"[,]([^{}])".format(IsN), r" , \1"
COMMA_SEPARATE_3 = r"([{}])[,]$".format(IsN), r"\1 , "
# Attempt to get correct directional quotes.
DIRECTIONAL_QUOTE_1 = r"^``", r"`` "
DIRECTIONAL_QUOTE_2 = r'^"', r"`` "
DIRECTIONAL_QUOTE_3 = r"^`([^`])", r"` \1"
DIRECTIONAL_QUOTE_4 = r"^'", r"` "
DIRECTIONAL_QUOTE_5 = r'([ ([{<])"', r"\1 `` "
DIRECTIONAL_QUOTE_6 = r"([ ([{<])``", r"\1 `` "
DIRECTIONAL_QUOTE_7 = r"([ ([{<])`([^`])", r"\1 ` \2"
DIRECTIONAL_QUOTE_8 = r"([ ([{<])'", r"\1 ` "
# Replace ... with _ELLIPSIS_
REPLACE_ELLIPSIS = r"\.\.\.", r" _ELLIPSIS_ "
# Restore _ELLIPSIS_ with ...
RESTORE_ELLIPSIS = r"_ELLIPSIS_", r"\.\.\."
# Restore apostrophe
RESTORE_APOS = r"_APOS_", r"\'"
RESTORE_APOS_ALT = r"_APOS_ALT_", r"’"
# Pad , with tailing space except if within numbers, e.g. 5,300
COMMA_1 = r"([^{numbers}])[,]([^{numbers}])".format(numbers=IsN), r"\1 , \2"
COMMA_2 = r"([{numbers}])[,]([^{numbers}])".format(numbers=IsN), r"\1 , \2"
COMMA_3 = r"([^{numbers}])[,]([{numbers}])".format(numbers=IsN), r"\1 , \2"
# Pad unicode symbols with spaces.
SYMBOLS = r"([;:@#\$%&{}{}])".format(IsSc, IsSo), r" \1 "
# Separate out intra-token slashes. PTB tokenization doesn't do this, so
# the tokens should be merged prior to parsing with a PTB-trained parser.
# e.g. "and/or" -> "and @/@ or"
INTRATOKEN_SLASHES = (
r"([{alphanum}])\/([{alphanum}])".format(alphanum=IsAlnum),
r"$1 \@\/\@ $2",
)
# Splits final period at end of string.
FINAL_PERIOD = r"""([^.])([.])([\]\)}>"']*) ?$""", r"\1 \2\3"
# Pad all question marks and exclamation marks with spaces.
PAD_QUESTION_EXCLAMATION_MARK = r"([?!])", r" \1 "
# Handles parentheses, brackets and converts them to PTB symbols.
PAD_PARENTHESIS = r"([\]\[\(\){}<>])", r" \1 "
CONVERT_PARENTHESIS_1 = r"\(", "-LRB-"
CONVERT_PARENTHESIS_2 = r"\)", "-RRB-"
CONVERT_PARENTHESIS_3 = r"\[", "-LSB-"
CONVERT_PARENTHESIS_4 = r"\]", "-RSB-"
CONVERT_PARENTHESIS_5 = r"\{", "-LCB-"
CONVERT_PARENTHESIS_6 = r"\}", "-RCB-"
# Pads double dashes with spaces.
PAD_DOUBLE_DASHES = r"--", " -- "
# Adds spaces to start and end of string to simplify further regexps.
PAD_START_OF_STR = r"^", " "
PAD_END_OF_STR = r"$", " "
# Converts double quotes to two single quotes and pad with spaces.
CONVERT_DOUBLE_TO_SINGLE_QUOTES = r'"', " '' "
# Handles single quote in possessives or close-single-quote.
HANDLES_SINGLE_QUOTES = r"([^'’])(['’]) ", r"\1 \2 "
# Pad apostrophe in possessive or close-single-quote.
APOSTROPHE = r"([^'’])(['’])", r"\1 \2 "
# Prepend space on contraction apostrophe.
CONTRACTION_1 = r"'([sSmMdD]) ", r"_APOS_\1 "
CONTRACTION_2 = r"'ll ", r"_APOS_ll "
CONTRACTION_3 = r"'re ", r"_APOS_re "
CONTRACTION_4 = r"'ve ", r"_APOS_ve "
CONTRACTION_5 = r"n't ", r"n_APOS_t "
CONTRACTION_6 = r"'LL ", r"_APOS_LL "
CONTRACTION_7 = r"'RE ", r"_APOS_RE "
CONTRACTION_8 = r"'VE ", r"_APOS_VE "
CONTRACTION_9 = r"N'T ", r"N_APOS_T "
# Informal Contractions.
CONTRACTION_10 = r" ([Cc])annot ", r" \1annot "
CONTRACTION_11 = r" ([Dd])'ye ", r" \1_APOS_ye "
CONTRACTION_12 = r" ([Gg])imme ", r" \1imme "
CONTRACTION_13 = r" ([Gg])onna ", r" \1onna "
CONTRACTION_14 = r" ([Gg])otta ", r" \1otta "
CONTRACTION_15 = r" ([Ll])emme ", r" \1emme "
CONTRACTION_16 = r" ([Mm])ore'n ", r" \1ore_APOS_n "
CONTRACTION_17 = r" '([Tt])is ", r" _APOS_\1is "
CONTRACTION_18 = r" '([Tt])was ", r" _APOS_\1was "
CONTRACTION_19 = r" ([Ww])anna ", r" \1anna "
# Prepend space on contraction apostrophe.
CONTRACTIONALT_1 = r"’([sSmMdD]) ", r"_APOS_ALT_\1 "
CONTRACTIONALT_2 = r"’ll ", r"_APOS_ALT_ll "
CONTRACTIONALT_3 = r"’re ", r"_APOS_ALT_re "
CONTRACTIONALT_4 = r"’ve ", r"_APOS_ALT_ve "
CONTRACTIONALT_5 = r"n’t ", r"n_APOS_ALT_t "
CONTRACTIONALT_6 = r"’LL ", r"_APOS_ALT_LL "
CONTRACTIONALT_7 = r"’RE ", r"_APOS_ALT_RE "
CONTRACTIONALT_8 = r"’VE ", r"_APOS_ALT_VE "
CONTRACTIONALT_9 = r"N’T ", r"N_APOS__ALT_T "
# Informal Contractions.
CONTRACTIONALT_10 = r" ([Cc])annot ", r" \1annot "
CONTRACTIONALT_11 = r" ([Dd])’ye ", r" \1_APOS_ALT_ye "
CONTRACTIONALT_12 = r" ([Gg])imme ", r" \1imme "
CONTRACTIONALT_13 = r" ([Gg])onna ", r" \1onna "
CONTRACTIONALT_14 = r" ([Gg])otta ", r" \1otta "
CONTRACTIONALT_15 = r" ([Ll])emme ", r" \1emme "
CONTRACTIONALT_16 = r" ([Mm])ore’n ", r" \1ore_APOS_ALT_n "
CONTRACTIONALT_17 = r" ’([Tt])is ", r" _APOS_ALT_\1is "
CONTRACTIONALT_18 = r" ’([Tt])was ", r" _APOS_ALT_\1was "
CONTRACTIONALT_19 = r" ([Ww])anna ", r" \1anna "
# Hyphens at the boundaries
BOUNDARIES_1 = r"(\w)-(\W)", r"\1 - \2"
BOUNDARIES_2 = r"^-(\w)", r"- \1"
BOUNDARIES_3 = r"(\w)-$", r"\1 -"
BOUNDARIES_4 = r"(\W)-(\w)", r"\1 - \2"
# Clean out extra spaces
CLEAN_EXTRA_SPACE_1 = r" *", r" "
CLEAN_EXTRA_SPACE_2 = r"^ *", r""
CLEAN_EXTRA_SPACE_3 = r" *$", r""
# Neurotic Perl regexes to escape special characters.
ESCAPE_AMPERSAND = r"&", r"&"
ESCAPE_PIPE = r"\|", r"|"
ESCAPE_LEFT_ANGLE_BRACKET = r"<", r"<"
ESCAPE_RIGHT_ANGLE_BRACKET = r">", r">"
ESCAPE_SINGLE_QUOTE = r"\'", r"'"
ESCAPE_DOUBLE_QUOTE = r"\"", r"""
ESCAPE_LEFT_SQUARE_BRACKET = r"\[", r"["
ESCAPE_RIGHT_SQUARE_BRACKET = r"]", r"]"
EN_SPECIFIC_1 = r"([^{alpha}])(['’])([^{alpha}])".format(alpha=IsAlpha), r"\1 \2 \3"
EN_SPECIFIC_2 = (
r"([^{alpha}{isn}])(['’])([{alpha}])".format(alpha=IsAlpha, isn=IsN),
r"\1 \2 \3",
)
EN_SPECIFIC_3 = r"([{alpha}])(['’])([^{alpha}])".format(alpha=IsAlpha), r"\1 \2 \3"
EN_SPECIFIC_4 = r"([{alpha}])(['’])([{alpha}])".format(alpha=IsAlpha), r"\1 \2 \3"
EN_SPECIFIC_5 = r"([{isn}])(['’])([s])".format(isn=IsN), r"\1 \2 \3"
ENGLISH_SPECIFIC_APOSTROPHE = [
EN_SPECIFIC_1,
EN_SPECIFIC_2,
EN_SPECIFIC_3,
#EN_SPECIFIC_4,
EN_SPECIFIC_5,
]
FR_IT_SPECIFIC_1 = r"([^{alpha}])(['’])([^{alpha}])".format(alpha=IsAlpha), r"\1 \2 \3"
FR_IT_SPECIFIC_2 = r"([^{alpha}])(['’])([{alpha}])".format(alpha=IsAlpha), r"\1 \2 \3"
FR_IT_SPECIFIC_3 = r"([{alpha}])(['’])([^{alpha}])".format(alpha=IsAlpha), r"\1 \2 \3"
FR_IT_SPECIFIC_4 = r"([{alpha}])(['’])([{alpha}])".format(alpha=IsAlpha), r"\1\2 \2"
FR_IT_SPECIFIC_APOSTROPHE = [
FR_IT_SPECIFIC_1,
FR_IT_SPECIFIC_2,
FR_IT_SPECIFIC_3,
FR_IT_SPECIFIC_4,
]
    NON_SPECIFIC_APOSTROPHE = r"([\'’])", r" \1 "
TRAILING_DOT_APOSTROPHE = r"\.' ?$", " . ' "
BASIC_PROTECTED_PATTERN_1 = r"<\/?\S+\/?>"
BASIC_PROTECTED_PATTERN_2 = r'<\S+( [a-zA-Z0-9]+\="?[^"]")+ ?\/?>'
BASIC_PROTECTED_PATTERN_3 = r"<\S+( [a-zA-Z0-9]+\='?[^']')+ ?\/?>"
BASIC_PROTECTED_PATTERN_4 = r"[\w\-\_\.]+\@([\w\-\_]+\.)+[a-zA-Z]{2,}"
BASIC_PROTECTED_PATTERN_5 = r"(http[s]?|ftp):\/\/[^:\/\s]+(\/\w+)*\/[\w\-\.]+"
MOSES_PENN_REGEXES_1 = [
DEDUPLICATE_SPACE,
ASCII_JUNK,
DIRECTIONAL_QUOTE_1,
DIRECTIONAL_QUOTE_2,
DIRECTIONAL_QUOTE_3,
DIRECTIONAL_QUOTE_4,
DIRECTIONAL_QUOTE_5,
DIRECTIONAL_QUOTE_6,
DIRECTIONAL_QUOTE_7,
DIRECTIONAL_QUOTE_8,
REPLACE_ELLIPSIS,
COMMA_1,
COMMA_2,
COMMA_3,
SYMBOLS,
INTRATOKEN_SLASHES,
FINAL_PERIOD,
PAD_QUESTION_EXCLAMATION_MARK,
PAD_PARENTHESIS,
CONVERT_PARENTHESIS_1,
CONVERT_PARENTHESIS_2,
CONVERT_PARENTHESIS_3,
CONVERT_PARENTHESIS_4,
CONVERT_PARENTHESIS_5,
CONVERT_PARENTHESIS_6,
PAD_DOUBLE_DASHES,
PAD_START_OF_STR,
PAD_END_OF_STR,
CONVERT_DOUBLE_TO_SINGLE_QUOTES,
HANDLES_SINGLE_QUOTES,
APOSTROPHE,
CONTRACTION_1,
CONTRACTION_2,
CONTRACTION_3,
CONTRACTION_4,
CONTRACTION_5,
CONTRACTION_6,
CONTRACTION_7,
CONTRACTION_8,
CONTRACTION_9,
CONTRACTION_10,
CONTRACTION_11,
CONTRACTION_12,
CONTRACTION_13,
CONTRACTION_14,
CONTRACTION_15,
CONTRACTION_16,
CONTRACTION_17,
CONTRACTION_18,
CONTRACTION_19,
CONTRACTIONALT_1,
CONTRACTIONALT_2,
CONTRACTIONALT_3,
CONTRACTIONALT_4,
CONTRACTIONALT_5,
CONTRACTIONALT_6,
CONTRACTIONALT_7,
CONTRACTIONALT_8,
CONTRACTIONALT_9,
CONTRACTIONALT_10,
CONTRACTIONALT_11,
CONTRACTIONALT_12,
CONTRACTIONALT_13,
CONTRACTIONALT_14,
CONTRACTIONALT_15,
CONTRACTIONALT_16,
CONTRACTIONALT_17,
CONTRACTIONALT_18,
CONTRACTIONALT_19,
]
MOSES_PENN_REGEXES_2 = [
RESTORE_ELLIPSIS,
CLEAN_EXTRA_SPACE_1,
CLEAN_EXTRA_SPACE_2,
CLEAN_EXTRA_SPACE_3,
ESCAPE_AMPERSAND,
ESCAPE_PIPE,
ESCAPE_LEFT_ANGLE_BRACKET,
ESCAPE_RIGHT_ANGLE_BRACKET,
ESCAPE_SINGLE_QUOTE,
ESCAPE_DOUBLE_QUOTE,
RESTORE_APOS,
RESTORE_APOS_ALT
]
MOSES_ESCAPE_XML_REGEXES = [
ESCAPE_AMPERSAND,
ESCAPE_PIPE,
ESCAPE_LEFT_ANGLE_BRACKET,
ESCAPE_RIGHT_ANGLE_BRACKET,
ESCAPE_SINGLE_QUOTE,
ESCAPE_DOUBLE_QUOTE,
ESCAPE_LEFT_SQUARE_BRACKET,
ESCAPE_RIGHT_SQUARE_BRACKET,
]
BASIC_PROTECTED_PATTERNS = [
BASIC_PROTECTED_PATTERN_1,
BASIC_PROTECTED_PATTERN_2,
BASIC_PROTECTED_PATTERN_3,
BASIC_PROTECTED_PATTERN_4,
BASIC_PROTECTED_PATTERN_5,
]
def __init__(self, lang="en", custom_nonbreaking_prefixes_file=None):
# Initialize the object.
super(MosesTokenizer, self).__init__()
self.lang = lang
# Initialize the language specific nonbreaking prefixes.
self.NONBREAKING_PREFIXES = [
_nbp.strip() for _nbp in nonbreaking_prefixes.words(lang)
]
# Load custom nonbreaking prefixes file.
if custom_nonbreaking_prefixes_file:
self.NONBREAKING_PREFIXES = []
with open(custom_nonbreaking_prefixes_file, 'r') as fin:
for line in fin:
line = line.strip()
if line and not line.startswith("#"):
if line not in self.NONBREAKING_PREFIXES:
self.NONBREAKING_PREFIXES.append(line)
self.NUMERIC_ONLY_PREFIXES = [
w.rpartition(" ")[0]
for w in self.NONBREAKING_PREFIXES
if self.has_numeric_only(w)
]
# Add CJK characters to alpha and alnum.
if self.lang in ['zh', 'ja', 'ko', 'cjk']:
cjk_chars = ""
if self.lang in ["ko", 'cjk']:
cjk_chars += text_type("".join(perluniprops.chars("Hangul")))
if self.lang in ["zh", 'cjk']:
cjk_chars += text_type("".join(perluniprops.chars("Han")))
if self.lang in ["ja", 'cjk']:
cjk_chars += text_type("".join(perluniprops.chars("Hiragana")))
cjk_chars += text_type("".join(perluniprops.chars("Katakana")))
cjk_chars += text_type("".join(perluniprops.chars("Han")))
self.IsAlpha += cjk_chars
self.IsAlnum += cjk_chars
# Overwrite the alnum regexes.
self.PAD_NOT_ISALNUM = r"([^{}\s\.'\`\,\-])".format(self.IsAlnum), r" \1 "
self.AGGRESSIVE_HYPHEN_SPLIT = (
r"([{alphanum}])\-(?=[{alphanum}])".format(alphanum=self.IsAlnum),
r"\1 @-@ ",
)
self.INTRATOKEN_SLASHES = (
r"([{alphanum}])\/([{alphanum}])".format(alphanum=self.IsAlnum),
r"$1 \@\/\@ $2",
)
def replace_multidots(self, text):
text = re.sub(r"\.([\.]+)", r" DOTMULTI\1", text)
while re.search(r"DOTMULTI\.", text):
text = re.sub(r"DOTMULTI\.([^\.])", r"DOTDOTMULTI \1", text)
text = re.sub(r"DOTMULTI\.", "DOTDOTMULTI", text)
return text
def restore_multidots(self, text):
while re.search(r"DOTDOTMULTI", text):
text = re.sub(r"DOTDOTMULTI", r"DOTMULTI.", text)
return re.sub(r"DOTMULTI", r".", text)
def islower(self, text):
return not set(text).difference(set(self.IsLower))
def isanyalpha(self, text):
return any(set(text).intersection(set(self.IsAlpha)))
def has_numeric_only(self, text):
return bool(re.search(r"(.*)[\s]+(\#NUMERIC_ONLY\#)", text))
def handles_nonbreaking_prefixes(self, text):
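        # Illustrative behaviour: "Mr. Smith" keeps "Mr." intact because "Mr" is a
        # nonbreaking prefix, while "the end. Next" becomes "the end . Next".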
# Splits the text into tokens to check for nonbreaking prefixes.
tokens = text.split()
num_tokens = len(tokens)
for i, token in enumerate(tokens):
# Checks if token ends with a fullstop.
token_ends_with_period = re.search(r"^(\S+)\.$", token)
if token_ends_with_period:
prefix = token_ends_with_period.group(1)
# Checks for 3 conditions if
# i. the prefix contains a fullstop and
# any char in the prefix is within the IsAlpha charset
# ii. the prefix is in the list of nonbreaking prefixes and
# does not contain #NUMERIC_ONLY#
# iii. the token is not the last token and that the
# next token contains all lowercase.
if (
("." in prefix and self.isanyalpha(prefix))
or (
prefix in self.NONBREAKING_PREFIXES
and prefix not in self.NUMERIC_ONLY_PREFIXES
)
or (
i != num_tokens - 1
and tokens[i + 1]
and self.islower(tokens[i + 1][0])
)
):
pass # No change to the token.
# Checks if the prefix is in NUMERIC_ONLY_PREFIXES
# and ensures that the next word is a digit.
elif (
prefix in self.NUMERIC_ONLY_PREFIXES
and (i + 1) < num_tokens
and re.search(r"^[0-9]+", tokens[i + 1])
):
pass # No change to the token.
else: # Otherwise, adds a space after the tokens before a dot.
tokens[i] = prefix + " ."
return " ".join(tokens) # Stitch the tokens back.
def escape_xml(self, text):
for regexp, substitution in self.MOSES_ESCAPE_XML_REGEXES:
text = re.sub(regexp, substitution, text)
return text
def penn_tokenize(self, text, return_str=False):
"""
This is a Python port of the Penn treebank tokenizer adapted by the Moses
machine translation community.
"""
# Converts input string into unicode.
text = text_type(text)
        # Perform a chain of regex substitutions using MOSES_PENN_REGEXES_1
for regexp, substitution in self.MOSES_PENN_REGEXES_1:
text = re.sub(regexp, substitution, text)
# Handles nonbreaking prefixes.
text = self.handles_nonbreaking_prefixes(text)
# Restore ellipsis, clean extra spaces, escape XML symbols.
for regexp, substitution in self.MOSES_PENN_REGEXES_2:
text = re.sub(regexp, substitution, text)
return text if return_str else text.split()
def tokenize(
self,
text,
aggressive_dash_splits=False,
return_str=False,
escape=True,
protected_patterns=None,
):
"""
Python port of the Moses tokenizer.
        :param text: A single string, i.e. sentence text.
        :type text: str
        :param aggressive_dash_splits: Option to trigger dash split rules.
:type aggressive_dash_splits: bool
"""
# Converts input string into unicode.
text = text_type(text)
# De-duplicate spaces and clean ASCII junk
for regexp, substitution in [self.DEDUPLICATE_SPACE, self.ASCII_JUNK, self.BOUNDARIES_1,
self.BOUNDARIES_2, self.BOUNDARIES_3, self.BOUNDARIES_4]:
text = re.sub(regexp, substitution, text)
if protected_patterns:
# Find the tokens that needs to be protected.
protected_tokens = [
match.group()
for protected_pattern in protected_patterns
for match in re.finditer(protected_pattern, text, re.IGNORECASE)
]
# Apply the protected_patterns.
for i, token in enumerate(protected_tokens):
substitution = "THISISPROTECTED" + str(i).zfill(3)
text = text.replace(token, substitution)
# Strips heading and trailing spaces.
text = text.strip()
# FIXME!!!
'''
# For Finnish and Swedish, separate out all "other" special characters.
if self.lang in ["fi", "sv"]:
# In Finnish and Swedish, the colon can be used inside words
# as an apostrophe-like character:
# USA:n, 20:een, EU:ssa, USA:s, S:t
regexp, substitution = self.FI_SV_COLON_APOSTROPHE
text = re.sub(regexp, substitution, text)
# If a colon is not immediately followed by lower-case characters,
# separate it out anyway.
regexp, substitution = self.FI_SV_COLON_NO_LOWER_FOLLOW
text = re.sub(regexp, substitution, text)
else:
'''
# Separate special characters outside of IsAlnum character set.
regexp, substitution = self.PAD_NOT_ISALNUM
text = re.sub(regexp, substitution, text)
# Aggressively splits dashes
if aggressive_dash_splits:
regexp, substitution = self.AGGRESSIVE_HYPHEN_SPLIT
text = re.sub(regexp, substitution, text)
# Replaces multidots with "DOTDOTMULTI" literal strings.
text = self.replace_multidots(text)
# Separate out "," except if within numbers e.g. 5,300
for regexp, substitution in [
self.COMMA_SEPARATE_1,
self.COMMA_SEPARATE_2,
self.COMMA_SEPARATE_3,
]:
text = re.sub(regexp, substitution, text)
# (Language-specific) apostrophe tokenization.
if self.lang == "en":
for regexp, substitution in self.ENGLISH_SPECIFIC_APOSTROPHE:
text = re.sub(regexp, substitution, text)
elif self.lang in ["fr", "it", "ca"]:
for regexp, substitution in self.FR_IT_SPECIFIC_APOSTROPHE:
text = re.sub(regexp, substitution, text)
# FIXME!!!
##elif self.lang == "so":
## for regexp, substitution in self.SO_SPECIFIC_APOSTROPHE:
## text = re.sub(regexp, substitution, text)
else:
regexp, substitution = self.NON_SPECIFIC_APOSTROPHE
text = re.sub(regexp, substitution, text)
# Handles nonbreaking prefixes.
text = self.handles_nonbreaking_prefixes(text)
# Cleans up extraneous spaces.
regexp, substitution = self.DEDUPLICATE_SPACE
text = re.sub(regexp, substitution, text).strip()
# Split trailing ".'".
regexp, substitution = self.TRAILING_DOT_APOSTROPHE
text = re.sub(regexp, substitution, text)
# Restore the protected tokens.
if protected_patterns:
for i, token in enumerate(protected_tokens):
substitution = "THISISPROTECTED" + str(i).zfill(3)
text = text.replace(substitution, token)
# Restore multidots.
text = self.restore_multidots(text)
if escape:
# Escape XML symbols.
text = self.escape_xml(text)
return text if return_str else text.split()
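# Usage sketch (illustrative; exact tokens depend on the language resources):
#
#   mt = MosesTokenizer(lang="en")
#   mt.tokenize("This, is a sentence with weird symbols...")
#   # -> ['This', ',', 'is', 'a', 'sentence', 'with', 'weird', 'symbols', '...']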
class MosesDetokenizer(object):
"""
This is a Python port of the Moses Detokenizer from
https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/detokenizer.perl
"""
# Currency Symbols.
IsAlnum = text_type("".join(perluniprops.chars("IsAlnum")))
IsAlpha = text_type("".join(perluniprops.chars("IsAlpha")))
IsSc = text_type("".join(perluniprops.chars("IsSc")))
AGGRESSIVE_HYPHEN_SPLIT = r" \@\-\@ ", r"-"
# Merge multiple spaces.
ONE_SPACE = re.compile(r" {2,}"), " "
# Unescape special characters.
UNESCAPE_FACTOR_SEPARATOR = r"|", r"|"
UNESCAPE_LEFT_ANGLE_BRACKET = r"<", r"<"
UNESCAPE_RIGHT_ANGLE_BRACKET = r">", r">"
UNESCAPE_DOUBLE_QUOTE = r""", r'"'
UNESCAPE_SINGLE_QUOTE = r"'", r"'"
UNESCAPE_SYNTAX_NONTERMINAL_LEFT = r"[", r"["
UNESCAPE_SYNTAX_NONTERMINAL_RIGHT = r"]", r"]"
UNESCAPE_AMPERSAND = r"&", r"&"
# The legacy regexes are used to support outputs from older Moses versions.
UNESCAPE_FACTOR_SEPARATOR_LEGACY = r"&bar;", r"|"
UNESCAPE_SYNTAX_NONTERMINAL_LEFT_LEGACY = r"&bra;", r"["
UNESCAPE_SYNTAX_NONTERMINAL_RIGHT_LEGACY = r"&ket;", r"]"
MOSES_UNESCAPE_XML_REGEXES = [
UNESCAPE_FACTOR_SEPARATOR_LEGACY,
UNESCAPE_FACTOR_SEPARATOR,
UNESCAPE_LEFT_ANGLE_BRACKET,
UNESCAPE_RIGHT_ANGLE_BRACKET,
UNESCAPE_SYNTAX_NONTERMINAL_LEFT_LEGACY,
UNESCAPE_SYNTAX_NONTERMINAL_RIGHT_LEGACY,
UNESCAPE_DOUBLE_QUOTE,
UNESCAPE_SINGLE_QUOTE,
UNESCAPE_SYNTAX_NONTERMINAL_LEFT,
UNESCAPE_SYNTAX_NONTERMINAL_RIGHT,
UNESCAPE_AMPERSAND,
]
FINNISH_MORPHSET_1 = [
u"N",
u"n",
u"A",
u"a",
u"\xc4",
u"\xe4",
u"ssa",
u"Ssa",
u"ss\xe4",
u"Ss\xe4",
u"sta",
u"st\xe4",
u"Sta",
u"St\xe4",
u"hun",
u"Hun",
u"hyn",
u"Hyn",
u"han",
u"Han",
u"h\xe4n",
u"H\xe4n",
u"h\xf6n",
u"H\xf6n",
u"un",
u"Un",
u"yn",
u"Yn",
u"an",
u"An",
u"\xe4n",
u"\xc4n",
u"\xf6n",
u"\xd6n",
u"seen",
u"Seen",
u"lla",
u"Lla",
u"ll\xe4",
u"Ll\xe4",
u"lta",
u"Lta",
u"lt\xe4",
u"Lt\xe4",
u"lle",
u"Lle",
u"ksi",
u"Ksi",
u"kse",
u"Kse",
u"tta",
u"Tta",
u"ine",
u"Ine",
]
FINNISH_MORPHSET_2 = [u"ni", u"si", u"mme", u"nne", u"nsa"]
FINNISH_MORPHSET_3 = [
u"ko",
u"k\xf6",
u"han",
u"h\xe4n",
u"pa",
u"p\xe4",
u"kaan",
u"k\xe4\xe4n",
u"kin",
]
FINNISH_REGEX = r"^({})({})?({})$".format(
text_type("|".join(FINNISH_MORPHSET_1)),
text_type("|".join(FINNISH_MORPHSET_2)),
text_type("|".join(FINNISH_MORPHSET_3)),
)
def __init__(self, lang="en"):
super(MosesDetokenizer, self).__init__()
self.lang = lang
def unescape_xml(self, text):
for regexp, substitution in self.MOSES_UNESCAPE_XML_REGEXES:
text = re.sub(regexp, substitution, text)
return text
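    # Usage sketch (illustrative):
    #
    #   md = MosesDetokenizer(lang="en")
    #   md.tokenize(['This', ',', 'is', 'a', 'sentence', '!'])
    #   # -> 'This, is a sentence!'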
def tokenize(self, tokens, return_str=True, unescape=True):
"""
Python port of the Moses detokenizer.
:param tokens: A list of strings, i.e. tokenized text.
:type tokens: list(str)
:return: str
"""
# Convert the list of tokens into a string and pad it with spaces.
text = r" {} ".format(" ".join(tokens))
# Converts input string into unicode.
text = text_type(text)
        # Detokenize the aggressive hyphen split.
regexp, substitution = self.AGGRESSIVE_HYPHEN_SPLIT
text = re.sub(regexp, substitution, text)
if unescape:
# Unescape the XML symbols.
text = self.unescape_xml(text)
# Keep track of no. of quotation marks.
quote_counts = {u"'": 0, u'"': 0, u"``": 0, u"`": 0, u"''": 0}
# The *prepend_space* variable is used to control the "effects" of
# detokenization as the function loops through the list of tokens and
# changes the *prepend_space* accordingly as it sequentially checks
# through the language specific and language independent conditions.
prepend_space = " "
detokenized_text = ""
tokens = text.split()
# Iterate through every token and apply language specific detokenization rule(s).
for i, token in enumerate(iter(tokens)):
# Check if the first char is CJK.
if is_cjk(token[0]) and self.lang != "ko":
# Perform left shift if this is a second consecutive CJK word.
if i > 0 and is_cjk(tokens[i - 1][-1]):
detokenized_text += token
# But do nothing special if this is a CJK word that doesn't follow a CJK word
else:
detokenized_text += prepend_space + token
prepend_space = " "
# If it's a currency symbol.
elif re.search(r"^[" + self.IsSc + r"\(\[\{\¿\¡]+$", token):
# Perform right shift on currency and other random punctuation items
detokenized_text += prepend_space + token
prepend_space = ""
elif re.search(r"^[\,\.\?\!\:\;\\\%\}\]\)]+$", token):
# In French, these punctuations are prefixed with a non-breakable space.
if self.lang == "fr" and re.search(r"^[\?\!\:\;\\\%]$", token):
detokenized_text += " "
# Perform left shift on punctuation items.
detokenized_text += token
prepend_space = " "
elif (
self.lang == "en"
and i > 0
and re.search(r"^['][{}]".format(self.IsAlpha), token)
):
# and re.search(u'[{}]$'.format(self.IsAlnum), tokens[i-1])):
# For English, left-shift the contraction.
detokenized_text += token
prepend_space = " "
elif (
self.lang == "cs"
and i > 1
and re.search(
r"^[0-9]+$", tokens[-2]
) # If the previous previous token is a number.
and re.search(r"^[.,]$", tokens[-1]) # If previous token is a dot.
and re.search(r"^[0-9]+$", token)
): # If the current token is a number.
# In Czech, left-shift floats that are decimal numbers.
detokenized_text += token
prepend_space = " "
elif (
self.lang in ["fr", "it", "ga", "ca"]
and i <= len(tokens) - 2
and re.search(r"[{}][']$".format(self.IsAlpha), token)
and re.search(r"^[{}]".format(self.IsAlpha), tokens[i + 1])
): # If the next token is alpha.
# For French and Italian, right-shift the contraction.
detokenized_text += prepend_space + token
prepend_space = ""
elif (
self.lang == "cs"
and i <= len(tokens) - 3
and re.search(r"[{}][']$".format(self.IsAlpha), token)
and re.search(r"^[-–]$", tokens[i + 1])
and re.search(r"^li$|^mail.*", tokens[i + 2], re.IGNORECASE)
): # In Perl, ($words[$i+2] =~ /^li$|^mail.*/i)
# In Czech, right-shift "-li" and a few Czech dashed words (e.g. e-mail)
detokenized_text += prepend_space + token + tokens[i + 1]
next(tokens, None) # Advance over the dash
prepend_space = ""
# Combine punctuation smartly.
elif re.search(r"""^[\'\"„“`]+$""", token):
normalized_quo = token
if re.search(r"^[„“”]+$", token):
normalized_quo = '"'
quote_counts[normalized_quo] = quote_counts.get(normalized_quo, 0)
if self.lang == "cs" and token == u"„":
quote_counts[normalized_quo] = 0
if self.lang == "cs" and token == u"“":
quote_counts[normalized_quo] = 1
if quote_counts[normalized_quo] % 2 == 0:
if (
self.lang == "en"
and token == u"'"
and i > 0
and re.search(r"[s]$", tokens[i - 1])
):
# Left shift on single quote for possessives ending
# in "s", e.g. "The Jones' house"
detokenized_text += token
prepend_space = " "
else:
# Right shift.
detokenized_text += prepend_space + token
prepend_space = ""
quote_counts[normalized_quo] += 1
else:
# Left shift.
detokenized_text += token
prepend_space = " "
quote_counts[normalized_quo] += 1
elif (
self.lang == "fi"
and re.search(r":$", tokens[i - 1])
and re.search(self.FINNISH_REGEX, token)
):
# Finnish : without intervening space if followed by case suffix
# EU:N EU:n EU:ssa EU:sta EU:hun EU:iin ...
detokenized_text += prepend_space + token
prepend_space = " "
else:
detokenized_text += prepend_space + token
prepend_space = " "
# Merge multiple spaces.
regexp, substitution = self.ONE_SPACE
detokenized_text = re.sub(regexp, substitution, detokenized_text)
        # Removes leading and trailing spaces.
detokenized_text = detokenized_text.strip()
return detokenized_text if return_str else detokenized_text.split()
def detokenize(self, tokens, return_str=True, unescape=True):
""" Duck-typing the abstract *tokenize()*."""
return self.tokenize(tokens, return_str, unescape)
__all__ = ["MosesTokenizer", "MosesDetokenizer"]
|
the-stack_0_12589 | """#########################################################################
Author: Yingru Liu
Institute: Stony Brook University
Descriptions: Tools to build an RNN.
----2017.11.01
#########################################################################"""
import tensorflow as tf
from dl4s.cores.model import _config
import numpy as np
"""#########################################################################
Class: config - Basic configuration of the auto-regressive RNN.
#########################################################################"""
class config(_config, object):
"""
Elements outside the __init__ method are static elements.
Elements inside the __init__ method are elements of the object.
----from Stackoverflow(https://stackoverflow.com/questions/9056957/correct-way-to-define-class-variables-in-python).
"""
unitType = 'LSTM' # <string> the type of hidden units(LSTM/GRU/Tanh).
dimLayer = [] # <scalar list> the size of each layers [input, hiddens, output].
#
"""#########################################################################
hidden_net: function to build the hidden layers of the RNN
input: x - network input indicated by <tensor placeholder>.
Config - configuration class.
output: cells - tensorflow symbol for the hidden layers of the multi-layer RNN.
        outputs - the output of the last hidden layer.
initializer - the initializer that may be used later.
#########################################################################"""
def hidden_net(
x,
graph,
Config=config(),
):
# get the number of hidden layers.
numLayer = len(Config.dimLayer) - 2
# define the initializer.
with graph.as_default():
initializer = tf.random_uniform_initializer(-Config.init_scale, Config.init_scale)
# <list> stacks of the hidden layers.
layers = []
for i in range(numLayer):
tf.variable_scope('hidden_' + str(i + 1), initializer=initializer)
if Config.unitType == 'LSTM':
layers.append(tf.nn.rnn_cell.LSTMCell(num_units=Config.dimLayer[i + 1]))
elif Config.unitType == 'GRU':
layers.append(tf.nn.rnn_cell.GRUCell(num_units=Config.dimLayer[i + 1]))
else:
layers.append(tf.nn.rnn_cell.BasicRNNCell(num_units=Config.dimLayer[i + 1]))
cells = tf.contrib.rnn.MultiRNNCell(layers, state_is_tuple=True)
state = cells.zero_state(tf.shape(x)[0], Config.float)
#output: [batch_size, max_time, cell.output_size]
outputs, _ = tf.nn.dynamic_rnn(cells, x, initial_state=state)
return cells, outputs, initializer
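# Illustrative usage sketch (not from the original file; the dimensions, unit type
# and placeholder shape below are made-up example values for the TF 1.x API):
#   demo_config = config()
#   demo_config.unitType = 'GRU'
#   demo_config.dimLayer = [10, 64, 64, 5]      # [input, hidden, hidden, output]
#   demo_graph = tf.Graph()
#   with demo_graph.as_default():
#       x = tf.placeholder(dtype=tf.float32, shape=[None, None, 10])
#   cells, outputs, initializer = hidden_net(x, demo_graph, demo_config)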
|
the-stack_0_12591 | import pandas as pd
import numpy as np
import json
g1=pd.read_csv('refugees.csv')
g2=g1.iloc[:,:]
longi=[]
lati=[]
jsonvalue=[]
keys=[]
g1dict={}
#print(g1.columns)
df = pd.DataFrame()
df.fillna(2071)
df['col'] = g1.columns
#print(df)
longi = g1["longitude"].tolist()
lati = g1["latitude"].tolist()
def smallvalues(lat, lon):
    # Find the row whose latitude is closest to `lat` (longitude is accepted but
    # not used in this simple nearest-latitude lookup).
    index = 0
    reqd = abs(lati[0] - lat)
    for i in range(len(lati)):
        small = abs(lati[i] - lat)
        if small < reqd:
            reqd = small
            index = i
    return reqdvalues(index)
def reqdvalues(index):
keys=g2.head(0)
keys=keys.columns.tolist()
jsonvalue=g2.iloc[[index]]
jsonvalue=jsonvalue.values.tolist()
for i in range(5,33):
g1dict[keys[i]]= jsonvalue[0][i]
jsondumps=json.dumps(g1dict)
#print(jsondumps)
return(jsondumps)
#smallvalues(7.009,32.0987)
|
the-stack_0_12593 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import time
import calendar
import math
from collections import defaultdict
from . import wcwidth
from .displaying import colorme, FormattedValue, DEFAULT_VALUE_COLORS
from cassandra.cqltypes import EMPTY
unicode_controlchars_re = re.compile(r'[\x00-\x31\x7f-\xa0]')
controlchars_re = re.compile(r'[\x00-\x31\x7f-\xff]')
def _show_control_chars(match):
txt = repr(match.group(0))
if txt.startswith('u'):
txt = txt[2:-1]
else:
txt = txt[1:-1]
return txt
bits_to_turn_red_re = re.compile(r'\\([^uUx]|u[0-9a-fA-F]{4}|x[0-9a-fA-F]{2}|U[0-9a-fA-F]{8})')
def _make_turn_bits_red_f(color1, color2):
def _turn_bits_red(match):
txt = match.group(0)
if txt == '\\\\':
return '\\'
return color1 + txt + color2
return _turn_bits_red
default_null_placeholder = 'null'
default_time_format = ''
default_float_precision = 3
default_colormap = DEFAULT_VALUE_COLORS
empty_colormap = defaultdict(lambda: '')
def format_by_type(cqltype, val, encoding, colormap=None, addcolor=False,
nullval=None, time_format=None, float_precision=None):
if nullval is None:
nullval = default_null_placeholder
if val is None:
return colorme(nullval, colormap, 'error')
if addcolor is False:
colormap = empty_colormap
elif colormap is None:
colormap = default_colormap
if time_format is None:
time_format = default_time_format
if float_precision is None:
float_precision = default_float_precision
return format_value(cqltype, val, encoding=encoding, colormap=colormap,
time_format=time_format, float_precision=float_precision,
nullval=nullval)
def color_text(bval, colormap, displaywidth=None):
# note that here, we render natural backslashes as just backslashes,
# in the same color as surrounding text, when using color. When not
# using color, we need to double up the backslashes so it's not
# ambiguous. This introduces the unique difficulty of having different
# display widths for the colored and non-colored versions. To avoid
# adding the smarts to handle that in to FormattedValue, we just
# make an explicit check to see if a null colormap is being used or
# not.
if displaywidth is None:
displaywidth = len(bval)
tbr = _make_turn_bits_red_f(colormap['blob'], colormap['text'])
coloredval = colormap['text'] + bits_to_turn_red_re.sub(tbr, bval) + colormap['reset']
if colormap['text']:
displaywidth -= bval.count(r'\\')
return FormattedValue(bval, coloredval, displaywidth)
def format_value_default(val, colormap, **_):
val = str(val)
escapedval = val.replace('\\', '\\\\')
bval = controlchars_re.sub(_show_control_chars, escapedval)
return color_text(bval, colormap)
# Mapping cql type base names ("int", "map", etc) to formatter functions,
# making format_value a generic function
_formatters = {}
def format_value(type, val, **kwargs):
if val == EMPTY:
return format_value_default('', **kwargs)
formatter = _formatters.get(type.__name__, format_value_default)
return formatter(val, **kwargs)
def formatter_for(typname):
def registrator(f):
_formatters[typname] = f
return f
return registrator
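# Sketch of how the registry above is extended (the type name and colour key are
# hypothetical, purely to illustrate the decorator; the real registrations follow):
#   @formatter_for('MyType')
#   def format_value_mytype(val, colormap, **_):
#       return colorme(str(val), colormap, 'text')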
@formatter_for('bytearray')
def format_value_blob(val, colormap, **_):
bval = '0x' + ''.join('%02x' % ord(c) for c in val)
return colorme(bval, colormap, 'blob')
formatter_for('buffer')(format_value_blob)
def format_python_formatted_type(val, colormap, color, quote=False):
bval = str(val)
if quote:
bval = "'%s'" % bval
return colorme(bval, colormap, color)
@formatter_for('Decimal')
def format_value_decimal(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'decimal')
@formatter_for('UUID')
def format_value_uuid(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'uuid')
@formatter_for('inet')
def formatter_value_inet(val, colormap, quote=False, **_):
return format_python_formatted_type(val, colormap, 'inet', quote=quote)
@formatter_for('bool')
def format_value_boolean(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'boolean')
def format_floating_point_type(val, colormap, float_precision, **_):
if math.isnan(val):
bval = 'NaN'
elif math.isinf(val):
bval = 'Infinity'
else:
bval = '%.*g' % (float_precision, val)
return colorme(bval, colormap, 'float')
formatter_for('float')(format_floating_point_type)
def format_integer_type(val, colormap, **_):
# base-10 only for now; support others?
bval = str(val)
return colorme(bval, colormap, 'int')
formatter_for('long')(format_integer_type)
formatter_for('int')(format_integer_type)
@formatter_for('date')
def format_value_timestamp(val, colormap, time_format, quote=False, **_):
bval = strftime(time_format, calendar.timegm(val.utctimetuple()))
if quote:
bval = "'%s'" % bval
return colorme(bval, colormap, 'timestamp')
formatter_for('datetime')(format_value_timestamp)
def strftime(time_format, seconds):
local = time.localtime(seconds)
formatted = time.strftime(time_format, local)
if local.tm_isdst != 0:
offset = -time.altzone
else:
offset = -time.timezone
if formatted[-4:] != '0000' or time_format[-2:] != '%z' or offset == 0:
return formatted
# deal with %z on platforms where it isn't supported. see CASSANDRA-4746.
if offset < 0:
sign = '-'
else:
sign = '+'
hours, minutes = divmod(abs(offset) / 60, 60)
return formatted[:-5] + sign + '{0:0=2}{1:0=2}'.format(hours, minutes)
@formatter_for('str')
def format_value_text(val, encoding, colormap, quote=False, **_):
escapedval = val.replace(u'\\', u'\\\\')
if quote:
escapedval = escapedval.replace("'", "''")
escapedval = unicode_controlchars_re.sub(_show_control_chars, escapedval)
bval = escapedval.encode(encoding, 'backslashreplace')
if quote:
bval = "'%s'" % bval
displaywidth = wcwidth.wcswidth(bval.decode(encoding))
return color_text(bval, colormap, displaywidth)
# name alias
formatter_for('unicode')(format_value_text)
def format_simple_collection(val, lbracket, rbracket, encoding,
colormap, time_format, float_precision, nullval):
subs = [format_value(type(sval), sval, encoding=encoding, colormap=colormap,
time_format=time_format, float_precision=float_precision,
nullval=nullval, quote=True)
for sval in val]
bval = lbracket + ', '.join(sval.strval for sval in subs) + rbracket
lb, sep, rb = [colormap['collection'] + s + colormap['reset']
for s in (lbracket, ', ', rbracket)]
coloredval = lb + sep.join(sval.coloredval for sval in subs) + rb
displaywidth = 2 * len(subs) + sum(sval.displaywidth for sval in subs)
return FormattedValue(bval, coloredval, displaywidth)
@formatter_for('list')
def format_value_list(val, encoding, colormap, time_format, float_precision, nullval, **_):
return format_simple_collection(val, '[', ']', encoding, colormap,
time_format, float_precision, nullval)
formatter_for('tuple')(format_value_list)
@formatter_for('set')
def format_value_set(val, encoding, colormap, time_format, float_precision, nullval, **_):
return format_simple_collection(sorted(val), '{', '}', encoding, colormap,
time_format, float_precision, nullval)
formatter_for('frozenset')(format_value_set)
formatter_for('sortedset')(format_value_set)
@formatter_for('dict')
def format_value_map(val, encoding, colormap, time_format, float_precision, nullval, **_):
def subformat(v):
return format_value(type(v), v, encoding=encoding, colormap=colormap,
time_format=time_format, float_precision=float_precision,
nullval=nullval, quote=True)
subs = [(subformat(k), subformat(v)) for (k, v) in sorted(val.items())]
bval = '{' + ', '.join(k.strval + ': ' + v.strval for (k, v) in subs) + '}'
lb, comma, colon, rb = [colormap['collection'] + s + colormap['reset']
for s in ('{', ', ', ': ', '}')]
coloredval = lb \
+ comma.join(k.coloredval + colon + v.coloredval for (k, v) in subs) \
+ rb
displaywidth = 4 * len(subs) + sum(k.displaywidth + v.displaywidth for (k, v) in subs)
return FormattedValue(bval, coloredval, displaywidth)
formatter_for('OrderedDict')(format_value_map)
def format_value_utype(val, encoding, colormap, time_format, float_precision, nullval, **_):
def format_field_value(v):
if v is None:
return colorme(nullval, colormap, 'error')
return format_value(type(v), v, encoding=encoding, colormap=colormap,
time_format=time_format, float_precision=float_precision,
nullval=nullval, quote=True)
def format_field_name(name):
return format_value_text(name, encoding=encoding, colormap=colormap, quote=False)
subs = [(format_field_name(k), format_field_value(v)) for (k, v) in val._asdict().items()]
bval = '{' + ', '.join(k.strval + ': ' + v.strval for (k, v) in subs) + '}'
lb, comma, colon, rb = [colormap['collection'] + s + colormap['reset']
for s in ('{', ', ', ': ', '}')]
coloredval = lb \
+ comma.join(k.coloredval + colon + v.coloredval for (k, v) in subs) \
+ rb
displaywidth = 4 * len(subs) + sum(k.displaywidth + v.displaywidth for (k, v) in subs)
return FormattedValue(bval, coloredval, displaywidth)
|
the-stack_0_12594 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
from setuptools import setup, find_packages
def get_version(*file_paths):
"""Retrieves the version from rele/__init__.py"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
version = get_version("rele", "__init__.py")
if sys.argv[-1] == "tag":
print("Tagging the version on git: %s" % version)
os.system('git tag -a %s -m "version %s"' % (version, version))
os.system("git push --tags")
sys.exit()
readme = open("README.md").read()
setup(
name="rele",
version=version,
description="""Relé makes integration with Google PubSub easier.""",
long_description=readme,
long_description_content_type="text/markdown",
author="MercadonaTech",
author_email="[email protected]",
url="https://github.com/mercadona/rele",
packages=find_packages(exclude=("tests",)),
include_package_data=True,
install_requires=["google-cloud-pubsub"],
extras_require={"django": ["django", "tabulate"]},
license="Apache Software License 2.0",
zip_safe=False,
keywords="rele",
classifiers=[
"Development Status :: 3 - Alpha",
"Framework :: Django :: 1.11",
"Framework :: Django :: 2.0",
"Framework :: Django :: 2.1",
"Framework :: Django :: 2.2",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
|
the-stack_0_12595 | import matplotlib.pyplot as plt
import pymc3 as pm
from scipy.special import logit, expit
from scipy.stats import gaussian_kde
import numpy as np
plt.style.use('seaborn')
from statsmodels.distributions import ECDF
import utils  # local helper module (assumed) providing load_time_split(year=..., return_fingerprints=...)
def calc_hpd(ranks, statistic=np.mean):
with pm.Model() as model:
#prior on statistic of interest:
a = pm.Normal('a', mu=statistic(ranks), sigma=10.0)
#'nuisance' parameter:
b = pm.HalfNormal('b', sigma=10.0)
#likelihood:
if statistic==np.mean:
y = pm.Normal('y', mu=a, sigma=b, observed=ranks)
elif statistic==np.median:
y = pm.Laplace('y', mu=a, b=b,observed=ranks)
trace = pm.sample(draws=500, tune=500, chains=2, target_accept=0.9)
return trace
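# Usage sketch (mirrors the calls made in __main__ below): pass logit-transformed
# ranks, e.g. trace = calc_hpd(logit(ranks / 243), statistic=np.mean), then summarise
# the posterior with expit(trace['a']) and pm.hpd(...).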
def calc_kde(ranks, xs=np.linspace(0,243,243)):
#kde:
density = gaussian_kde(ranks)
density.covariance_factor= lambda : 0.25
density._compute_covariance()
return density(xs)
#def calc_ecdf(ranks):
# ecdf = [(ranks<i).sum()/len(ranks) for i in range(0, 243)]
# return ecdf
def plot_fig_label(ax, lab):
ax.text(0, 1.15, lab, transform=ax.transAxes,
fontsize=24, va='top', ha='left')
if __name__ == '__main__':
filenames = ['label_correlation', 'hpo_implicit_als', 'hpo_implicit_bpr',
'hpo_lightfm_warp', 'hpo_lightfm_bpr']
yrs = [2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2017]
num_targets = []
for yr in yrs:
train, test, fps = utils.load_time_split(year=yr, return_fingerprints=True)
num_targets.append(train.shape[1])
#num_targets = [225, 228, 231, 234, 237, 240, 242, 243, 243, 243]
#plot supplementary figure describing which year to use for time split
fig = plt.figure()
ax1 = plt.subplot(231)
ax2 = plt.subplot(232)
ax3 = plt.subplot(233)
ax4 = plt.subplot(234)
ax5 = plt.subplot(235)
from scipy.stats import sem
fig.set_figheight(15)
fig.set_figwidth(15)
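    # z is the two-sided 95% critical value of the standard normal (~1.96),
    # used below to turn the standard error of the log-ranks into a confidence band.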
z = 1.959964
for name, a in zip(filenames, [ax1, ax2, ax3, ax4, ax5]):
lows = list()
highs = list()
middles = list()
for count, year, num in zip(range(len(yrs)), yrs, num_targets):
ranks = np.load('./processed_data/'+str(year)+'_'+name+'.npy')/num
log_ranks = np.log10(ranks)
s = sem(log_ranks)
m = log_ranks.mean()
low = 10**(m-s*z)*num
high = 10**(m+s*z)*num
highs.append(high)
lows.append(low)
middles.append(10**m*num)
a.fill_between(yrs, y1=lows, y2=highs, label=name)
a.plot(yrs, middles, '-o', c='white',)
a.set_ylim(1, 30)
a.set_title(name)
fig.savefig('supp.pdf')
fig.savefig('supp.tif')
##Plot first figure:
fig, ax = plt.subplots(2)
fig.set_figheight(8)
fig.set_figwidth(8)
year = 2015
for count, name in enumerate(filenames):
ranks = np.load('./processed_data/'+str(year)+'_'+name+'.npy')
logit_transformed_ranks = logit(ranks/243)
mean_trace = calc_hpd(logit_transformed_ranks, np.mean)
median_trace = calc_hpd(logit_transformed_ranks, np.median)
print(name)
for j,trace in zip([0,1], [mean_trace, median_trace]):
untransformed_samples = expit(trace['a'])*244
m = np.mean(untransformed_samples)
hpd = pm.hpd(untransformed_samples)
print(m, hpd)
xs = np.linspace(m-3,m+3,100)
density = calc_kde(untransformed_samples, xs=xs)/2
ax[j].errorbar(count, m, yerr = np.array([m-hpd[0], hpd[1]-m])[:,None],
fmt='o', mfc='white', mew=2, linewidth=4, markersize=7.5, capsize=3)
ax[j].fill_betweenx(xs,density/2+count,count, alpha=0.4,label=name.strip('hpo_'))
ax[0].set_ylabel('Mean rank', fontsize=20)
ax[0].set_xticks([])
ax[0].legend(fancybox=True, framealpha=1, shadow=True, borderpad=1, ncol=2)
ax[1].set_ylabel('Median rank', fontsize=20)
ax[1].set_xticks([])
plot_fig_label(ax[0], 'A')
plot_fig_label(ax[1], 'B')
plt.tight_layout()
fig.savefig('statistics.pdf')
fig.savefig('statistics.tif')
plt.close(fig)
##Plot second figure:
filenames.append('nearest_neighbor')
fig, ax = plt.subplots(nrows=2, ncols=2)
fig.set_figheight(6)
fig.set_figwidth(12)
ax1 = ax[0,0]
ax2 = ax[0,1]
ax3 = ax[1,0]
ax4 = ax[1,1]
for name in filenames:
ranks = np.load('./processed_data/'+str(year)+'_'+name+'.npy')
##Plot histogram:
n, x = np.histogram(ranks, bins = np.linspace(1,244,244))
ax1.plot(x[:-1]+np.random.uniform(-0.15,0.15,len(n)),n, label=name)
ax2.plot(x[:-1]+np.random.uniform(-0.15,0.15,len(n)),n,'-o', mfc='white', mew=1.5, label=name, linewidth=0.5)
##Plot empirical cumulative distribution function
ecdf = np.cumsum(n)/n.sum()
ax3.plot(x[:-1]+np.random.uniform(-0.1,0.1,len(n)),ecdf)
ax4.plot(x[:-1]+np.random.uniform(-0.1,0.1,len(n)),ecdf, '-o', mfc='white', mew=1.5, linewidth=0.5)
if name == 'label_correlation':
ax4.plot([0,3],[ecdf[2],ecdf[2]],c='C0', linestyle=':',label='Label correlation\nECDF at rank 3')
ax1.set_title('Histogram of predicted ranks')
ax1.set_ylabel('Count density')
ax1.yaxis.grid()
ax1.axvline(20, linestyle='--', c='k', label='Rank 20')
ax1.set_xlim(0,244)
plot_fig_label(ax1, 'A')
ax2.set_xlim(0,21)
ax2.set_title('Histogram, top 20')
ax2.set_xticks(np.arange(1,21,1))
plot_fig_label(ax2, 'B')
ax3.set_xlim(0,244)
ax3.set_title('Empirical CDF (ECDF) of predicted ranks')
ax3.set_ylabel('Cumulative\nnormalized density')
ax3.yaxis.grid()
ax3.axvline(20, linestyle='--', c='k')
ax3.set_xlabel('Ranks')
plot_fig_label(ax3, 'C')
ax4.set_xlim(0,21)
ax4.set_ylim(0.1, 0.7)
ax4.set_title('ECDF, top 20')
ax4.legend()
ax4.set_xticks(np.arange(1,21,1))
ax4.set_xlabel('Ranks')
plot_fig_label(ax4, 'D')
ax1.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1)
plt.tight_layout()
fig.savefig('distributions.pdf')
fig.savefig('distributions.tif')
plt.close(fig)
|
the-stack_0_12597 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("LongRunningPromise", "speech_transcribe_async")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-speech
# sample-metadata
# title: Transcribe Audio File using Long Running Operation (Local File) (LRO)
# description: Transcribe a long audio file using asynchronous speech recognition
# usage: python3 samples/v1/speech_transcribe_async.py [--local_file_path "resources/brooklyn_bridge.raw"]
# [START speech_transcribe_async]
# -*- coding:utf-8 -*-
from google.cloud.speech_v1 import enums
from google.cloud import speech_v1
import io
import os
os.environ[
"GOOGLE_APPLICATION_CREDENTIALS"
] = r"/Users/noopy/ghoststation_transcript/credentials.json"
def sample_long_running_recognize(local_file_path):
"""
Transcribe a long audio file using asynchronous speech recognition
Args:
local_file_path Path to local audio file, e.g. /path/audio.wav
"""
client = speech_v1.SpeechClient()
# local_file_path = 'resources/brooklyn_bridge.raw'
# # The language of the supplied audio
# language_code = "en-US"
# # Sample rate in Hertz of the audio data sent
# sample_rate_hertz = 16000
# # Encoding of audio data sent. This sample sets this explicitly.
# # This field is optional for FLAC and WAV audio formats.
# encoding = enums.RecognitionConfig.AudioEncoding.LINEAR16
# config = {
# "language_code": language_code,
# "sample_rate_hertz": sample_rate_hertz,
# "encoding": encoding,
# }
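    # The settings below assume a 2-channel (stereo) Korean recording; each channel
    # is transcribed separately by the API.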
audio_channel_count = 2
enable_separate_recognition_per_channel = True
language_code = "ko-KR"
config = {
"audio_channel_count": audio_channel_count,
"enable_separate_recognition_per_channel": enable_separate_recognition_per_channel,
"language_code": language_code,
}
with io.open(local_file_path, "rb") as f:
content = f.read()
audio = {"content": content}
operation = client.long_running_recognize(config, audio)
print(u"Waiting for operation to complete...")
response = operation.result()
for result in response.results:
# First alternative is the most probable result
alternative = result.alternatives[0]
print(u"Transcript: {}".format(alternative.transcript))
# [END speech_transcribe_async]
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--local_file_path", type=str, default="transformed_flac/test.flac"
)
args = parser.parse_args()
sample_long_running_recognize(args.local_file_path)
if __name__ == "__main__":
main()
|
the-stack_0_12599 | import discord
from redbot.core.bot import Red
from redbot.core import Config, bank, commands
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils.chat_formatting import (
bold,
box,
humanize_timedelta,
humanize_number,
)
try:
from redbot.cogs.audio.audio_dataclasses import Query
except ImportError:
Query = None
from .utils import rgetattr
from .listeners import Listeners
from .statements import (
SELECT_PERMA_GLOBAL,
SELECT_PERMA_SINGLE,
SELECT_TEMP_GLOBAL,
SELECT_TEMP_SINGLE,
)
import apsw
import lavalink
from typing import Union
from datetime import datetime
_ = Translator("MartTools", __file__)
@cog_i18n(_)
class MartTools(Listeners, commands.Cog):
"""Multiple tools that are originally used on Martine."""
__author__ = ["Predä", "Draper"]
__version__ = "1.8"
async def red_delete_data_for_user(self, **kwargs):
"""Nothing to delete."""
return
def __init__(self, bot: Red):
super().__init__()
self.bot = bot
self.uptime = datetime.utcnow()
def cog_unload(self):
self._connection.close()
def format_help_for_context(self, ctx: commands.Context) -> str:
"""Thanks Sinbad!"""
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\n\nAuthors: {', '.join(self.__author__)}\nCog Version: {self.__version__}"
def fetch(self, key, id=None, raw: bool = False) -> Union[int, str]:
if id is None:
query = SELECT_PERMA_GLOBAL
condition = {"event": key}
else:
query = SELECT_PERMA_SINGLE
condition = {"event": key, "guild_id": id}
result = list(self.cursor.execute(query, condition))
if raw:
return result[0][0] if result else 0
return humanize_number(result[0][0] if result else 0)
def get(self, key, id=None, raw: bool = False) -> Union[int, str]:
if id is None:
query = SELECT_TEMP_GLOBAL
condition = {"event": key}
else:
query = SELECT_TEMP_SINGLE
condition = {"event": key, "guild_id": id}
result = list(self.cursor.execute(query, condition))
if raw:
return result[0][0] if result else 0
return humanize_number(result[0][0] if result else 0)
def get_bot_uptime(self):
delta = datetime.utcnow() - self.uptime
return str(humanize_timedelta(timedelta=delta))
def usage_counts_cpm(self, key: str, time: int = 60):
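        # Rate helper: number of `key` events per `time`-second bucket since the cog
        # was loaded (default 60 s -> per minute; callers pass time=3600 for per-hour).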
delta = datetime.utcnow() - self.uptime
minutes = delta.total_seconds() / time
total = self.get(key, raw=True)
return total / minutes
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(embed_links=True)
async def bankstats(self, ctx: commands.Context):
"""Show stats of the bank."""
icon = self.bot.user.avatar_url_as(static_format="png")
user_bal = await bank.get_balance(ctx.author)
credits_name = await bank.get_currency_name(ctx.guild)
pos = await bank.get_leaderboard_position(ctx.author)
bank_name = await bank.get_bank_name(ctx.guild)
bank_config = bank._config
if await bank.is_global():
all_accounts = len(await bank_config.all_users())
accounts = await bank_config.all_users()
else:
all_accounts = len(await bank_config.all_members(ctx.guild))
accounts = await bank_config.all_members(ctx.guild)
member_account = await bank.get_account(ctx.author)
created_at = str(member_account.created_at)
no = "1970-01-01 00:00:00"
overall = 0
for key, value in accounts.items():
overall += value["balance"]
em = discord.Embed(color=await ctx.embed_colour())
em.set_author(name=_("{} stats:").format(bank_name), icon_url=icon)
em.add_field(
name=_("{} stats:").format("Global" if await bank.is_global() else "Bank"),
value=_(
"Total accounts: **{all_accounts}**\nTotal amount: **{overall} {credits_name}**"
).format(
all_accounts=all_accounts,
overall=humanize_number(overall),
credits_name=credits_name,
),
)
if pos is not None:
percent = round((int(user_bal) / overall * 100), 3)
em.add_field(
name=_("Your stats:"),
value=_(
"You have **{bal} {currency}**.\n"
"It's **{percent}%** of the {g}amount in the bank.\n"
"You are **{pos}/{all_accounts}** in the {g}leaderboard."
).format(
bal=humanize_number(user_bal),
currency=credits_name,
percent=percent,
g="global " if await bank.is_global() else "",
pos=humanize_number(pos),
all_accounts=humanize_number(all_accounts),
),
inline=False,
)
if created_at != no:
em.set_footer(text=_("Bank account created on: ") + str(created_at))
await ctx.send(embed=em)
@commands.command(aliases=["usagec"])
async def usagecount(self, ctx: commands.Context):
"""
Show the usage count of the bot.
Commands processed, messages received, and music on servers.
"""
msg = _(
"**Commands processed:** `{commands_count}` commands. (`{cpm_commands:.2f}`/min)\n"
"**Commands errors:** `{errors_count}` errors.\n"
"**Messages received:** `{messages_read}` messages. (`{cpm_msgs:.2f}`/min)\n"
"**Messages sent:** `{messages_sent}` messages. (`{cpm_msgs_sent:.2f}`/min)\n"
"**Playing music on:** `{ll_players}` servers.\n"
"**Tracks played:** `{tracks_played}` tracks. (`{cpm_tracks:.2f}`/min)\n\n"
"**Servers joined:** `{guild_join}` servers. (`{cpm_guild_join:.2f}`/hour)\n"
"**Servers left:** `{guild_leave}` servers. (`{cpm_guild_leave:.2f}`/hour)"
).format(
commands_count=self.get("processed_commands"),
cpm_commands=self.usage_counts_cpm("processed_commands"),
errors_count=self.get("command_error"),
messages_read=self.get("messages_read"),
cpm_msgs=self.usage_counts_cpm("messages_read"),
messages_sent=self.get("msg_sent"),
cpm_msgs_sent=self.usage_counts_cpm("msg_sent"),
ll_players="`{}/{}`".format(
humanize_number(len(lavalink.active_players())),
humanize_number(len(lavalink.all_players())),
),
tracks_played=self.get("tracks_played"),
cpm_tracks=self.usage_counts_cpm("tracks_played"),
guild_join=self.get("guild_join"),
cpm_guild_join=self.usage_counts_cpm("guild_join", 3600),
guild_leave=self.get("guild_remove"),
cpm_guild_leave=self.usage_counts_cpm("guild_remove", 3600),
)
if await ctx.embed_requested():
em = discord.Embed(
color=await ctx.embed_colour(),
title=_("Usage count of {} since last restart:").format(self.bot.user.name),
description=msg,
)
em.set_thumbnail(url=self.bot.user.avatar_url_as(static_format="png"))
em.set_footer(text=_("Since {}").format(self.get_bot_uptime()))
await ctx.send(embed=em)
else:
await ctx.send(
_("Usage count of {} since last restart:\n").format(ctx.bot.user.name)
+ msg
+ _("\n\nSince {}").format(self.get_bot_uptime())
)
@commands.bot_has_permissions(embed_links=True)
@commands.command(aliases=["advusagec"])
async def advusagecount(self, ctx: commands.Context):
"""
Permanent stats since first time that the cog has been loaded.
"""
avatar = self.bot.user.avatar_url_as(static_format="png")
query = SELECT_PERMA_SINGLE
condition = {"event": "creation_time", "guild_id": -1000}
result = list(self.cursor.execute(query, condition))
delta = datetime.utcnow() - datetime.utcfromtimestamp(result[0][0])
uptime = humanize_timedelta(timedelta=delta)
ll_players = "{}/{}".format(
humanize_number(len(lavalink.active_players())),
humanize_number(len(lavalink.all_players())),
)
em = discord.Embed(
title=_("Usage count of {}:").format(ctx.bot.user.name),
color=await ctx.embed_colour(),
)
em.add_field(
name=_("Message Stats"),
value=box(
_(
"Messages Read : {messages_read}\n"
"Messages Sent : {msg_sent}\n"
"Messages Deleted : {messages_deleted}\n"
"Messages Edited : {messages_edited}\n"
"DMs Received : {dms_received}\n"
).format_map(
{
"messages_read": self.fetch("messages_read"),
"msg_sent": self.fetch("msg_sent"),
"messages_deleted": self.fetch("messages_deleted"),
"messages_edited": self.fetch("messages_edited"),
"dms_received": self.fetch("dms_received"),
}
),
lang="prolog",
),
inline=False,
)
em.add_field(
name=_("Commands Stats"),
value=box(
_(
"Commands Processed : {processed_commands}\n"
"Errors Occured : {command_error}\n"
"Sessions Resumed : {sessions_resumed}\n"
).format_map(
{
"processed_commands": self.fetch("processed_commands"),
"command_error": self.fetch("command_error"),
"sessions_resumed": self.fetch("sessions_resumed"),
}
),
lang="prolog",
),
inline=False,
)
em.add_field(
name=_("Guild Stats"),
value=box(
_(
"Guilds Joined : {guild_join}\n" "Guilds Left : {guild_remove}\n"
).format_map(
{
"guild_join": self.fetch("guild_join"),
"guild_remove": self.fetch("guild_remove"),
}
),
lang="prolog",
),
inline=False,
)
em.add_field(
name=_("User Stats"),
value=box(
_(
"New Users : {new_members}\n"
"Left Users : {members_left}\n"
"Banned Users : {members_banned}\n"
"Unbanned Users : {members_unbanned}\n"
).format_map(
{
"new_members": self.fetch("new_members"),
"members_left": self.fetch("members_left"),
"members_banned": self.fetch("members_banned"),
"members_unbanned": self.fetch("members_unbanned"),
}
),
lang="prolog",
),
inline=False,
)
em.add_field(
name=_("Role Stats"),
value=box(
_(
"Roles Added : {roles_added}\n"
"Roles Removed : {roles_removed}\n"
"Roles Updated : {roles_updated}\n"
).format_map(
{
"roles_added": self.fetch("roles_added"),
"roles_removed": self.fetch("roles_removed"),
"roles_updated": self.fetch("roles_updated"),
}
),
lang="prolog",
),
inline=False,
)
em.add_field(
name=_("Emoji Stats"),
value=box(
_(
"Reacts Added : {reactions_added}\n"
"Reacts Removed : {reactions_removed}\n"
"Emoji Added : {emojis_added}\n"
"Emoji Removed : {emojis_removed}\n"
"Emoji Updated : {emojis_updated}\n"
).format_map(
{
"reactions_added": self.fetch("reactions_added"),
"reactions_removed": self.fetch("reactions_removed"),
"emojis_added": self.fetch("emojis_added"),
"emojis_removed": self.fetch("emojis_removed"),
"emojis_updated": self.fetch("emojis_updated"),
}
),
lang="prolog",
),
inline=False,
)
em.add_field(
name=_("Audio Stats"),
value=box(
_(
"Users Who Joined VC : {users_joined_bot_music_room}\n"
"Tracks Played : {tracks_played}\n"
"Number Of Players : {ll_players}"
).format(
users_joined_bot_music_room=self.fetch("users_joined_bot_music_room"),
tracks_played=self.fetch("tracks_played"),
ll_players=ll_players,
),
lang="prolog",
),
inline=False,
)
if Query:
em.add_field(
name=_("Track Stats"),
value=box(
_(
"Streams : {streams_played}\n"
"YouTube Streams : {yt_streams_played}\n"
"Mixer Streams : {mixer_streams_played}\n"
"Twitch Streams : {ttv_streams_played}\n"
"Other Streams : {streams_played}\n"
"YouTube Tracks : {youtube_tracks}\n"
"Soundcloud Tracks : {soundcloud_tracks}\n"
"Bandcamp Tracks : {bandcamp_tracks}\n"
"Vimeo Tracks : {vimeo_tracks}\n"
"Mixer Tracks : {mixer_tracks}\n"
"Twitch Tracks : {twitch_tracks}\n"
"Other Tracks : {other_tracks}\n"
).format(
streams_played=self.fetch("streams_played"),
yt_streams_played=self.fetch("yt_streams_played"),
mixer_streams_played=self.fetch("mixer_streams_played"),
ttv_streams_played=self.fetch("ttv_streams_played"),
other_streams_played=self.fetch("other_streams_played"),
youtube_tracks=self.fetch("youtube_tracks"),
soundcloud_tracks=self.fetch("soundcloud_tracks"),
bandcamp_tracks=self.fetch("bandcamp_tracks"),
vimeo_tracks=self.fetch("vimeo_tracks"),
mixer_tracks=self.fetch("mixer_tracks"),
twitch_tracks=self.fetch("twitch_tracks"),
other_tracks=self.fetch("other_tracks"),
),
lang="prolog",
),
inline=False,
)
em.set_thumbnail(url=avatar)
em.set_footer(text=_("Since {}").format(uptime))
await ctx.send(embed=em)
@commands.command(aliases=["prefixes"])
async def prefix(self, ctx: commands.Context):
"""Show all prefixes of the bot"""
default_prefixes = await self.bot._config.prefix()
try:
guild_prefixes = await self.bot._config.guild(ctx.guild).prefix()
except AttributeError:
guild_prefixes = False
bot_name = ctx.bot.user.name
avatar = self.bot.user.avatar_url_as(static_format="png")
if not guild_prefixes:
to_send = [f"`\u200b{p}\u200b`" for p in default_prefixes]
plural = _("Prefixes") if len(default_prefixes) >= 2 else _("Prefix")
if await ctx.embed_requested():
em = discord.Embed(
color=await ctx.embed_colour(),
title=_("{} of {}:").format(plural, bot_name),
description=" ".join(to_send),
)
em.set_thumbnail(url=avatar)
await ctx.send(embed=em)
else:
await ctx.send(bold(_("{} of {}:\n")).format(plural, bot_name) + " ".join(to_send))
else:
to_send = [f"`\u200b{p}\u200b`" for p in guild_prefixes]
plural = _("prefixes") if len(default_prefixes) >= 2 else _("prefix")
if await ctx.embed_requested():
em = discord.Embed(
color=await ctx.embed_colour(),
title=_("Server {} of {}:").format(plural, bot_name),
description=" ".join(to_send),
)
em.set_thumbnail(url=avatar)
await ctx.send(embed=em)
else:
await ctx.send(
bold(_("Server {} of {name}:\n")).format(plural, bot_name) + " ".join(to_send)
)
@commands.command(aliases=["serverc", "serversc"])
async def servercount(self, ctx: commands.Context):
"""Send servers stats of the bot."""
visible_users = sum(len(s.members) for s in self.bot.guilds)
total_users = sum(s.member_count for s in self.bot.guilds)
msg = _(
"{name} is running on `{shard_count}` {shards}.\n"
"Serving `{servs}` servers (`{channels}` channels).\n"
"For a total of `{visible_users}` users (`{unique}` unique).\n"
"(`{visible_users}` visible now, `{total_users}` total, `{percentage_chunked:.2f}%` chunked)"
).format(
name=ctx.bot.user.name,
shard_count=humanize_number(self.bot.shard_count),
shards=_("shards") if self.bot.shard_count > 1 else _("shard"),
servs=humanize_number(len(self.bot.guilds)),
channels=humanize_number(sum(len(s.channels) for s in self.bot.guilds)),
visible_users=humanize_number(visible_users),
unique=humanize_number(len(self.bot.users)),
total_users=humanize_number(total_users),
percentage_chunked=visible_users / total_users * 100,
)
if await ctx.embed_requested():
em = discord.Embed(color=await ctx.embed_colour(), description=msg)
await ctx.send(embed=em)
else:
await ctx.send(msg)
@commands.command(aliases=["servreg"])
async def serversregions(self, ctx: commands.Context, sort: str = "guilds"):
"""
        Show totals for the regions where the bot's servers are located.
        You can also sort by number of users by using `[p]serversregions users`.
        By default it sorts by guilds.
"""
regions_dict = {
"vip-us-east": ":flag_us:" + _(" __VIP__ US East"),
"vip-us-west": ":flag_us:" + _(" __VIP__ US West"),
"vip-amsterdam": ":flag_nl:" + _(" __VIP__ Amsterdam"),
"eu-west": ":flag_eu:" + _(" EU West"),
"eu-central": ":flag_eu:" + _(" EU Central"),
"europe": ":flag_eu:" + _(" Europe"),
"london": ":flag_gb:" + _(" London"),
"frankfurt": ":flag_de:" + _(" Frankfurt"),
"amsterdam": ":flag_nl:" + _(" Amsterdam"),
"us-west": ":flag_us:" + _(" US West"),
"us-east": ":flag_us:" + _(" US East"),
"us-south": ":flag_us:" + _(" US South"),
"us-central": ":flag_us:" + _(" US Central"),
"singapore": ":flag_sg:" + _(" Singapore"),
"sydney": ":flag_au:" + _(" Sydney"),
"brazil": ":flag_br:" + _(" Brazil"),
"hongkong": ":flag_hk:" + _(" Hong Kong"),
"russia": ":flag_ru:" + _(" Russia"),
"japan": ":flag_jp:" + _(" Japan"),
"southafrica": ":flag_za:" + _(" South Africa"),
"india": ":flag_in:" + _(" India"),
"dubai": ":flag_ae:" + _(" Dubai"),
"south-korea": ":flag_kr:" + _(" South Korea"),
}
regions = {}
for guild in self.bot.guilds:
region = str(guild.region)
if region not in regions:
regions[region] = {"guilds": 0, "users": 0}
regions[region]["users"] += guild.member_count
regions[region]["guilds"] += 1
def sort_keys(key: str):
keys = (
(key[1]["guilds"], key[1]["users"])
if sort != "users"
else (key[1]["users"], key[1]["guilds"])
)
return keys
regions_stats = dict(sorted(regions.items(), key=lambda x: sort_keys(x), reverse=True))
msg = [
_("{flag}: {guilds_len} and {users_len}").format(
flag=regions_dict[region_name],
guilds_len=(
f"`{humanize_number(values['guilds'])} {_('server') if values['guilds'] < 2 else _('servers')}`"
),
users_len=(
f"`{humanize_number(values['users'])} {_('user') if values['users'] < 2 else _('users')}`"
),
)
for region_name, values in regions_stats.items()
]
guilds_word = _("server") if len(self.bot.guilds) < 2 else _("servers")
users_word = (
_("user") if sum(k["users"] for k in regions_stats.values()) < 2 else _("users")
)
footer = _("For a total of {guilds} and {users}").format(
guilds=f"{humanize_number(len(self.bot.guilds))} {guilds_word}",
users=f"{humanize_number(sum(k['users'] for k in regions_stats.values()))} {users_word}",
)
if await ctx.embed_requested():
em = discord.Embed(
color=await ctx.embed_colour(),
title=_("Servers regions stats:"),
description="\n".join(msg),
)
em.set_footer(text=footer)
await ctx.send(embed=em)
else:
msg = bold(_("Servers regions stats:\n\n")) + "\n".join(msg) + "\n" + bold(footer)
await ctx.send(msg)
|
the-stack_0_12600 | #!/usr/bin/env python
"""
plot_hub.py: the plot tool
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def plt_fidelity_vs_iter(fidelities,losses,config,indx=0):
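    # `config` is assumed to expose `figure_path`, `system_size`, and `label`
    # attributes (used in the savefig path below); `indx` just tags the output file.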
fig, (axs1, axs2) = plt.subplots(1, 2)
axs1.plot(range(len(fidelities)), fidelities)
axs1.set_xlabel('Epoch')
axs1.set_ylabel('Fidelity between real and fake states')
axs2.plot(range(len(losses)), losses)
axs2.set_xlabel('Epoch')
axs2.set_ylabel('Wasserstein Loss')
plt.tight_layout()
plt.savefig('{}/{}qubit_{}_{}.png'.format(config.figure_path,config.system_size, config.label, indx)) |
the-stack_0_12602 | # -*- coding: utf-8 -*-
import os
import sys
import numpy as np
import cv2
IMAGE_SIZE = 64
#按照指定图像大小调整尺寸
def resize_image(image, height = IMAGE_SIZE, width = IMAGE_SIZE):
top, bottom, left, right = (0, 0, 0, 0)
    # Get the image dimensions
h, w, _ = image.shape
    # For non-square images, find the longest edge
longest_edge = max(h, w)
    # Compute how many pixels of padding the short edge needs to match the long edge
if h < longest_edge:
dh = longest_edge - h
top = dh // 2
bottom = dh - top
elif w < longest_edge:
dw = longest_edge - w
left = dw // 2
right = dw - left
else:
pass
BLACK = [0, 0, 0]
    # Pad the image borders so height and width are equal; cv2.BORDER_CONSTANT fills with the color given by `value`
constant = cv2.copyMakeBorder(image, top , bottom, left, right, cv2.BORDER_CONSTANT, value = BLACK)
    # Resize the image and return it
return cv2.resize(constant, (height, width))
# Read the training data
images = []
labels = []
def read_images(path_name):
for dir_item in os.listdir(path_name):
full_path = os.path.abspath(os.path.join(path_name, dir_item))
if os.path.isdir(full_path):
read_images(full_path)
else:
if dir_item.endswith('.jpg'):
print(full_path)
image = cv2.imread(full_path)
image = resize_image(image, IMAGE_SIZE, IMAGE_SIZE)
images.append(image)
labels.append(path_name)
return images,labels
# Load the training data from the given path
def load_dataset(path_name):
images,labels = read_images(path_name)
    # Convert all input images into a 4-D array of shape (num_images, IMAGE_SIZE, IMAGE_SIZE, 3)
    # Each image is 64 x 64 pixels with 3 color values per pixel (RGB)
images = np.array(images)
labels = np.array([0 if label.endswith('yangwk') else 1 for label in labels])
return images, labels
if __name__ == '__main__':
path_name = './data/'
images, labels = load_dataset(path_name)
print(images.shape)
print(labels.shape) |
the-stack_0_12603 | """
============================================================================
Decoding in time-frequency space data using the Common Spatial Pattern (CSP)
============================================================================
The time-frequency decomposition is estimated by iterating over raw data that
has been band-passed at different frequencies. This is used to compute a
covariance matrix over each epoch or a rolling time-window and extract the CSP
filtered signals. A linear discriminant classifier is then applied to these
signals.
"""
# Authors: Laura Gwilliams <[email protected]>
# Jean-Remi King <[email protected]>
# Alex Barachant <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne import Epochs, find_events, create_info
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
from mne.time_frequency import AverageTFR
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
###############################################################################
# Set parameters and read data
event_id = dict(hands=2, feet=3) # motor imagery: hands vs feet
subject = 1
runs = [6, 10, 14]
raw_fnames = eegbci.load_data(subject, runs)
raw_files = [read_raw_edf(f, stim_channel='auto', preload=True)
for f in raw_fnames]
raw = concatenate_raws(raw_files)
# Extract information from the raw file
sfreq = raw.info['sfreq']
events = find_events(raw, shortest_event=0, stim_channel='STI 014')
raw.pick_types(meg=False, eeg=True, stim=False, eog=False, exclude='bads')
# Assemble the classifier using scikit-learn pipeline
clf = make_pipeline(CSP(n_components=4, reg=None, log=True, norm_trace=False),
LinearDiscriminantAnalysis())
n_splits = 5 # how many folds to use for cross-validation
cv = StratifiedKFold(n_splits=n_splits, shuffle=True)
# Classification & Time-frequency parameters
tmin, tmax = -.200, 2.000
n_cycles = 10. # how many complete cycles: used to define window size
min_freq = 5.
max_freq = 25.
n_freqs = 8 # how many frequency bins to use
# Assemble list of frequency range tuples
freqs = np.linspace(min_freq, max_freq, n_freqs) # assemble frequencies
freq_ranges = list(zip(freqs[:-1], freqs[1:])) # make freqs list of tuples
# Infer window spacing from the max freq and number of cycles to avoid gaps
window_spacing = (n_cycles / np.max(freqs) / 2.)
centered_w_times = np.arange(tmin, tmax, window_spacing)[1:]
n_windows = len(centered_w_times)
# Instantiate label encoder
le = LabelEncoder()
###############################################################################
# Loop through frequencies, apply classifier and save scores
# init scores
freq_scores = np.zeros((n_freqs - 1,))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
# Infer window size based on the frequency being used
w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds
# Apply band-pass filter to isolate the specified frequencies
raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
skip_by_annotation='edge')
# Extract epochs from filtered data, padded by window size
epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
proj=False, baseline=None, preload=True)
epochs.drop_bad()
y = le.fit_transform(epochs.events[:, 2])
X = epochs.get_data()
# Save mean scores over folds for each frequency and time window
freq_scores[freq] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
scoring='roc_auc', cv=cv,
n_jobs=1), axis=0)
###############################################################################
# Plot frequency results
plt.bar(left=freqs[:-1], height=freq_scores, width=np.diff(freqs)[0],
align='edge', edgecolor='black')
plt.xticks(freqs)
plt.ylim([0, 1])
plt.axhline(len(epochs['feet']) / len(epochs), color='k', linestyle='--',
label='chance level')
plt.legend()
plt.xlabel('Frequency (Hz)')
plt.ylabel('Decoding Scores')
plt.title('Frequency Decoding Scores')
###############################################################################
# Loop through frequencies and time, apply classifier and save scores
# init scores
tf_scores = np.zeros((n_freqs - 1, n_windows))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
# Infer window size based on the frequency being used
w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds
# Apply band-pass filter to isolate the specified frequencies
raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
skip_by_annotation='edge')
# Extract epochs from filtered data, padded by window size
epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
proj=False, baseline=None, preload=True)
epochs.drop_bad()
y = le.fit_transform(epochs.events[:, 2])
# Roll covariance, csp and lda over time
for t, w_time in enumerate(centered_w_times):
# Center the min and max of the window
w_tmin = w_time - w_size / 2.
w_tmax = w_time + w_size / 2.
# Crop data into time-window of interest
X = epochs.copy().crop(w_tmin, w_tmax).get_data()
# Save mean scores over folds for each frequency and time window
tf_scores[freq, t] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
scoring='roc_auc', cv=cv,
n_jobs=1), axis=0)
###############################################################################
# Plot time-frequency results
# Set up time frequency object
av_tfr = AverageTFR(create_info(['freq'], sfreq), tf_scores[np.newaxis, :],
centered_w_times, freqs[1:], 1)
chance = np.mean(y) # set chance level to white in the plot
av_tfr.plot([0], vmin=chance, title="Time-Frequency Decoding Scores",
cmap=plt.cm.Reds)
|
the-stack_0_12604 | """
Copyright 2019-present NAVER Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#-*- coding: utf-8 -*-
import os
import json
import math
import random
import argparse
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.optim as optim
import Levenshtein as Lev
import label_loader
from data_loader import AudioDataLoader, SpectrogramDataset, BucketingSampler
from models import EncoderRNN, DecoderRNN, Seq2Seq
# @Kwang-Ho
import time
import datetime
from initialize import initialize
char2index = dict()
index2char = dict()
SOS_token = 0
EOS_token = 0
PAD_token = 0
def label_to_string(labels):
if len(labels.shape) == 1:
sent = str()
for i in labels:
if i.item() == EOS_token:
break
sent += index2char[i.item()]
return sent
elif len(labels.shape) == 2:
sents = list()
for i in labels:
sent = str()
for j in i:
if j.item() == EOS_token:
break
sent += index2char[j.item()]
sents.append(sent)
return sents
def char_distance(ref, hyp):
ref = ref.replace(' ', '')
hyp = hyp.replace(' ', '')
dist = Lev.distance(hyp, ref)
length = len(ref.replace(' ', ''))
return dist, length
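# Quick illustration of the CER building block above (hypothetical strings):
#   char_distance("hello world", "helo world") -> (1, 10)
#   i.e. edit distance 1 over a 10-character space-stripped reference, so CER = 10%.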
def get_distance(ref_labels, hyp_labels):
total_dist = 0
total_length = 0
transcripts = []
for i in range(len(ref_labels)):
ref = label_to_string(ref_labels[i])
hyp = label_to_string(hyp_labels[i])
transcripts.append('{hyp}\t{ref}'.format(hyp=hyp, ref=ref))
dist, length = char_distance(ref, hyp)
total_dist += dist
total_length += length
return total_dist, total_length, transcripts
def train(model, data_loader, criterion, optimizer, device, epoch, train_sampler, max_norm=400, teacher_forcing_ratio=1):
total_loss = 0.
total_num = 0
total_dist = 0
total_length = 0
total_sent_num = 0
model.train()
for i, (data) in enumerate(data_loader):
feats, scripts, feat_lengths, script_lengths = data
optimizer.zero_grad()
feats = feats.to(device)
scripts = scripts.to(device)
feat_lengths = feat_lengths.to(device)
src_len = scripts.size(1)
target = scripts[:, 1:]
logit = model(feats, feat_lengths, scripts, teacher_forcing_ratio=teacher_forcing_ratio)
logit = torch.stack(logit, dim=1).to(device)
y_hat = logit.max(-1)[1]
loss = criterion(logit.contiguous().view(-1, logit.size(-1)), target.contiguous().view(-1))
batch_size = logit.size(0)
loss = loss / batch_size
total_loss += loss.item()
total_num += sum(feat_lengths).item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
dist, length, _ = get_distance(target, y_hat)
total_dist += dist
total_length += length
cer = float(dist / length) * 100
total_sent_num += target.size(0)
print('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss:.4f}\t'
'Cer {cer:.4f}'.format(
(epoch + 1), (i + 1), len(train_sampler), loss=loss, cer=cer))
# return total_loss / total_num, (total_dist / total_length) * 100
return total_loss / len(data_loader), (total_dist / total_length) * 100
def evaluate(model, data_loader, criterion, device, save_output=False, teacher_forcing_ratio=0.0):
total_loss = 0.
total_num = 0
total_dist = 0
total_length = 0
total_sent_num = 0
transcripts_list = []
model.eval()
with torch.no_grad():
for i, (data) in tqdm(enumerate(data_loader), total=len(data_loader)):
feats, scripts, feat_lengths, script_lengths = data
feats = feats.to(device)
scripts = scripts.to(device)
feat_lengths = feat_lengths.to(device)
src_len = scripts.size(1)
target = scripts[:, 1:]
            logit = model(feats, feat_lengths, scripts, teacher_forcing_ratio=teacher_forcing_ratio)  # teacher_forcing_ratio=0.0 here: decode without feeding ground truth
logit = torch.stack(logit, dim=1).to(device)
y_hat = logit.max(-1)[1]
logit = logit[:,:target.size(1),:] # cut over length to calculate loss
loss = criterion(logit.contiguous().view(-1, logit.size(-1)), target.contiguous().view(-1))
batch_size = logit.size(0)
loss = loss / batch_size
total_loss += loss.item()
total_num += sum(feat_lengths).item()
dist, length, transcripts = get_distance(target, y_hat)
cer = float(dist / length) * 100
total_dist += dist
total_length += length
if save_output == True:
transcripts_list += transcripts
total_sent_num += target.size(0)
# aver_loss = total_loss / total_num
aver_loss = total_loss / len(data_loader)
aver_cer = float(total_dist / total_length) * 100
return aver_loss, aver_cer, transcripts_list
def main():
global char2index
global index2char
global SOS_token
global EOS_token
global PAD_token
parser = argparse.ArgumentParser(description='LAS')
parser.add_argument('--model-name', type=str, default='LAS')
# Dataset
parser.add_argument('--train-file', type=str,
help='data list about train dataset', default='data/ClovaCall/train_ClovaCall.json')
    parser.add_argument('--test-file-list', nargs='*',
                        help='data list about test dataset', default=['data/ClovaCall/test_ClovaCall.json'])
    parser.add_argument('--labels-path', default='data/kor_syllable.json', help='Path to the JSON file of Korean syllable labels')
parser.add_argument('--dataset-path', default='data/ClovaCall/clean', help='Target dataset path')
# Hyperparameters
parser.add_argument('--rnn-type', default='lstm', help='Type of the RNN. rnn|gru|lstm are supported')
parser.add_argument('--encoder_layers', type=int, default=3, help='number of layers of model (default: 3)')
parser.add_argument('--encoder_size', type=int, default=512, help='hidden size of model (default: 512)')
    parser.add_argument('--decoder_layers', type=int, default=2, help='number of layers of decoder (default: 2)')
parser.add_argument('--decoder_size', type=int, default=512, help='hidden size of model (default: 512)')
parser.add_argument('--dropout', type=float, default=0.3, help='Dropout rate in training (default: 0.3)')
    parser.add_argument('--no-bidirectional', dest='bidirectional', action='store_false', default=True, help='Turn off bi-directional RNNs in the encoder')
parser.add_argument('--batch_size', type=int, default=32, help='Batch size in training (default: 32)')
parser.add_argument('--num_workers', type=int, default=4, help='Number of workers in dataset loader (default: 4)')
parser.add_argument('--num_gpu', type=int, default=1, help='Number of gpus (default: 1)')
parser.add_argument('--epochs', type=int, default=100, help='Number of max epochs in training (default: 100)')
parser.add_argument('--lr', type=float, default=3e-4, help='Learning rate (default: 3e-4)')
parser.add_argument('--learning-anneal', default=1.1, type=float, help='Annealing learning rate every epoch')
parser.add_argument('--teacher_forcing', type=float, default=1.0, help='Teacher forcing ratio in decoder (default: 1.0)')
parser.add_argument('--max_len', type=int, default=80, help='Maximum characters of sentence (default: 80)')
parser.add_argument('--max-norm', default=400, type=int, help='Norm cutoff to prevent explosion of gradients')
# Audio Config
parser.add_argument('--sample-rate', default=16000, type=int, help='Sampling Rate')
parser.add_argument('--window-size', default=.02, type=float, help='Window size for spectrogram')
parser.add_argument('--window-stride', default=.01, type=float, help='Window stride for spectrogram')
# System
parser.add_argument('--save-folder', default='models', help='Location to save epoch models')
parser.add_argument('--model-path', default='models/las_final.pth', help='Location to save best validation model')
parser.add_argument('--log-path', default='log/', help='path to predict log about valid and test dataset')
    parser.add_argument('--cuda', action='store_true', default=False, help='use CUDA for training')
parser.add_argument('--seed', type=int, default=123456, help='random seed (default: 123456)')
parser.add_argument('--mode', type=str, default='train', help='Train or Test')
parser.add_argument('--load-model', action='store_true', default=False, help='Load model')
    parser.add_argument('--finetune', dest='finetune', action='store_true', default=False,
                        help='Fine-tune the model after loading it (optimizer state is not restored)')
args = parser.parse_args()
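    # NOTE: the two assignments below hard-code the gradient clipping norm and dropout
    # rate, overriding whatever was passed via --max-norm / --dropout.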
args.max_norm = 5.0
args.dropout = 0.0
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
char2index, index2char = label_loader.load_label_json(args.labels_path)
SOS_token = char2index['<s>']
EOS_token = char2index['</s>']
PAD_token = char2index['_']
device = torch.device('cuda' if args.cuda else 'cpu')
audio_conf = dict(sample_rate=args.sample_rate,
window_size=args.window_size,
window_stride=args.window_stride)
# Batch Size
batch_size = args.batch_size * args.num_gpu
print(">> Train dataset : ", args.train_file)
trainData_list = []
with open(args.train_file, 'r', encoding='utf-8') as f:
trainData_list = json.load(f)
if args.num_gpu != 1:
last_batch = len(trainData_list) % batch_size
if last_batch != 0 and last_batch < args.num_gpu:
trainData_list = trainData_list[:-last_batch]
train_dataset = SpectrogramDataset(audio_conf=audio_conf,
dataset_path=args.dataset_path,
data_list=trainData_list,
char2index=char2index, sos_id=SOS_token, eos_id=EOS_token,
normalize=True)
train_sampler = BucketingSampler(train_dataset, batch_size=batch_size)
train_loader = AudioDataLoader(train_dataset, num_workers=args.num_workers, batch_sampler=train_sampler)
print(">> Test dataset : ", args.test_file_list)
testLoader_dict = {}
for test_file in args.test_file_list:
testData_list = []
with open(test_file, 'r', encoding='utf-8') as f:
testData_list = json.load(f)
test_dataset = SpectrogramDataset(audio_conf=audio_conf,
dataset_path=args.dataset_path,
data_list=testData_list,
char2index=char2index, sos_id=SOS_token, eos_id=EOS_token,
normalize=True)
testLoader_dict[test_file] = AudioDataLoader(test_dataset, batch_size=1, num_workers=args.num_workers)
# input_size = int(math.floor((args.sample_rate * args.window_size) / 2) + 1)
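    # Fixed input feature dimension; presumably the dataset emits 80-dim mel filterbank features.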
input_size = 80
enc = EncoderRNN(input_size, args.encoder_size, n_layers=args.encoder_layers,
dropout_p=args.dropout, bidirectional=args.bidirectional,
rnn_cell=args.rnn_type, variable_lengths=False)
dec = DecoderRNN(len(char2index), args.max_len, args.decoder_size, args.encoder_size,
SOS_token, EOS_token, PAD_token,
n_layers=args.decoder_layers, rnn_cell=args.rnn_type,
dropout_p=args.dropout, bidirectional_encoder=args.bidirectional)
model = Seq2Seq(enc, dec)
initialize(model, init='xavier_uniform')
save_folder = args.save_folder
os.makedirs(save_folder, exist_ok=True)
optim_state = None
if args.load_model: # Starting from previous model
print("Loading checkpoint model %s" % args.model_path)
state = torch.load(args.model_path)
model.load_state_dict(state['model'])
print('Model loaded')
if not args.finetune: # Just load model
optim_state = state['optimizer']
model = model.to(device)
# optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)
optimizer = optim.Adadelta(model.parameters(), lr=1.0, rho=0.95, eps=1e-08, weight_decay=0)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=1, verbose=True)
if optim_state is not None:
optimizer.load_state_dict(optim_state)
# criterion = nn.CrossEntropyLoss(reduction='mean').to(device)
criterion = nn.CrossEntropyLoss(reduction='sum').to(device) # ignore_index=PAD_token
print(model)
print("Number of parameters: %d" % Seq2Seq.get_param_size(model))
train_model = nn.DataParallel(model)
if args.mode != "train":
for test_file in args.test_file_list:
test_loader = testLoader_dict[test_file]
test_loss, test_cer, transcripts_list = evaluate(model, test_loader, criterion, device, save_output=True)
for idx, line in enumerate(transcripts_list):
# print(line)
hyp, ref = line.split('\t')
print("({:3d}/{:3d}) [REF]: {}".format(idx+1, len(transcripts_list), ref))
print("({:3d}/{:3d}) [HYP]: {}".format(idx+1, len(transcripts_list), hyp))
print()
print("Test {} CER : {}".format(test_file, test_cer))
else:
best_cer = 1e10
begin_epoch = 0
# start_time = time.time()
start_time = datetime.datetime.now()
for epoch in range(begin_epoch, args.epochs):
train_loss, train_cer = train(train_model, train_loader, criterion, optimizer, device, epoch, train_sampler, args.max_norm, args.teacher_forcing)
# end_time = time.time()
# elapsed_time = end_time - start_time
elapsed_time = datetime.datetime.now() - start_time
train_log = 'Train({name}) Summary Epoch: [{0}]\tAverage Loss {loss:.3f}\tAverage CER {cer:.3f}\tTime {time:}'.format(epoch + 1, name='train', loss=train_loss, cer=train_cer, time=elapsed_time)
print(train_log)
cer_list = []
for test_file in args.test_file_list:
test_loader = testLoader_dict[test_file]
test_loss_tf, test_cer_tf, _ = evaluate(model, test_loader, criterion, device, save_output=False, teacher_forcing_ratio=1.0)
test_log = '(TF=1.0) Test({name}) Summary Epoch: [{0}]\tAverage Loss {loss:.3f}\tAverage CER {cer:.3f}\t'.format(
epoch + 1, name=test_file, loss=test_loss_tf, cer=test_cer_tf)
print(test_log)
test_loss, test_cer, _ = evaluate(model, test_loader, criterion, device, save_output=False, teacher_forcing_ratio=0.0)
test_log = '(TF=0.0) Test({name}) Summary Epoch: [{0}]\tAverage Loss {loss:.3f}\tAverage CER {cer:.3f}\t'.format(
epoch + 1, name=test_file, loss=test_loss, cer=test_cer)
print(test_log)
cer_list.append(test_cer)
if best_cer > cer_list[0]:
print("Found better validated model, saving to %s" % args.model_path)
state = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict()
}
torch.save(state, args.model_path)
best_cer = cer_list[0]
print("Shuffling batches...")
train_sampler.shuffle(epoch)
scheduler.step(float(test_loss_tf))
# print('Learning rate annealed to: {lr:.6f}'.format(lr=scheduler.get_lr()))
# for g in optimizer.param_groups:
# g['lr'] = g['lr'] / args.learning_anneal
# print('Learning rate annealed to: {lr:.6f}'.format(lr=g['lr']))
if __name__ == "__main__":
main()
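# Example invocation (illustrative; assumes this script is saved as train.py and
# the default ClovaCall data layout exists locally):
#   python train.py --train-file data/ClovaCall/train_ClovaCall.json \
#                   --test-file-list data/ClovaCall/test_ClovaCall.json \
#                   --dataset-path data/ClovaCall/clean --batch_size 32 --cuda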
|
the-stack_0_12605 | """
Create a class to measure the average time elapsed between mark() calls.
This is useful for measuring how frequently price updates arrive (i.e. mark() is called on every price update).
"""
import time
import random
class LatencyMetric:
def __init__(self):
self._last_received_timestamp = time.time_ns()
self._max_duration = 0
self._sum = 0
self._count = 0
def mark(self):
# calculate time lapsed
ts = time.time_ns()
duration = ts - self._last_received_timestamp
self._last_received_timestamp = ts
self._sum += duration
self._count += 1
if duration > self._max_duration:
self._max_duration = duration
def get_max(self) -> int:
return self._max_duration
    def get_mean(self) -> float:
        """ get mean duration between mark() calls in milliseconds (timestamps are tracked in nanoseconds) """
        return self._sum / self._count / 1000000
# A simple driver class to demonstrate the usage
if __name__ == '__main__':
metric = LatencyMetric()
while True:
# a random time between 0.9 and 1.1 seconds
random_duration = float(random.randint(90, 110)) / 100.0
time.sleep(random_duration)
metric.mark()
        # we expect a mean of roughly 1000 ms (about 1 second) and a max reported in nanoseconds
print('Average: {}, max: {}'.format(metric.get_mean(), metric.get_max()))
|
the-stack_0_12606 | #
# Copyright Contributors to the OpenTimelineIO project
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from PySide2 import QtWidgets, QtGui, QtCore
import opentimelineio as otio
class Details(QtWidgets.QTextEdit):
"""Text widget with the JSON string of the specified OTIO object."""
def __init__(self, *args, **kwargs):
super(Details, self).__init__(*args, **kwargs)
self.setReadOnly(True)
self.font = QtGui.QFontDatabase.systemFont(
QtGui.QFontDatabase.FixedFont)
self.font.setPointSize(12)
self.setFont(self.font)
self.backgroundColor = QtGui.QColor(33, 33, 33)
self.textColor = QtGui.QColor(180, 180, 180)
self.highlightColor = QtGui.QColor(255, 198, 109)
self.keywordColor = QtGui.QColor(204, 120, 50)
self.palette = QtGui.QPalette()
self.palette.setColor(QtGui.QPalette.Base, self.backgroundColor)
self.palette.setColor(QtGui.QPalette.Text, self.textColor)
self.palette.setColor(QtGui.QPalette.BrightText, self.highlightColor)
self.palette.setColor(QtGui.QPalette.Link, self.keywordColor)
self.setPalette(self.palette)
self.highlighter = OTIOSyntaxHighlighter(self.palette, self.document())
def set_item(self, item):
if item is None:
self.setPlainText('')
else:
s = otio.adapters.write_to_string(item, 'otio_json')
self.setPlainText(s)
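# Minimal usage sketch (illustrative only; assumes a running QApplication):
#
#   details = Details()
#   details.set_item(otio.schema.Clip(name="example"))  # shows the clip serialized as otio_json
#   details.set_item(None)                               # clears the widget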
class OTIOSyntaxHighlighter(QtGui.QSyntaxHighlighter):
def __init__(self, palette, parent=None):
super(OTIOSyntaxHighlighter, self).__init__(parent)
self.punctuation_format = QtGui.QTextCharFormat()
self.punctuation_format.setForeground(palette.link())
self.punctuation_format.setFontWeight(QtGui.QFont.Bold)
self.key_format = QtGui.QTextCharFormat()
# self.key_format.setFontItalic(True)
self.literal_format = QtGui.QTextCharFormat()
self.literal_format.setForeground(palette.brightText())
self.literal_format.setFontWeight(QtGui.QFont.Bold)
self.value_format = QtGui.QTextCharFormat()
self.value_format.setForeground(palette.brightText())
self.value_format.setFontWeight(QtGui.QFont.Bold)
self.schema_format = QtGui.QTextCharFormat()
self.schema_format.setForeground(QtGui.QColor(161, 194, 97))
self.schema_format.setFontWeight(QtGui.QFont.Bold)
def highlightBlock(self, text):
expression = QtCore.QRegExp("(\\{|\\}|\\[|\\]|\\:|\\,)")
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, self.punctuation_format)
index = expression.indexIn(text, index + length)
        text = text.replace("\\\"", "  ")  # mask escaped quotes (length-preserving) so the quote-matching regexes below skip them
expression = QtCore.QRegExp("\".*\" *\\:")
expression.setMinimal(True)
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length - 1, self.key_format)
index = expression.indexIn(text, index + length)
expression = QtCore.QRegExp("\\: *\".*\"")
expression.setMinimal(True)
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
firstQuoteIndex = text.index('"', index)
valueLength = length - (firstQuoteIndex - index) - 2
self.setFormat(firstQuoteIndex + 1, valueLength, self.value_format)
index = expression.indexIn(text, index + length)
        expression = QtCore.QRegExp("\\: (null|true|false|[0-9\\.]+)")
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, self.literal_format)
index = expression.indexIn(text, index + length)
expression = QtCore.QRegExp(r"\"OTIO_SCHEMA\"\s*:\s*\".*\"")
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, self.schema_format)
index = expression.indexIn(text, index + length)
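        # For reference, on a JSON line such as   "OTIO_SCHEMA": "Clip.1",
        # the passes above apply punctuation_format to ':' and ',', key_format to the
        # quoted key, value_format to the quoted value, and finally schema_format to
        # the whole OTIO_SCHEMA pair (later setFormat calls override earlier ones).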
|