id | content
---|---|
464469
|
import re
import string
from urllib.parse import quote as urlquote, urlsplit
from libweasyl.cache import region
from weasyl import define as d
_BANDCAMP_EMBED = re.compile(r"(album|track)=(\d+)")
_OEMBED_MAP = {
"youtube": "https://www.youtube.com/oembed?url=%s&maxwidth=640&maxheight=360",
"vimeo": "https://vimeo.com/api/oembed.json?url=%s&maxwidth=400&maxheight=300",
"soundcloud": "https://soundcloud.com/oembed?format=json&url=%s",
"sketchfab": "https://sketchfab.com/oembed?url=%s&maxwidth=640&maxheight=480",
}
def _service(link):
"""
Returns the content service name based on the URL provided.
"""
url = urlsplit(link)
domain = "." + url.netloc.lower()
if domain.endswith((".youtube.com", ".youtu.be")):
return "youtube"
elif domain.endswith(".vimeo.com"):
return "vimeo"
elif domain.endswith(".bandcamp.com"):
return "bandcamp"
elif domain.endswith(".soundcloud.com"):
return "soundcloud"
elif domain.endswith(".sketchfab.com"):
return "sketchfab"
def _targetid(link):
"""
Returns the content service's identifier based on the URL provided.
"""
service = _service(link)
if service in _OEMBED_MAP:
# Validate with oEmbed
link = link.strip()
alpha = string.printable
elif service == "bandcamp":
content = d.http_get(link).text
match = _BANDCAMP_EMBED.search(content)
if match:
return match.groups()
else:
return None
else:
return None
for i in range(len(link)):
if link[i] not in alpha:
return link[:i]
return link
@region.cache_on_arguments(expiration_time=60 * 60 * 24)
@d.record_timing
def _embed_json(service, targetid):
"""
Returns oEmbed JSON for a given URL and service
"""
return d.http_get(_OEMBED_MAP[service] % (urlquote(targetid),)).json()
def html(link):
"""
Returns the HTML code to be used in a template for a given identifier.
"""
targetid, service = _targetid(link), _service(link)
if targetid:
if service in _OEMBED_MAP:
try:
return _embed_json(service, targetid)["html"]
except (ValueError, KeyError):
return "There was an error retrieving the embedded media"
elif service == "bandcamp":
return (
'<iframe width="400" height="100" '
'style="position:relative;display:block;width:400px;height:100px;" '
'src="https://bandcamp.com/EmbeddedPlayer/v=2/%s=%s/size=venti/bgcol=F0F0F0/linkcol=4285BB/" '
'allowtransparency="false" frameborder="0"></iframe>' % (targetid[0], targetid[1]))
def thumbnail(link):
"""
Returns the URL to a thumbnail for a given identifier.
"""
targetid, service = _targetid(link), _service(link)
if targetid:
if service in _OEMBED_MAP:
try:
return _embed_json(service, targetid)["thumbnail_url"]
except (ValueError, KeyError):
return None
elif service == "bandcamp":
# Sometime in the future, parse the HTML for the image_src meta tag
return None
return None
def check_valid(link):
"""
Returns True if a given URL is embeddable.
"""
targetid, service = _targetid(link), _service(link)
if targetid:
if service in _OEMBED_MAP:
try:
return bool(_embed_json(service, targetid)["html"])
except (ValueError, KeyError):
return
elif service == "bandcamp":
return True
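# --- Illustrative sketch (not part of the original module) ---
# _service() above dispatches on the registrable domain suffix rather than the
# full netloc, so subdomains such as "m.youtube.com" still resolve. A minimal,
# dependency-free recreation of that idea, with an assumed suffix table:
from urllib.parse import urlsplit

_SUFFIX_TO_SERVICE = {
    ".youtube.com": "youtube", ".youtu.be": "youtube",
    ".vimeo.com": "vimeo", ".bandcamp.com": "bandcamp",
    ".soundcloud.com": "soundcloud", ".sketchfab.com": "sketchfab",
}

def _service_sketch(link):
    domain = "." + urlsplit(link).netloc.lower()
    for suffix, service in _SUFFIX_TO_SERVICE.items():
        if domain.endswith(suffix):
            return service
    return None

assert _service_sketch("https://m.youtube.com/watch?v=example") == "youtube"
assert _service_sketch("https://example.com/video") is None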
|
464520
|
import json
from django.core.exceptions import ValidationError
from django_sorcery import fields
from django_sorcery.forms import (
apply_limit_choices_to_form_field,
modelform_factory,
)
from .base import TestCase
from .testapp.models import CompositePkModel, Owner, Vehicle, VehicleType, db
class TestEnumField(TestCase):
def test_field(self):
field = fields.EnumField(enum_class=VehicleType)
with self.assertRaises(ValidationError) as ctx:
value = field.clean(None)
self.assertEqual(ctx.exception.code, "required")
with self.assertRaises(ValidationError) as ctx:
value = field.clean("")
self.assertEqual(ctx.exception.code, "required")
value = field.to_python("")
value = field.clean("car")
self.assertEqual(value, VehicleType.car)
value = field.clean("Car")
self.assertEqual(value, VehicleType.car)
value = field.to_python(VehicleType.car)
self.assertEqual(value, VehicleType.car)
with self.assertRaises(ValidationError):
field.clean("blue")
value = field.valid_value(None)
self.assertFalse(value)
value = field.prepare_value(VehicleType.car)
self.assertEqual(value, "car")
value = field.bound_data(VehicleType.car, None)
self.assertEqual(value, "car")
value = field.bound_data(None, None)
self.assertIsNone(value)
value = field.bound_data(None, VehicleType.car)
self.assertIsNone(value, "car")
value = field.prepare_value(None)
self.assertFalse(value)
def test_field_not_required(self):
field = fields.EnumField(enum_class=VehicleType, required=False)
value = field.clean(None)
self.assertIsNone(value)
value = field.clean("")
self.assertIsNone(value)
value = field.clean("car")
self.assertEqual(value, VehicleType.car)
value = field.clean("Car")
self.assertEqual(value, VehicleType.car)
with self.assertRaises(ValidationError):
field.to_python("blue")
value = field.bound_data(VehicleType.car, None)
self.assertEqual(value, "car")
value = field.bound_data(None, None)
self.assertIsNone(value)
value = field.bound_data(None, VehicleType.car)
self.assertIsNone(value, "car")
class TestModelChoiceField(TestCase):
def setUp(self):
super().setUp()
db.add_all([Owner(first_name="first_name {}".format(i), last_name="last_name {}".format(i)) for i in range(10)])
db.flush()
def test_apply_limit_value(self):
field = fields.ModelChoiceField(Owner, db, limit_choices_to=[Owner.first_name == "first_name 1"])
apply_limit_choices_to_form_field(field)
self.assertEqual(field.queryset.count(), 1)
def test_apply_limit_callable(self):
def limit_choices_to():
return [Owner.first_name == "first_name 1"]
field = fields.ModelChoiceField(Owner, db, required=True, limit_choices_to=limit_choices_to)
apply_limit_choices_to_form_field(field)
self.assertEqual(field.queryset.count(), 1)
def test_choices(self):
field = fields.ModelChoiceField(Owner, db)
self.assertListEqual(
list(field.choices), [("", field.empty_label)] + [(owner.id, str(owner)) for owner in Owner.query]
)
field = fields.ModelChoiceField(Owner, db, required=True, initial=1)
self.assertListEqual(list(field.choices), [(owner.id, str(owner)) for owner in Owner.query])
def test_get_object(self):
owner = Owner.objects.first()
field = fields.ModelChoiceField(Owner, db)
self.assertIsNone(field.get_object(None))
owner = field.get_object(owner.id)
self.assertIsNotNone(owner)
self.assertIsInstance(owner, Owner)
with self.assertRaises(ValidationError) as ctx:
field.get_object(0)
self.assertEqual(
ctx.exception.args,
("Select a valid choice. That choice is not one of the available choices.", "invalid_choice", None),
)
def test_get_object_composite_pk(self):
instance = CompositePkModel(id=1, pk=2)
db.add(instance)
db.flush()
field = fields.ModelChoiceField(CompositePkModel, db)
pks = field.prepare_value(instance)
obj = field.get_object(json.dumps(pks))
self.assertIsNotNone(obj)
self.assertIs(obj, instance)
def test_to_python(self):
owner = Owner.objects.first()
field = fields.ModelChoiceField(Owner, db)
owner = field.to_python(owner.id)
self.assertIsNotNone(owner)
self.assertIsInstance(owner, Owner)
def test_label_from_instance(self):
owner = Owner.objects.first()
field = fields.ModelChoiceField(Owner, db)
self.assertEqual(field.label_from_instance(Owner.query.get(owner.id)), repr(owner))
def test_prepare_instance_value(self):
field = fields.ModelChoiceField(Owner, db)
owner = Owner.objects.first()
pks = field.prepare_instance_value(Owner.query.get(owner.id))
self.assertEqual(pks, owner.id)
def test_prepare_instance_value_composite(self):
field = fields.ModelChoiceField(CompositePkModel, db)
pks = field.prepare_instance_value(CompositePkModel(id=1, pk="a"))
self.assertDictEqual(pks, {"id": 1, "pk": "a"})
def test_prepare_value(self):
field = fields.ModelChoiceField(CompositePkModel, db)
pks = field.prepare_value(CompositePkModel(id=1, pk="a"))
self.assertDictEqual(pks, {"id": 1, "pk": "a"})
def test_validate(self):
field = fields.ModelChoiceField(Owner, db, required=True)
self.assertIsNone(field.validate(1))
with self.assertRaises(ValidationError):
field.validate(None)
def test_get_bound_field(self):
db.rollback()
form = modelform_factory(Vehicle, fields=("owner",), session=db)()
field = form.fields["owner"]
bf = field.get_bound_field(form, "owner")
self.assertHTMLEqual(
str(bf), '<select name="owner" id="id_owner"><option value="" selected>---------</option></select>'
)
class TestModelMultipleChoiceField(TestCase):
def setUp(self):
super().setUp()
db.add_all([Owner(first_name="first_name {}".format(i), last_name="last_name {}".format(i)) for i in range(10)])
db.flush()
def test_to_python(self):
field = fields.ModelMultipleChoiceField(Owner, db)
owner1, owner2, owner3 = Owner.objects[:3]
self.assertEqual(field.to_python(None), [])
self.assertEqual(field.to_python([owner1.id, owner2.id, owner3.id]), [owner1, owner2, owner3])
def test_prepare_value(self):
field = fields.ModelMultipleChoiceField(Owner, db)
owner1, owner2, owner3 = Owner.objects[:3]
self.assertIsNone(field.prepare_value(None))
self.assertEqual(field.prepare_value([owner1, owner2, owner3]), [owner1.id, owner2.id, owner3.id])
|
464533
|
from unittest import TestCase
import pyrealsense as pyrs
from pyrealsense.utils import RealsenseError
class Test_Service_Device(TestCase):
def test_is_started(self):
        service = None
        try:
            service = pyrs.Service()
        except RealsenseError as e:
            self.assertTrue(e.function == 'rs_create_context')
        else:
            try:
                dev = service.Device()
            except RealsenseError as e:
                self.assertTrue(e.function == 'rs_get_device')
            else:
                dev.stop()
        finally:
            # Only stop the service if it was actually created; otherwise an
            # unbound `service` would raise a NameError and mask the real failure.
            if service is not None:
                service.stop()
|
464535
|
from torch.onnx import register_custom_op_symbolic
# Register ONNX symbolic functions for the torch fake-quantize ops.
def _fake_quantize_learnable_per_tensor_affine(g, x, scale, zero_point, quant_min, quant_max, grad_factor):
return g.op("::LearnablePerTensorAffine", x, scale, zero_point, quant_min, quant_max)
register_custom_op_symbolic('::_fake_quantize_learnable_per_tensor_affine', _fake_quantize_learnable_per_tensor_affine, 11)
def fake_quantize_per_channel_affine(g, x, scale, zero_point, ch_axis, quant_min, quant_max):
return g.op("::FixedPerChannelAffine", x, scale, zero_point, ch_axis, quant_min, quant_max)
register_custom_op_symbolic('::fake_quantize_per_channel_affine', fake_quantize_per_channel_affine, 11)
def fake_quantize_per_tensor_affine(g, x, scale, zero_point, quant_min, quant_max):
return g.op("::FixedPerTensorAffine", x, scale, zero_point, quant_min, quant_max)
register_custom_op_symbolic('::fake_quantize_per_tensor_affine', fake_quantize_per_tensor_affine, 11)
|
464537
|
from FWCore.PythonFramework.CmsRun import CmsRun
import FWCore.ParameterSet.Config as cms
process = cms.Process("Test")
process.source = cms.Source("EmptySource")
nEvents = 10
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(nEvents))
var = 5
outList = []
process.m = cms.EDProducer("edmtest::PythonTestProducer", inputVariable = cms.string("var"),
outputListVariable = cms.string("outList"),
source = cms.InputTag("ints"))
process.ints = cms.EDProducer("IntProducer", ivalue = cms.int32(1))
process.p = cms.Path(process.m, cms.Task(process.ints))
cmsRun = CmsRun(process)
cmsRun.run()
assert (outList == [1]*nEvents)
|
464590
|
import json
import os
import re
import datetime
import tempfile
import six
from requests_toolbelt.multipart import decoder
import asposewordscloud.models
from asposewordscloud.api_client import rest
class BaseRequestObject(object):
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long, # pylint: disable=undefined-variable
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def getparts(self, response):
return decoder.MultipartDecoder(response.data, response.getheader('Content-Type'), 'UTF-8').parts
def deserialize(self, response_data, klass, api_client):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if response_data is None:
return None
data = response_data
if isinstance(response_data, rest.RESTResponse):
# fetch data from response object
data = response_data.data
if six.PY3:
data = response_data.data.decode('utf8')
try:
data = json.loads(data)
except:
pass
if type(klass) == str:
if klass.startswith('list['):
sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.deserialize(sub_data, sub_kls, api_client)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.deserialize(v, sub_kls, api_client)
for k, v in six.iteritems(data)}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(asposewordscloud.models, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datatime(data)
else:
return self.__deserialize_model(data, klass, api_client)
def deserialize_file(self, data, headers, api_client):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=api_client.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
if 'Content-Disposition' in headers.keys():
content_disposition = headers["Content-Disposition"]
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
filename = filename.replace('/', '_')
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(data)
return path
def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
if not six.PY3:
return data.encode('utf8')
return six.u(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""Return a original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason="Failed to parse `{0}` as date object".format(string)
)
def __deserialize_datatime(self, string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason=(
"Failed to parse `{0}` as datetime object"
.format(string)
)
)
def __deserialize_model(self, data, klass, api_client):
"""Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
if klass is None:
return data
if not klass.swagger_types and not hasattr(klass,
'get_real_child_model'):
return data
kwargs = {}
if klass.swagger_types is not None:
for attr, attr_type in six.iteritems(klass.swagger_types):
if (data is not None and
klass.attribute_map[attr] in data and
isinstance(data, (list, dict))):
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.deserialize(value, attr_type, api_client)
instance = klass(**kwargs)
if hasattr(instance, 'get_real_child_model'):
klass_name = instance.get_real_child_model(data)
if klass_name:
instance = self.deserialize(data, klass_name, api_client)
return instance
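# --- Illustrative sketch (assumption, not part of the original class) ---
# deserialize() above drives its recursion off the swagger type string:
# "list[X]" and "dict(K, V)" are unpacked with regexes before the element type
# is resolved. A standalone recreation of just that parsing step, using a
# placeholder type name "Document":
import re

def _element_type(klass):
    list_match = re.match(r'list\[(.*)\]', klass)
    if list_match:
        return ('list', list_match.group(1))
    dict_match = re.match(r'dict\(([^,]*), (.*)\)', klass)
    if dict_match:
        return ('dict', dict_match.group(2))
    return ('scalar', klass)

assert _element_type('list[Document]') == ('list', 'Document')
assert _element_type('dict(str, int)') == ('dict', 'int')
assert _element_type('str') == ('scalar', 'str')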
|
464594
|
from bip.gui import *
import pytest
"""
Test for class :class:`BipUserSelect` in ``bip/gui/userselect.py``.
"""
def test_bipuserselect00():
# staticmethod
    # These are complicated to test, so we only call them to check for API problems.
BipUserSelect.get_curr_highlighted_str()
BipUserSelect.get_curr_highlighted_int()
|
464602
|
import numpy as np
import scipy.signal
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
import utils_hw as utils
from dataset import BaseDataset
class HandWritingDataset(BaseDataset):
"""
Customized for handwriting dataset.
    Stroke data is assumed to consist of 3 dimensions: x, y and pen, respectively. If the stroke data is
    concatenated with other modalities, the stroke data occupies the first 3 dimensions.
Args:
data_path (str): path to numpy dataset file. See data_scripts/preprocessing.py for details.
var_len_seq (bool): whether the dataset consists of variable-length sequences or not. If set to False, then
it is determined from the dataset samples.
"""
def __init__(self, data_path, var_len_seq=False):
super(HandWritingDataset, self).__init__(data_path)
# TODO new_dataset
#self.samples = self.data_dict['strokes']
self.samples = self.data_dict['samples'] if 'samples' in self.data_dict.keys() else self.data_dict['strokes']
self.char_labels = self.data_dict['char_labels']
self.subject_labels = self.data_dict['subject_labels']
self.texts= self.data_dict['texts']
self.feature_size = self.samples[0].shape[-1] # x,y,pen
# Models require input and target dimensionality. They are useful if the inputs and targets are concatenation
# of different modalities. They are used to split the input/target into components.
self.input_dims = [self.feature_size]
self.target_dims = [2, 1] # Stroke, pen
# The dimensions with None will be padded if seq_len isn't passed.
self.sequence_length = None if var_len_seq else self.extract_seq_len()
        self.is_dynamic = self.sequence_length is None
# sequence length, strokes, targets (i.e., strokes).
self.sample_shape = [[], [self.sequence_length, self.feature_size], [self.sequence_length, self.feature_size]]
self.sample_np_type = [np.int32, np.float32, np.float32]
self.num_samples = len(self.samples)
# Preprocessing
self.normalization = 'normalization' in self.data_dict['preprocessing']
if not self.normalization:
print("Warning: data is not normalized.")
elif not ('mean' in self.data_dict):
raise Exception("Normalization statistics (mean and std) are missing.")
else:
self.norm_mean = self.data_dict['mean']
self.norm_std = self.data_dict['std']
self.relative_representation = 'relative_representation' in self.data_dict['preprocessing']
self.offset_removal = 'origin_translation' in self.data_dict['preprocessing']
self.scale = 'scale' in self.data_dict['preprocessing']
if self.scale and not('min' in self.data_dict):
pass
#raise Exception("Scaling statistics (min and max) are missing.")
else:
self.scale_min = self.data_dict['min']
self.scale_max = self.data_dict['max']
def preprocess_raw_sample(self, sample):
"""
        Takes a raw sample and applies the same preprocessing steps that were applied to the dataset.
Args:
sample: [seq_len, 3]
Returns:
"""
sample_copy = np.copy(sample[:, :3])
statistics = {}
if self.scale:
sample_copy[:, [0, 1]] = ((sample-self.scale_min)/(self.scale_max-self.scale_min))[:, [0, 1]]
if self.offset_removal:
statistics['x_offset'] = sample_copy[0, 0]
statistics['y_offset'] = sample_copy[0, 1]
sample_copy[:, 0] -= statistics['x_offset']
sample_copy[:, 1] -= statistics['y_offset']
if self.relative_representation:
source = np.vstack((sample_copy[0], sample_copy))
sample_copy = np.diff(source, axis=0)
sample_copy[:, 2] = sample[:, 2] # Keep original pen information since it is already relative.
if self.normalization:
sample_copy[:, [0, 1]] = ((sample_copy-self.norm_mean)/self.norm_std)[:, [0, 1]]
return sample_copy, statistics
def undo_preprocess(self, sample, statistics=None):
"""
Applies preprocessing in reverse order by using statistics parameters.
Args:
sample (numpy.ndarray): [seq_len, 3]
statistics (dict): Contains dataset ("min", "max", "mean", "std") and sample ("x_offset", "y_offset")
statistics. If a (dataset statistics) key is not found in the dictionary or has None value, then class
statistics will be used.
Returns:
(numpy.ndarray): [seq_len, 3]
"""
if statistics is None:
statistics = {}
sample_copy = np.copy(sample[:, :3])
if self.normalization:
mean_ = self.norm_mean
std_ = self.norm_std
if ('mean' in statistics) and (statistics['mean'] is not None):
mean_ = statistics['mean']
std_ = statistics['std']
sample_copy[:, :2] = (sample_copy*std_ + mean_)[:, :2]
if self.relative_representation:
sample_copy = np.cumsum(sample_copy, 0) # Assuming that the sequence always starts with 0.
if self.offset_removal and 'x_offset' in statistics:
sample_copy[:, 0] += statistics['x_offset']
sample_copy[:, 1] += statistics['y_offset']
if self.scale:
min_ = self.scale_min
max_ = self.scale_max
if ('min' in statistics) and (statistics['min'] is not None):
min_ = statistics['min']
max_ = statistics['max']
sample_copy[:, :2] = (sample_copy[:,:3]*(max_-min_) + min_)[:, :2]
sample_copy[:, 2] = sample[:, 2]
return sample_copy
def prepare_for_visualization(self, sample, detrend_sample=False):
"""
TODO: Move this method into a more proper class.
Args:
sample:
Returns:
"""
sample_copy = np.copy(sample[:,:3])
if self.normalization:
sample_copy = sample_copy*self.norm_std+self.norm_mean
if detrend_sample:
sample_copy[:,1] = scipy.signal.detrend(sample_copy[:,1])
if self.relative_representation:
sample_copy = np.cumsum(sample_copy, 0) # Assuming that the sequence always starts with 0.
sample_copy[:,2] = sample[:,2]
return sample_copy
def undo_normalization(self, sample, detrend_sample=False):
"""
TODO: Move this method into a more proper class.
Args:
sample:
Returns:
"""
sample_copy = np.copy(sample[:,:3])
if self.normalization:
sample_copy = sample_copy*self.norm_std+self.norm_mean
if detrend_sample:
sample_copy[:,1] = scipy.signal.detrend(sample_copy[:,1])
sample_copy[:,2] = sample[:,2]
return sample_copy
def sample_generator(self):
"""
Creates a generator object which returns one data sample at a time. It is used by DataFeeder objects.
Returns:
(generator): each sample is a list of data elements.
"""
for stroke in self.samples:
yield [stroke.shape[0], stroke, stroke]
def fetch_sample(self, sample_idx):
"""
Prepares one data sample (i.e. return of sample_generator) given index.
Args:
sample_idx:
Returns:
"""
stroke = self.samples[sample_idx]
return [stroke.shape[0], stroke, stroke]
# TODO Auxiliary methods can be in utils.
def get_seq_len_histogram(self, num_bins=10, collapse_first_and_last_bins=[1, -1]):
"""
Creates a histogram of sequence-length.
Args:
num_bins:
collapse_first_and_last_bins: selects bin edges between the provided indices by discarding from the
first
and last bins.
Returns:
(list): bin edges.
"""
seq_lens = [s.shape[0] for s in self.samples]
h, bins = np.histogram(seq_lens, bins=num_bins)
if collapse_first_and_last_bins is not None:
return [int(b) for b in bins[collapse_first_and_last_bins[0]:collapse_first_and_last_bins[1]]]
else:
return [int(b) for b in bins]
def extract_seq_len(self):
seq_lens = [s.shape[0] for s in self.samples]
if max(seq_lens) == min(seq_lens):
return min(seq_lens)
else:
return None
class HandWritingDatasetConditional(HandWritingDataset):
"""
Uses character labels.
In contrast to HandWritingDataset dataset (i.e., non-conditional), concatenates one-hot-vector char labels with
strokes.
Args:
data_path (str): path to numpy dataset file. See data_scripts/preprocessing.py for details.
var_len_seq (bool): whether the dataset consists of variable-length sequences or not. If set to False, then
it is determined from the dataset samples.
use_bow_labels (bool): whether beginning-of-word labels (bow_labels) are yielded as model inputs or not.
"""
def __init__(self, data_path, var_len_seq=None, use_bow_labels=True):
super(HandWritingDatasetConditional, self).__init__(data_path, var_len_seq)
self.use_bow_labels = use_bow_labels
if not('alphabet' in self.data_dict):
raise Exception("Alphabet is missing.")
self.alphabet = self.data_dict['alphabet']
self.alphabet_size = len(self.alphabet)
self.feature_size = self.samples[0].shape[-1] # x,y,pen
# Models require input and target dimensionality. They are useful if the inputs and targets are concatenation
# of different modalities. They are used to split the input/target into components.
self.input_dims = [self.feature_size, len(self.alphabet)]
self.target_dims = [2, 1, len(self.alphabet), 1] # Stroke, pen, character labels, eoc
if use_bow_labels:
self.input_dims = [self.feature_size, len(self.alphabet), 1]
self.target_dims = [2, 1, len(self.alphabet), 1, 1] # Stroke, pen, character labels, eoc, bow
int_alphabet = np.expand_dims(np.array(range(self.alphabet_size)), axis=1)
self.char_encoder = LabelEncoder()
self.char_encoder.fit(self.alphabet)
self.one_hot_encoder = OneHotEncoder(sparse=False)
self.one_hot_encoder.fit(int_alphabet)
self.__encode_labels()
self.eoc_labels = self.data_dict['eoc_labels']
self.boc_labels = self.data_dict['boc_labels'] if 'boc_labels' in self.data_dict.keys() else self.data_dict['soc_labels']
self.eow_labels = self.data_dict['eow_labels']
self.bow_labels = self.data_dict['bow_labels'] if 'bow_labels' in self.data_dict.keys() else self.data_dict['sow_labels']
# sequence length, strokes, targets (i.e., strokes+end-of-character).
# The dimensions with None will be padded if seq_len isn't passed.
self.sample_shape = [[], [self.sequence_length, sum(self.input_dims)], [self.sequence_length, sum(self.target_dims)]]
def text_to_one_hot(self, text):
integer_labels = self.char_encoder.transform(list(text))
return self.one_hot_encoder.transform(np.expand_dims(integer_labels, axis=1))
def int_labels_to_one_hot(self, int_labels):
return self.one_hot_encoder.transform(np.expand_dims(int_labels, axis=1))
def logit_to_one_hot(self, one_hot):
integer_labels = np.argmax(one_hot, -1)
return self.int_labels_to_one_hot(integer_labels)
def one_hot_to_int_labels(self, one_hot):
return np.argmax(one_hot, -1)
def int_labels_to_text(self, int_labels):
text_labels = utils.simplify_int_labels(int_labels)
text = self.char_encoder.inverse_transform(text_labels)
return text
def __encode_labels(self):
"""
Encodes integer character labels as one-hot vectors.
Returns:
"""
self.one_hot_char_labels = []
        for label in self.data_dict['char_labels']:
            self.one_hot_char_labels.append(self.one_hot_encoder.transform(np.expand_dims(label, axis=1)))
def sample_generator(self):
"""
Creates a generator object which returns one data sample at a time. It is used by DataFeeder objects.
Returns:
(generator): each sample is a list of data elements.
"""
for stroke, char_label, eoc_label, bow_label in zip(self.samples, self.one_hot_char_labels, self.eoc_labels, self.bow_labels):
bow_label_ = np.expand_dims(bow_label, axis=1)
eoc_label_ = np.expand_dims(eoc_label, axis=1)
if self.use_bow_labels:
yield [stroke.shape[0], np.float32(np.hstack([stroke, char_label, bow_label_])), np.float32(np.hstack([stroke, char_label, eoc_label_, bow_label_]))]
else:
yield [stroke.shape[0], np.float32(np.hstack([stroke, char_label])), np.float32(np.hstack([stroke, char_label, eoc_label_]))]
def fetch_sample(self, sample_idx):
"""
Prepares one data sample (i.e. return of sample_generator) given index.
Args:
sample_idx:
Returns:
"""
stroke = self.samples[sample_idx]
char_label = self.one_hot_char_labels[sample_idx]
eoc_label = np.expand_dims(self.eoc_labels[sample_idx], axis=1)
if self.use_bow_labels:
bow_label = np.expand_dims(self.bow_labels[sample_idx], axis=1)
return [stroke.shape[0], np.expand_dims(np.float32(np.hstack([stroke, char_label, bow_label])), axis=0), np.expand_dims(np.float32(np.hstack([stroke, char_label, eoc_label, bow_label])), axis=0)]
else:
return [stroke.shape[0], np.expand_dims(np.float32(np.hstack([stroke, char_label])), axis=0), np.expand_dims(np.float32(np.hstack([stroke, char_label, eoc_label])), axis=0)]
class HandWritingClassificationDataset(HandWritingDatasetConditional):
"""
Handwriting dataset for character classification/segmentation models. In contrast to parent class
HandWritingDatasetConditional, its sample_generator method yields only strokes as model input and
[char_label, eoc_label, (bow_label)] as model target.
Args:
data_path (str): path to numpy dataset file. See data_scripts/preprocessing.py for details.
var_len_seq (bool): whether the dataset consists of variable-length sequences or not. If set to False, then
it is determined from the dataset samples.
use_bow_labels (bool): whether beginning-of-word labels (bow_labels) are yielded as model targets or not.
data_augmentation (bool): whether to apply data augmentation or not. If set True, strokes are scaled randomly.
"""
def __init__(self, data_path, var_len_seq=None, use_bow_labels=False, data_augmentation=False):
super(HandWritingClassificationDataset, self).__init__(data_path, var_len_seq)
self.bow_target = use_bow_labels
self.data_augmentation = data_augmentation
self.input_dims = [self.samples[0].shape[-1]]
self.feature_size = sum(self.input_dims)
if self.bow_target:
self.target_dims = [self.alphabet_size, 1, 1] # char_labels, end-of-character, sow
else:
self.target_dims = [self.alphabet_size, 1] # char_labels, end-of-character
# sequence length, strokes, targets
# The dimensions with None will be padded if sequence_length isn't passed.
self.sample_shape = [[], [self.sequence_length, sum(self.input_dims)], [self.sequence_length, sum(self.target_dims)]]
def sample_generator(self):
"""
Creates a generator object which returns one data sample at a time. It is used by DataFeeder objects.
Returns:
(generator): each sample is a list of data elements.
"""
if self.bow_target:
for stroke, char_label, eoc_label, bow_label in zip(self.samples, self.one_hot_char_labels, self.eoc_labels, self.bow_labels):
if self.data_augmentation:
stroke_augmented = stroke.copy()
stroke_augmented *= np.random.uniform(0.7,1.3, (1))
else:
stroke_augmented = stroke
yield [stroke.shape[0], stroke_augmented, np.float32(np.hstack([char_label, np.expand_dims(eoc_label,-1), np.expand_dims(bow_label,-1)]))]
else:
for stroke, char_label, eoc_label in zip(self.samples, self.one_hot_char_labels, self.eoc_labels):
if self.data_augmentation:
stroke_augmented = stroke.copy()
stroke_augmented *= np.random.uniform(0.7,1.3, (1))
else:
stroke_augmented = stroke
yield [stroke.shape[0], stroke_augmented, np.float32(np.hstack([char_label, np.expand_dims(eoc_label,-1)]))]
def fetch_sample(self, sample_idx):
"""
Prepares one data sample (i.e. return of sample_generator) given index.
Args:
sample_idx:
Returns:
"""
stroke = np.expand_dims(self.samples[sample_idx], axis=0)
char_label = self.one_hot_char_labels[sample_idx]
eoc_label = np.expand_dims(self.eoc_labels[sample_idx], -1)
bow_label = np.expand_dims(self.bow_labels[sample_idx], -1)
if self.bow_target:
return [stroke.shape[0], stroke, np.expand_dims(np.float32(np.hstack([char_label, eoc_label, bow_label])), axis=1)]
else:
return [stroke.shape[0], stroke, np.expand_dims(np.float32(np.hstack([char_label, eoc_label])), axis=1)]
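# --- Illustrative sketch (assumption, not part of the original classes) ---
# preprocess_raw_sample() builds the relative representation by prepending the
# first point before np.diff, so the differenced stroke starts at zero and
# undo_preprocess() can invert it with a plain cumulative sum:
import numpy as np

xy = np.cumsum(np.random.randn(6, 2), axis=0)           # an absolute-coordinate stroke
rel = np.diff(np.vstack((xy[0], xy)), axis=0)           # first row becomes (0, 0)
assert np.allclose(np.cumsum(rel, axis=0), xy - xy[0])  # cumsum recovers offsets from the origin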
|
464614
|
from neuron.neuron import Neuron
import numpy as np
class NeuronBuilder:
zero_quantization_error = None
def __init__(self, tau_2, growing_metric):
self.__growing_metric = growing_metric
self.__tau_2 = tau_2
def new_neuron(self, weights_map, position):
assert self.zero_quantization_error is not None, "Zero quantization error has not been set yet"
return Neuron(
weights_map,
position,
self.zero_quantization_error,
self.__tau_2,
self.__growing_metric
)
def zero_neuron(self, input_dataset):
input_dimension = input_dataset.shape[1]
zero_neuron = Neuron(
[self.__calc_input_mean(input_dataset).reshape(1, 1, input_dimension)],
(0, 0),
None,
None,
self.__growing_metric
)
zero_neuron.input_dataset = input_dataset
self.zero_quantization_error = zero_neuron.compute_quantization_error()
return zero_neuron
@staticmethod
def __calc_input_mean(input_dataset):
return input_dataset.mean(axis=0)
|
464625
|
import torch
import numpy as np
def get_sigmas(config):
if config.model.sigma_dist == 'geometric':
sigmas = torch.tensor(
np.exp(np.linspace(np.log(config.model.sigma_begin), np.log(config.model.sigma_end),
config.model.num_classes))).float().to(config.device)
elif config.model.sigma_dist == 'uniform':
sigmas = torch.tensor(
np.linspace(config.model.sigma_begin, config.model.sigma_end, config.model.num_classes)
).float().to(config.device)
else:
raise NotImplementedError('sigma distribution not supported')
return sigmas
@torch.no_grad()
def anneal_Langevin_dynamics(x_mod, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False):
images = []
with torch.no_grad():
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
grad = scorenet(x_mod, labels)
# denoise_x = x_mod + sigma ** 2 * grad
# concat_x = torch.cat([x_mod, denoise_x], dim=0)
# images.append(concat_x.to('cpu'))
noise = torch.randn_like(x_mod)
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2)
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm
grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print("level: {}, step_size: {}, grad_norm: {}, image_norm: {}, snr: {}, grad_mean_norm: {}".format(
c, step_size, grad_norm.item(), image_norm.item(), snr.item(), grad_mean_norm.item()))
# if c >= 0:
# return images
if final_only:
return [x_mod.to('cpu')]
else:
return images
@torch.no_grad()
def anneal_Langevin_dynamics_inpainting(x_mod, refer_image, scorenet, sigmas, image_size,
n_steps_each=100, step_lr=0.000008):
"""
Currently only good for 32x32 images. Assuming the right half is missing.
"""
images = []
refer_image = refer_image.unsqueeze(1).expand(-1, x_mod.shape[1], -1, -1, -1)
refer_image = refer_image.contiguous().view(-1, 3, image_size, image_size)
x_mod = x_mod.view(-1, 3, image_size, image_size)
cols = image_size // 2
half_refer_image = refer_image[..., :cols]
with torch.no_grad():
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
images.append(x_mod.to('cpu'))
corrupted_half_image = half_refer_image + torch.randn_like(half_refer_image) * sigma
x_mod[:, :, :, :cols] = corrupted_half_image
noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2)
grad = scorenet(x_mod, labels)
x_mod = x_mod + step_size * grad + noise
print("class: {}, step_size: {}, mean {}, max {}".format(c, step_size, grad.abs().mean(),
grad.abs().max()))
return images
@torch.no_grad()
def anneal_Langevin_dynamics_interpolation(x_mod, scorenet, sigmas, n_interpolations, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False):
images = []
n_rows = x_mod.shape[0]
x_mod = x_mod[:, None, ...].repeat(1, n_interpolations, 1, 1, 1)
x_mod = x_mod.reshape(-1, *x_mod.shape[2:])
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
grad = scorenet(x_mod, labels)
noise_p = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],
device=x_mod.device)
noise_q = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],
device=x_mod.device)
angles = torch.linspace(0, np.pi / 2., n_interpolations, device=x_mod.device)
noise = noise_p[:, None, ...] * torch.cos(angles)[None, :, None, None, None] + \
noise_q[:, None, ...] * torch.sin(angles)[None, :, None, None, None]
noise = noise.reshape(-1, *noise.shape[2:])
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2)
snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print(
"level: {}, step_size: {}, image_norm: {}, grad_norm: {}, snr: {}".format(
c, step_size, image_norm.item(), grad_norm.item(), snr.item()))
if final_only:
return [x_mod.to('cpu')]
else:
return images
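# --- Illustrative sketch (assumption, not part of the original module) ---
# The update above is x <- x + step*score + noise*sqrt(2*step) with
# step = step_lr * (sigma / sigmas[-1])**2. For a standard-normal target the
# noise-perturbed score is analytic, score(x; sigma) = -x / (1 + sigma**2),
# so the annealing schedule can be sanity-checked without a score network:
import numpy as np
import torch

def toy_annealed_langevin(n=2000, n_steps_each=100, step_lr=2e-5):
    sigmas = np.geomspace(1.0, 0.01, 10)
    x = torch.randn(n) * 3.0                         # start far from the target
    for sigma in sigmas:
        step = float(step_lr * (sigma / sigmas[-1]) ** 2)
        for _ in range(n_steps_each):
            score = -x / (1.0 + sigma ** 2)          # analytic perturbed score
            x = x + step * score + torch.randn_like(x) * np.sqrt(2 * step)
    return x

if __name__ == "__main__":
    samples = toy_annealed_langevin()
    print(samples.mean().item(), samples.std().item())  # roughly 0 and 1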
|
464654
|
default_app_config = "rest_live.apps.RestLiveConfig"
DEFAULT_GROUP_BY_FIELD = "pk"
def get_group_name(model_label) -> str:
return f"RESOURCE-{model_label}"
CREATED = "CREATED"
UPDATED = "UPDATED"
DELETED = "DELETED"
|
464675
|
import yaml, os
from github import Github
class GitHubController(object):
def __init__(self):
token = self.__get_token_from_env_variable()
if not token:
token = self.__get_token_from_config()
self.github = Github(token)
def __get_token_from_env_variable(self):
if 'GH_TOKEN' in os.environ:
return os.environ['GH_TOKEN']
return None
def __get_token_from_config(self):
cfg = ''
with open("./config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
if 'GitHub' in cfg:
if 'token' in cfg['GitHub']:
return cfg['GitHub']['token']
|
464699
|
from torch.utils.data import Dataset
import glob
import os
from PIL import Image
import cv2
import numpy as np
import h5py
import skimage.io
import skimage.color
import scipy.io as io
class BigDataset(Dataset):
def __init__(self, mode="train", **kwargs):
self.big_list = self.get_big_data()
self.root = "./data/shtu_dataset/original/part_A_final/train_data/" if mode == "train" else \
"./data/shtu_dataset/original/part_A_final/test_data/"
self.temp = glob.glob(self.root + "images/*.jpg")
self.paths = []
for img_path in self.temp:
if img_path in self.big_list:
self.paths.append(img_path)
if mode == "train":
self.paths *= 4
self.transform = kwargs['transform']
self.length = len(self.paths)
self.dataset = self.load_data()
def __len__(self):
return self.length
def __getitem__(self, item):
img, den = self.dataset[item]
if self.transform is not None:
img = self.transform(img)
return img, den
def load_data(self):
result = []
index = 0
for img_path in self.paths:
gt_path = img_path.replace('.jpg', '.h5').replace('images', 'ground_truth')
img = Image.open(img_path).convert('RGB')
            gt_file = h5py.File(gt_path, 'r')
den = np.asarray(gt_file['density'])
h = den.shape[0]
w = den.shape[1]
h_trans = h // 8
w_trans = w // 8
den = cv2.resize(den, (w_trans, h_trans),
interpolation=cv2.INTER_CUBIC) * (h * w) / (h_trans * w_trans)
result.append([img, den])
if index % 100 == 99 or index == self.length - 1:
print("load {0}/{1} images".format(index + 1, self.length))
index += 1
return result
def get_big_data(self):
big_root = './data/shtu_dataset/original/'
part_A_train = os.path.join(big_root, 'part_A_final/train_data', 'images')
part_A_test = os.path.join(big_root, 'part_A_final/test_data', 'images')
path_sets = [part_A_train, part_A_test]
big_list = []
for path in path_sets:
for img_path in glob.glob(os.path.join(path, '*.jpg')):
mat = io.loadmat(
img_path.replace('.jpg', '.mat').replace('images', 'ground_truth').replace('IMG_', 'GT_IMG_'))
number = mat["image_info"][0, 0][0, 0][1]
if number[0, 0] >= 400:
big_list.append(img_path)
return big_list
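# --- Illustrative sketch (assumption, not part of the original dataset class) ---
# The factor (h * w) / (h_trans * w_trans) applied after cv2.resize in
# load_data() keeps the integral of the density map (the crowd count)
# approximately unchanged when the map is downsampled by 8 per dimension:
if __name__ == "__main__":
    den = np.random.rand(512, 680).astype(np.float32)
    h, w = den.shape
    h_trans, w_trans = h // 8, w // 8
    den_small = cv2.resize(den, (w_trans, h_trans),
                           interpolation=cv2.INTER_CUBIC) * (h * w) / (h_trans * w_trans)
    print(den.sum(), den_small.sum())  # the two totals should be close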
|
464725
|
while True:
num = int(input())
if num == 0:
break
a = 0
b = 0
turn_a = True
i = 0
while num != 0:
bit = num & 1
num = num >> 1
if bit == 1:
if turn_a:
a ^= (1 << i)
else:
b ^= (1 << i)
turn_a = not turn_a
i += 1
print ("{} {}".format(a, b))
|
464727
|
import scipy.optimize as opt
import numpy as np
import pylab as plt
# define the model function and pass independent variables x and y as a list
def twoD_Gaussian(xy, amplitude, xo, yo, sigma_x, sigma_y, theta, offset):
x = xy[0]
y = xy[1]
xo = float(xo)
yo = float(yo)
a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
g = offset + amplitude*np.exp( - (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2)))
return g.ravel()
# Create x and y indices
x = np.linspace(0, 200, 201)
y = np.linspace(0, 200, 201)
x, y = np.meshgrid(x, y)
#create data
data = twoD_Gaussian((x, y), amplitude=3, xo=100, yo=100, sigma_x=20, sigma_y=40, theta=0, offset=10)
# plot twoD_Gaussian data generated above
plt.figure()
plt.imshow(np.reshape(data, (201, 201)))
plt.colorbar()
plt.show()
# add some noise to the data and try to fit the data generated beforehand
initial_guess = (3,100,100,20,40,0,10)
data_noisy = data + 0.2*np.random.normal(size=data.shape)
popt, pcov = opt.curve_fit(twoD_Gaussian, (x, y), data_noisy, p0=initial_guess)
data_fitted = twoD_Gaussian((x, y), *popt)
fig, ax = plt.subplots(1, 1)
# ax.hold() was removed in Matplotlib 3.0; overplotting is the default behaviour.
ax.imshow(data_noisy.reshape(201, 201), cmap=plt.cm.jet, origin='lower',
          extent=(x.min(), x.max(), y.min(), y.max()))
ax.contour(x, y, data_fitted.reshape(201, 201), 8, colors='w')
plt.show()
|
464741
|
import uuid
import kubernetes
import pytest
import pykorm
class Score(pykorm.models.Nested):
exterior: int = pykorm.fields.DataField('exterior')
delicious: int = pykorm.fields.DataField('delicious', 10)
class ScoreMixin(object):
score: Score = pykorm.fields.DictNestedField(Score, path=['spec', 'score'])
@pykorm.k8s_custom_object('pykorm.infomaniak.com', 'v1', 'apples')
class Apple(ScoreMixin, pykorm.ClusterModel):
variety: str = pykorm.fields.Spec('variety', 'default-variety')
price: str = pykorm.fields.Spec('price', 1)
@pykorm.k8s_custom_object('pykorm.infomaniak.com', 'v1', 'peaches')
class Peach(ScoreMixin, pykorm.NamespacedModel):
variety: str = pykorm.fields.Spec('variety', 'default-variety')
price: str = pykorm.fields.Spec('price', 1)
colours: list = pykorm.fields.Spec('colours', [])
@pykorm.pykorm.k8s_core(kind='Namespace')
class Namespace(pykorm.models.ClusterModel):
pass
@pykorm.pykorm.k8s_core(kind='Pod')
class Pod(pykorm.models.NamespacedModel):
pass
@pytest.fixture
def random_name():
return uuid.uuid4().hex
@pytest.fixture
def custom_objects_api():
kubernetes.config.load_kube_config()
return kubernetes.client.CustomObjectsApi()
@pytest.fixture
def pk():
return pykorm.Pykorm()
def remove_all_apples(custom_objects_api):
apples = custom_objects_api.list_cluster_custom_object('pykorm.infomaniak.com', 'v1', 'apples')
for apple in apples['items']:
custom_objects_api.delete_cluster_custom_object('pykorm.infomaniak.com', 'v1', 'apples',
apple['metadata']['name'])
def remove_all_peaches(custom_objects_api):
corev1 = kubernetes.client.CoreV1Api()
ns = corev1.list_namespace()
for ns in ns.items:
ns_name = ns.metadata.name
peaches = custom_objects_api.list_namespaced_custom_object('pykorm.infomaniak.com', 'v1', ns_name, 'peaches')
for peach in peaches['items']:
custom_objects_api.delete_namespaced_custom_object('pykorm.infomaniak.com', 'v1', ns_name, 'peaches',
peach['metadata']['name'])
@pytest.fixture(autouse=True, scope='function')
def remove_all_CR(custom_objects_api):
remove_all_apples(custom_objects_api)
remove_all_peaches(custom_objects_api)
def assertIsSubsetOf(d1, d2):
''' A crusty function to deal with nested dictionaries '''
if isinstance(d1, str) or isinstance(d1, int):
return d1 == d2
elif isinstance(d1, dict):
for key, value in d1.items():
assert key in d2
assert assertIsSubsetOf(value, d2[key])
return True
else:
return False
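# --- Illustrative usage (assumption, not part of the original fixtures) ---
# assertIsSubsetOf checks that every key/value of d1 appears, recursively, in d2:
def test_assert_is_subset_of_example():
    assert assertIsSubsetOf({'spec': {'variety': 'gala'}},
                            {'spec': {'variety': 'gala', 'price': 1}, 'status': {}})
    assert not assertIsSubsetOf(['not', 'a', 'dict'], {'anything': 1})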
|
464754
|
from . import vocab
from . import tokenizers
from . import batchify
from .vocab import *
__all__ = ['batchify', 'tokenizers'] + vocab.__all__
|
464797
|
from pybilt.bilayer_analyzer import BilayerAnalyzer
def test_leaflet_builder():
sel_string = "resname POPC DOPE TLCL2"
name_dict = {'DOPE':['P'],'POPC':['P'],'TLCL2':['P1','P3']}
analyzer = BilayerAnalyzer(structure='../pybilt/sample_bilayer/sample_bilayer.psf',
trajectory='../pybilt/sample_bilayer/sample_bilayer_10frames.dcd',
selection="resname POPC DOPE TLCL2")
analyzer.rep_settings['com_frame']['name_dict'] = name_dict
# Assign the leaflets using the 'avg_norm' method. (This is actually the
# default).
analyzer.adjust_rep_setting('leaflets', 'assign_method', 'avg_norm')
analyzer.run_analysis()
analyzer.reset()
    # Now redo it using the 'orientation' method to assign leaflets.
analyzer.adjust_rep_setting('leaflets', 'assign_method', 'orientation')
analyzer.adjust_rep_setting('leaflets', 'orientation_atoms', {'DOPE': ['C218','P'],
'POPC': ['C218', 'P'], 'TLCL2': ['CA18', 'P1']})
analyzer.run_analysis()
return
if __name__ == '__main__':
test_leaflet_builder()
|
464802
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Nearest Neighbor interpolation
def nn_interpolate(img, ax=1, ay=1):
H, W, C = img.shape
aH = int(ay * H)
aW = int(ax * W)
y = np.arange(aH).repeat(aW).reshape(aH, -1)
x = np.tile(np.arange(aW), (aH, 1))
    y = np.round(y / ay).astype(int)   # np.int was removed from NumPy; use the builtin int
    x = np.round(x / ax).astype(int)
out = img[y,x]
out = out.astype(np.uint8)
return out
# Read image
img = cv2.imread("imori.jpg").astype(np.float64)
# Nearest Neighbor
out = nn_interpolate(img, ax=1.5, ay=1.5)
# Save result
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.imwrite("out.jpg", out)
|
464884
|
import cmath
import math
import numpy
import scipy.linalg
import scipy.sparse.linalg
import time
import sys
from pauxy.propagation.operations import local_energy_bound
from pauxy.utils.linalg import exponentiate_matrix, reortho
from pauxy.walkers.single_det import SingleDetWalker
class PlaneWave(object):
"""PlaneWave class
"""
def __init__(self, system, trial, qmc, options={}, verbose=False):
if verbose:
print ("# Parsing plane wave propagator input options.")
# Derived Attributes
self.dt = qmc.dt
self.sqrt_dt = qmc.dt**0.5
self.isqrt_dt = 1j*self.sqrt_dt
self.mf_core = 0
self.num_vplus = system.nfields // 2
self.vbias = numpy.zeros(system.nfields, dtype=numpy.complex128)
# Mean-field shift is zero for UEG.
self.mf_shift = numpy.zeros(system.nfields, dtype=numpy.complex128)
optimised = options.get('optimised', True)
if optimised:
self.construct_force_bias = self.construct_force_bias_incore
self.construct_VHS = self.construct_VHS_incore
else:
print("# Slow routines not available. Please Implement.")
sys.exit()
# self.construct_force_bias = self.construct_force_bias_slow
# self.construct_VHS = self.construct_VHS_slow
# Input options
if verbose:
print ("# Finished setting up plane wave propagator.")
def construct_one_body_propagator(self, system, dt):
"""Construct the one-body propagator Exp(-dt/2 H0)
Parameters
----------
system :
system class
dt : float
time-step
Returns
-------
self.BH1 : numpy array
Exp(-dt/2 H0)
"""
H1 = system.h1e_mod
# No spin dependence for the moment.
self.BH1 = numpy.array([scipy.linalg.expm(-0.5*dt*H1[0]),
scipy.linalg.expm(-0.5*dt*H1[1])])
def construct_force_bias_incore(self, system, walker, trial):
"""Compute the force bias term as in Eq.(33) of DOI:10.1002/wcms.1364
Parameters
----------
system :
system class
G : numpy array
Green's function
Returns
-------
force bias : numpy array
-sqrt(dt) * vbias
"""
G = walker.G
Gvec = G.reshape(2, system.nbasis*system.nbasis)
self.vbias[:self.num_vplus] = Gvec[0].T*system.iA + Gvec[1].T*system.iA
self.vbias[self.num_vplus:] = Gvec[0].T*system.iB + Gvec[1].T*system.iB
# print(-self.sqrt_dt*self.vbias)
# sys.exit()
return - self.sqrt_dt * self.vbias
def construct_VHS_incore(self, system, xshifted):
"""Construct the one body potential from the HS transformation
Parameters
----------
system :
system class
xshifted : numpy array
        shifted auxiliary field
Returns
-------
VHS : numpy array
the HS potential
"""
return construct_VHS_incore(system, xshifted, self.sqrt_dt)
def construct_VHS_incore(system, xshifted, sqrt_dt):
"""Construct the one body potential from the HS transformation
Parameters
----------
system :
system class
xshifted : numpy array
        shifted auxiliary field
Returns
-------
VHS : numpy array
the HS potential
"""
VHS = numpy.zeros((system.nbasis, system.nbasis),
dtype=numpy.complex128)
VHS = (system.iA * xshifted[:system.nchol] +
system.iB * xshifted[system.nchol:])
VHS = VHS.reshape(system.nbasis, system.nbasis)
return sqrt_dt * VHS
def construct_propagator_matrix_planewave(system, BT2, config, dt):
"""Construct the full projector from a configuration of auxiliary fields.
For use with generic system object.
Parameters
----------
system : class
System class.
BT2 : :class:`numpy.ndarray`
One body propagator.
config : numpy array
Auxiliary field configuration.
Returns
-------
B : :class:`numpy.ndarray`
Full propagator matrix.
"""
VHS = construct_VHS_incore(system, config, dt**0.5)
EXP_VHS = exponentiate_matrix(VHS)
Bup = BT2[0].dot(EXP_VHS).dot(BT2[0])
Bdown = BT2[1].dot(EXP_VHS).dot(BT2[1])
return numpy.array([Bup, Bdown])
def back_propagate_planewave(phi, stack, system, nstblz, BT2, dt, store=False):
r"""Perform back propagation for RHF/UHF style wavefunction.
For use with generic system hamiltonian.
Parameters
---------
system : system object in general.
Container for model input options.
psi : :class:`pauxy.walkers.Walkers` object
CPMC wavefunction.
trial : :class:`pauxy.trial_wavefunction.X' object
Trial wavefunction class.
nstblz : int
Number of steps between GS orthogonalisation.
BT2 : :class:`numpy.ndarray`
One body propagator.
dt : float
Timestep.
Returns
-------
psi_bp : list of :class:`pauxy.walker.Walker` objects
Back propagated list of walkers.
"""
nup = system.nup
psi_store = []
for (i, c) in enumerate(stack.get_block()[0][::-1]):
B = construct_propagator_matrix_planewave(system, BT2, c, dt)
phi[:,:nup] = numpy.dot(B[0].conj().T, phi[:,:nup])
phi[:,nup:] = numpy.dot(B[1].conj().T, phi[:,nup:])
if i != 0 and i % nstblz == 0:
(phi[:,:nup], R) = reortho(phi[:,:nup])
(phi[:,nup:], R) = reortho(phi[:,nup:])
if store:
psi_store.append(phi.copy())
return psi_store
def unit_test():
from pauxy.systems.ueg import UEG
from pauxy.qmc.options import QMCOpts
from pauxy.trial_wavefunction.hartree_fock import HartreeFock
from pauxy.propagation.continuous import Continuous
inputs = {'nup':1, 'ndown':1,
'rs':1.0, 'ecut':1.0, 'dt':0.05, 'nwalkers':10}
system = UEG(inputs, True)
qmc = QMCOpts(inputs, system, True)
trial = HartreeFock(system, False, inputs, True)
propagator = Continuous(system, trial, qmc, verbose=True)
if __name__=="__main__":
unit_test()
|
464885
|
class DistanceMeasureBase(object):
@staticmethod
def distances(fixed_x, fixed_y, x_vec, y_vec):
'''
:param fixed_x: float
:param fixed_y: float
:param x_vec: np.array[float]
:param y_vec: np.array[float]
:return: np.array[float]
'''
raise NotImplementedError()
|
464921
|
from schema.parsers.opengraph import format_og
def test_format_og():
data = {
"namespace": {"og": "http://ogp.me/ns#"},
"properties": [
("og:description",
"Free Shipping on orders over $35. Buy Dungeons & Dragons Player's Handbook (Dungeons & Dragons Core Rulebooks) at Walmart.com"),
("og:image",
"https://i5.walmartimages.com/asr/ce1033ea-4934-4098-af07-16d0136689fd_1.5cebe0dbf47d95ddc489e506c8cc28f7.jpeg"),
("og:url",
"/ip/Dungeons-Dragons-Player-s-Handbook-Dungeons-Dragons-Core-Rulebooks-9780786965601/37784457"),
("og:title",
"Dungeons & Dragons Player's Handbook (Dungeons & Dragons Core Rulebooks) - Walmart.com"),
("og:site_name", "Walmart.com"),
("og:type", "product.item"),
],
}
expected = {
"description": "Free Shipping on orders over $35. Buy Dungeons & Dragons Player's Handbook (Dungeons & Dragons Core Rulebooks) at Walmart.com",
"image": "https://i5.walmartimages.com/asr/ce1033ea-4934-4098-af07-16d0136689fd_1.5cebe0dbf47d95ddc489e506c8cc28f7.jpeg",
"type": "product.item",
"url": "/ip/Dungeons-Dragons-Player-s-Handbook-Dungeons-Dragons-Core-Rulebooks-9780786965601/37784457",
"site_name": "Walmart.com",
"title": "Dungeons & Dragons Player's Handbook (Dungeons & Dragons Core Rulebooks) - Walmart.com",
}
assert format_og(data) == expected
|
465012
|
import sys
import GPUtil
import importlib
import logging
import multiprocessing as mp
import os
import traceback
import click
from toml import TomlDecodeError
from tqdm import tqdm
from multiprocessing import Queue, Event, Process
from multiprocessing.queues import Empty as QueueEmpty
from exp.params import ParamSpace, ParamDecodeError
def load_module(runnable_path):
""" Loads a python file with the module to be evaluated.
The module is a python file with a run function member. Each worker then
passes each configuration it receives from a Queue to run as keyword arguments
Args:
runnable_path:
Raises:
TypeError: if the loaded module doesn't have a run function
Returns:
        a reference to the newly loaded module
"""
runnable_path = os.path.abspath(runnable_path)
spec = importlib.util.spec_from_file_location("runnable", location=runnable_path)
runnable = importlib.util.module_from_spec(spec)
spec.loader.exec_module(runnable)
try:
getattr(runnable, "run")
except AttributeError:
raise TypeError("module in {} does not contain a \"run\" method".format(runnable_path))
return runnable
def worker(pid: int,
module_path: str,
config_queue: Queue,
result_queue: Queue,
error_queue: Queue,
           terminated: Event,
cancel):
""" Worker to be executed in its own process
Args:
cancel: if true terminates when an error is encountered, otherwise keeps running
error_queue: used to pass formatted stack traces to the main process
module_path: path to model runnable that is imported. It's method run is called on a given configuration
terminated: each worker should have its own flag
pid: (int) with worker id
config_queue: configuration queue used to receive the parameters for this worker, each configuration is a task
result_queue: queue where the worker deposits the results
Returns:
each time a new result is returned from calling the run function on the module, the worker puts this in to its
result multiprocessing Queue in the form (worker_id, configuration_id, result)
If an exception occurs during run(...) the worker puts that exception as the result into the queue instead
"""
os.environ["CUDA_VISIBLE_DEVICES"] = str(pid)
module = load_module(module_path)
while not terminated.is_set():
try:
kwargs = config_queue.get(timeout=0.5)
cfg_id = kwargs["id"]
kwargs["pid"] = pid
result = module.run(**kwargs)
result_queue.put((pid, cfg_id, result))
except QueueEmpty:
pass
except Exception as e:
if cancel:
terminated.set()
error_queue.put((pid, cfg_id, traceback.format_exc()))
result_queue.put((pid, cfg_id, e))
@click.command(help='runs all the configurations in a defined space')
@click.option('-p', '--params', required=True, type=click.Path(exists=True), help='path to parameter space file')
@click.option('-m', '--module', required=True, type=click.Path(exists=True), help='path to python module file')
@click.option('-r', '--runs', default=1, type=int, help='number of configuration runs')
@click.option('--name', default="exp", type=str, help='experiment name: used as prefix for some output files')
@click.option('-w', '--workers', default=1, type=int, help="number of workers: limited to CPU core count or GPU "
"count, cannot be <=0.")
@click.option('-g', '--gpu', is_flag=True,
help="bounds the number of workers to the number of available GPUs (not under load)."
"Each process only sees a single GPU.")
@click.option('-c', '--config-ids', type=int, multiple=True)
@click.option('--cancel', is_flag=True, help="cancel all tasks if one fails")
def main(params, module, runs, name, workers, gpu, config_ids, cancel):
logger = logging.getLogger(__name__)
handler = logging.FileHandler('{name}.log'.format(name=name), delay=True)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
try:
if gpu:
            # detecting available gpus with load < 0.2
worker_ids = [g.id for g in GPUtil.getGPUs() if g.load < 0.2]
num_workers = min(workers, len(worker_ids))
if num_workers <= 0:
logger.log(logging.ERROR, "no gpus available")
sys.exit(1)
else:
num_workers = min(workers, mp.cpu_count())
if num_workers <= 0:
logger.log(logging.ERROR, "--workers cannot be 0")
sys.exit(1)
ps = ParamSpace(filename=params)
ps.write_configs('{}_params.csv'.format(name))
param_grid = ps.param_grid(runs=runs)
n_tasks = ps.size * runs
if len(config_ids) > 0:
n_tasks = len(config_ids) * runs
param_grid = [p for p in param_grid if p["id"] in config_ids]
param_grid = iter(param_grid)
num_workers = min(n_tasks, num_workers)
print("----------Parameter Space Runner------------")
print(":: tasks: {}".format(n_tasks))
print(":: workers: {}".format(num_workers))
print("--------------------------------------------")
config_queue = Queue()
result_queue = Queue()
error_queue = Queue()
progress_bar = tqdm(total=n_tasks, leave=True)
terminate_flags = [Event() for _ in range(num_workers)]
processes = [
Process(target=worker,
args=(i, module, config_queue, result_queue, error_queue, terminate_flags[i], cancel))
for i in range(num_workers)]
scores = {}
configs = {}
# submit num worker jobs
for _ in range(num_workers):
next_cfg = next(param_grid)
configs[next_cfg["id"]] = next_cfg
config_queue.put(next_cfg)
for p in processes:
p.daemon = True
p.start()
num_completed = 0
pending = num_workers
done = False
successful = set()
while num_completed < n_tasks and not done:
try:
res = result_queue.get(timeout=1)
pid, cfg_id, result = res
if not isinstance(result, Exception):
successful.add(cfg_id)
# cfg = configs[cfg_id]
scores[cfg_id] = result
num_completed += 1
pending -= 1
if (num_completed + pending) != n_tasks:
next_cfg = next(param_grid)
configs[next_cfg["id"]] = next_cfg
config_queue.put(next_cfg)
pending += 1
else:
                        # no more work to be done: signal the current worker to terminate
terminate_flags[pid].set()
progress_bar.update()
else:
                        # retrieve one error from the queue; it might not be exactly the one that failed,
                        # since other workers can also write to the queue, but we will have at least one error to retrieve
_, cfg_id_err, err = error_queue.get()
logger.error("configuration {} failed".format(cfg_id_err))
logger.error(err)
if cancel:
done = True
else:
num_completed += 1
pending -= 1
if (num_completed + pending) != n_tasks:
next_cfg = next(param_grid)
configs[next_cfg["id"]] = next_cfg
config_queue.put(next_cfg)
pending += 1
else:
                            # no more work to be done: signal the current worker to terminate
terminate_flags[pid].set()
progress_bar.update()
except QueueEmpty:
pass
# try to wait for process termination
for process in processes:
process.join(timeout=0.5)
if process.is_alive():
process.terminate()
if len(config_ids) > 0:
all_ids = set(config_ids)
else:
all_ids = set(range(ps.size))
failed_tasks = all_ids.difference(successful)
if len(failed_tasks) > 0:
ids = " ".join(map(str, failed_tasks))
fail_runs = "failed runs: {}".format(ids)
print(fail_runs, file=sys.stderr)
            logger.warning(fail_runs)
progress_bar.close()
except TomlDecodeError as e:
logger.error(traceback.format_exc())
print("\n\n[Invalid parameter file] TOML decode error:\n {}".format(e), file=sys.stderr)
except ParamDecodeError as e:
logger.error(traceback.format_exc())
print("\n\n[Invalid parameter file]\n {}".format(e), file=sys.stderr)
if __name__ == '__main__':
main()
|
465050
|
from .entity import EntityType, Entity
from .validator import PropertyValidator
from .endpoint import EndpointType
# Endpoint Payload
class EndpointPayloadType(EntityType):
__schema_name__ = "EndpointPayload"
__openapi_type__ = "endpoint_payload"
class EndpointPayloadValidator(PropertyValidator, openapi_type="endpoint_payload"):
__default__ = None
__kind__ = EndpointPayloadType
def _endpoint_payload(**kwargs):
name = kwargs.get("name", None)
bases = (Entity,)
return EndpointPayloadType(name, bases, kwargs)
EndpointPayload = _endpoint_payload()
def create_endpoint_payload(UserEndpoint):
err = {"error": "", "code": -1}
if UserEndpoint is None:
err["error"] = "Given endpoint is empty."
return None, err
if not isinstance(UserEndpoint, EndpointType):
err["error"] = "Given endpoint is not of type Endpoint"
return None, err
spec = {
"name": UserEndpoint.__name__,
"description": UserEndpoint.__doc__ or "",
"resources": UserEndpoint,
}
metadata = {"spec_version": 1, "kind": "endpoint", "name": UserEndpoint.__name__}
UserEndpointPayload = _endpoint_payload()
UserEndpointPayload.metadata = metadata
UserEndpointPayload.spec = spec
return UserEndpointPayload, None
|
465057
|
import panel as pn
from .compatibility import logger
from .sigslot import SigSlot
TEXT = """
Convert data variables to coordinates to use them as axes. For more information,
please refer to the [documentation](https://xrviz.readthedocs.io/en/latest/interface.html#set-coords).
"""
class CoordSetter(SigSlot):
"""
An input pane for choosing which variables are considered coordinates.
It uses a `Cross Selector <https://panel.pyviz.org/reference/widgets/CrossSelector.html>`_
to display a list of simple and coordinate variables.
Simple variables (which are not data coordinates) are available on
left side and default coordinates are available on right side.
To set variables as coordinates, make selection on left side and click
``>>``. Similarly making selection on right side and clicking ``<<``
will reset the coordinates. Other panes update themselves
accordingly, in response to this change.
"""
def __init__(self, data):
super().__init__()
self.data = data
self.name = 'Set Coords'
self.coord_selector = pn.widgets.CrossSelector(
value=list(self.data.coords),
options=list(self.data.variables)
)
self.panel = pn.Column(
pn.pane.Markdown(TEXT, margin=(0, 20)),
self.coord_selector, name=self.name)
def set_coords(self, data):
"""
        Called when the interface's data attribute has a change in the variables considered as coordinates.
"""
self.data = data
logger.debug(self.coord_selector.value)
self.coord_selector.value = list(self.data.coords)
def setup_initial_values(self, init_params={}):
"""
        Set the variables whose names have been passed as coordinates.
"""
if self.name in init_params:
self.coord_selector.value = init_params[self.name]
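# A minimal usage sketch, assuming an xarray Dataset is at hand; the tutorial dataset
# below is only an illustration (it is downloaded on first use), any Dataset works.
def _example_coord_setter():
    import xarray as xr

    ds = xr.tutorial.open_dataset("air_temperature")
    setter = CoordSetter(ds)
    # Embed ``setter.panel`` in a layout or call ``setter.panel.show()`` to serve it.
    return setter.panel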
|
465059
|
from lepo.apidef.doc import APIDefinition
from lepo.parameter_utils import read_parameters
from lepo_tests.tests.utils import make_request_for_operation
doc = APIDefinition.from_yaml('''
openapi: 3.0.0
servers: []
paths:
/single/{thing}/data{format}:
get:
parameters:
- name: format
in: path
style: label
schema:
type: string
- name: thing
in: path
schema:
type: string
/array/{thing}/data{format}:
get:
parameters:
- name: format
in: path
style: label
explode: true
schema:
type: array
items:
type: string
- name: thing
in: path
schema:
type: string
/object/{thing}/data{format}:
get:
parameters:
- name: format
in: path
style: label
explode: false
schema:
type: object
items:
type: string
- name: thing
in: path
schema:
type: string
''')
def test_label_parameter():
request = make_request_for_operation(doc.get_path('/single/{thing}/data{format}').get_operation('get'))
params = read_parameters(request, {
'thing': 'blorp',
'format': '.json',
})
assert params == {
'thing': 'blorp',
'format': 'json', # Notice the missing dot
}
def test_label_array_parameter():
request = make_request_for_operation(doc.get_path('/array/{thing}/data{format}').get_operation('get'))
params = read_parameters(request, {
'thing': 'blorp',
'format': '.json.yaml.xml.pdf',
})
assert params == {
'thing': 'blorp',
'format': ['json', 'yaml', 'xml', 'pdf'], # An eldritch monstrosity
}
def test_label_object_parameter():
request = make_request_for_operation(doc.get_path('/object/{thing}/data{format}').get_operation('get'))
params = read_parameters(request, {
'thing': 'blorp',
'format': '.some,body,once,told',
})
assert params == {
'thing': 'blorp',
'format': {'some': 'body', 'once': 'told'},
}
|
465070
|
import re
from dateutil.parser import parse
from django.utils.translation import ugettext_lazy as _
from .classes import MetadataParser
class DateAndTimeParser(MetadataParser):
label = _('Date and time parser')
def execute(self, input_data):
return parse(input_data).isoformat()
class DateParser(MetadataParser):
label = _('Date parser')
def execute(self, input_data):
return parse(input_data).date().isoformat()
class RegularExpressionParser(MetadataParser):
arguments = ('pattern', 'replacement')
label = _('Regular expression parser')
def execute(self, input_data):
return re.sub(
pattern=self.kwargs['pattern'], repl=self.kwargs['replacement'],
string=input_data
)
class TimeParser(MetadataParser):
label = _('Time parser')
def execute(self, input_data):
return parse(input_data).time().isoformat()
|
465110
|
import logging
import typing
from .node_data import NodeData, NodeDataModel, NodeDataType
from .type_converter import TypeConverter
logger = logging.getLogger(__name__)
class DataModelRegistry:
def __init__(self):
self.type_converters = {}
self._models_category = {}
self._item_creators = {}
self._categories = set()
def register_model(self, creator, category='', *, style=None, **init_kwargs):
name = creator.name
self._item_creators[name] = (creator, {'style': style, **init_kwargs})
self._categories.add(category)
self._models_category[name] = category
def register_type_converter(self,
type_in: NodeDataType,
type_out: NodeDataType,
type_converter: TypeConverter):
"""
        Register a type converter between two data types.
Parameters
----------
type_in : NodeDataType or NodeData subclass
The input type.
type_out : NodeDataType or NodeData subclass
The output type.
type_converter : TypeConverter
The type converter to use for the conversion.
"""
# TODO typing annotation
if hasattr(type_in, 'data_type'):
type_in = typing.cast(NodeData, type_in).data_type
if hasattr(type_out, 'data_type'):
type_out = typing.cast(NodeData, type_out).data_type
self.type_converters[(type_in, type_out)] = type_converter
def create(self, model_name: str) -> NodeDataModel:
"""
Create a :class:`NodeDataModel` given its user-friendly name.
Parameters
----------
model_name : str
Returns
-------
data_model_instance : NodeDataModel
The instance of the given data model.
Raises
------
ValueError
If the model name is not registered.
"""
cls, kwargs = self.get_model_by_name(model_name)
return cls(**kwargs)
def get_model_by_name(self, model_name: str
) -> typing.Tuple[typing.Type[NodeDataModel], dict]:
"""
Get information on how to create a specific :class:`NodeDataModel`
node given its user-friendly name.
Parameters
----------
model_name : str
Returns
-------
data_model : NodeDataModel
The data model class.
init_kwargs : dict
Default init keyword arguments.
Raises
------
ValueError
If the model name is not registered.
"""
try:
return self._item_creators[model_name]
except KeyError:
raise ValueError(f'Unknown model: {model_name}') from None
def registered_model_creators(self) -> dict:
"""
Registered model creators
Returns
-------
value : dict
"""
return dict(self._item_creators)
def registered_models_category_association(self) -> dict:
"""
Registered models category association
Returns
-------
value : DataModelRegistry.RegisteredModelsCategoryMap
"""
return self._models_category
def categories(self) -> set:
"""
Categories
Returns
-------
value : DataModelRegistry.CategoriesSet
"""
return self._categories
def get_type_converter(self, d1: NodeDataType, d2: NodeDataType) -> TypeConverter:
"""
Get type converter
Parameters
----------
d1 : NodeDataType
d2 : NodeDataType
Returns
-------
value : TypeConverter
"""
return self.type_converters.get((d1, d2), None)
|
465138
|
import random
from rlkit.exploration_strategies.base import RawExplorationStrategy
class EpsilonGreedy(RawExplorationStrategy):
"""
Take a random discrete action with some probability.
"""
def __init__(self, action_space, prob_random_action=0.1):
self.prob_random_action = prob_random_action
self.action_space = action_space
def get_action_from_raw_action(self, action, **kwargs):
if random.random() <= self.prob_random_action:
return self.action_space.sample()
return action
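# A minimal usage sketch; the gym Discrete space is used purely for illustration, any
# object exposing a ``sample()`` method can serve as the action space.
def _example_epsilon_greedy():
    import gym

    strategy = EpsilonGreedy(gym.spaces.Discrete(4), prob_random_action=0.1)
    greedy_action = 2
    # With probability 0.1 a random action is returned, otherwise the greedy one.
    return strategy.get_action_from_raw_action(greedy_action)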
|
465181
|
import os
class Config:
API_KEY = os.environ['OST_KYC_API_KEY']
API_SECRET = os.environ['OST_KYC_API_SECRET']
API_BASE_URL = os.environ['OST_KYC_API_ENDPOINT']
test_obj_for_signature = {
'k1': 'Rachin',
'k2': 'tejas',
'list2': [
{'a': 'L21A', 'b': 'L21B'},
{'a': 'L22A', 'b': 'L22B'},
{'a': 'L23A', 'b': 'L23B'}
],
'make_mistakes': None,
'nice_param': [],
'empty_obj': {},
'empty_str': '',
'garbage_str': "~^[]%$#@!&*~,./?~()-_'this is garbage",
'id': 11003,
'email': '<EMAIL>'
}
test_obj_for_requests = {
'garbage_str': "~^[]%$#@!&*~,./?~()-_'this is garbage",
'id': 11003,
'email': '<EMAIL>'
}
test_endpoint = '/api/v2/users'
GENERATED_SIGNATURE = "c42188c53bfdf84e542a0a9c0a78d19c9f497c61e816f169e1907bc98477eb82"
USER_ID = os.environ['USER_ID']
API_SECRET_TO_TEST_SIGNATURE = "35f346e5ef825ed4499da98a6ac6b401"
|
465191
|
from typing import List
import numpy as np
from bartpy.mutation import TreeMutation
from bartpy.node import TreeNode, LeafNode, DecisionNode, deep_copy_node
class Tree:
"""
An encapsulation of the structure of a single decision tree
Contains no logic, but keeps track of 4 different kinds of nodes within the tree:
- leaf nodes
- decision nodes
- splittable leaf nodes
- prunable decision nodes
Parameters
----------
    nodes: List[TreeNode]
All nodes contained in the tree, i.e. decision and leaf nodes
"""
def __init__(self, nodes: List[TreeNode]):
self._nodes = nodes
self.cache_up_to_date = False
self._prediction = None
@property
def nodes(self) -> List[TreeNode]:
"""
List of all nodes contained in the tree
"""
return self._nodes
@property
def leaf_nodes(self) -> List[LeafNode]:
"""
List of all of the leaf nodes in the tree
"""
return [x for x in self._nodes if type(x) == LeafNode]
@property
def splittable_leaf_nodes(self) -> List[LeafNode]:
"""
List of all leaf nodes in the tree which can be split in a non-degenerate way
i.e. not all rows of the covariate matrix are duplicates
"""
return [x for x in self.leaf_nodes if x.is_splittable()]
@property
def decision_nodes(self) -> List[DecisionNode]:
"""
List of decision nodes in the tree.
Decision nodes are internal split nodes, i.e. not leaf nodes
"""
return [x for x in self._nodes if type(x) == DecisionNode]
@property
def prunable_decision_nodes(self) -> List[DecisionNode]:
"""
List of decision nodes in the tree that are suitable for pruning
In particular, decision nodes that have two leaf node children
"""
return [x for x in self.decision_nodes if x.is_prunable()]
def update_y(self, y: np.ndarray) -> None:
"""
Update the cached value of the target array in all nodes
Used to pass in the residuals from the sum of all of the other trees
"""
self.cache_up_to_date = False
for node in self.nodes:
node.update_y(y)
def predict(self, X: np.ndarray=None) -> np.ndarray:
"""
Generate a set of predictions with the same dimensionality as the target array
Note that the prediction is from one tree, so represents only (1 / number_of_trees) of the target
"""
if X is not None:
return self._out_of_sample_predict(X)
if self.cache_up_to_date:
return self._prediction
for leaf in self.leaf_nodes:
if self._prediction is None:
self._prediction = np.zeros(self.nodes[0].data.X.n_obsv)
self._prediction[leaf.split.condition()] = leaf.predict()
self.cache_up_to_date = True
return self._prediction
def _out_of_sample_predict(self, X) -> np.ndarray:
"""
Prediction for a covariate matrix not used for training
Note that this is quite slow
Parameters
----------
X: pd.DataFrame
Covariates to predict for
Returns
-------
np.ndarray
"""
prediction = np.array([0.] * len(X))
for leaf in self.leaf_nodes:
prediction[leaf.split.condition(X)] = leaf.predict()
return prediction
def remove_node(self, node: TreeNode) -> None:
"""
Remove a single node from the tree
Note that this is non-recursive, only drops the node and not any children
"""
self._nodes.remove(node)
def add_node(self, node: TreeNode) -> None:
"""
Add a node to the tree
Note that this is non-recursive, only adds the node and not any children
"""
self._nodes.append(node)
def mutate(tree: Tree, mutation: TreeMutation) -> None:
"""
Apply a change to the structure of the tree
Modifies not only the tree, but also the links between the TreeNodes
Parameters
----------
tree: Tree
The tree to mutate
mutation: TreeMutation
The mutation to apply to the tree
"""
tree.cache_up_to_date = False
if mutation.kind == "prune":
tree.remove_node(mutation.existing_node)
tree.remove_node(mutation.existing_node.left_child)
tree.remove_node(mutation.existing_node.right_child)
tree.add_node(mutation.updated_node)
if mutation.kind == "grow":
tree.remove_node(mutation.existing_node)
tree.add_node(mutation.updated_node.left_child)
tree.add_node(mutation.updated_node.right_child)
tree.add_node(mutation.updated_node)
for node in tree.nodes:
if node.right_child == mutation.existing_node:
node._right_child = mutation.updated_node
if node.left_child == mutation.existing_node:
node._left_child = mutation.updated_node
def deep_copy_tree(tree: Tree):
"""
Efficiently create a copy of the tree for storage
Creates a memory-light version of the tree with access to important information
Parameters
----------
tree: Tree
Tree to copy
Returns
-------
Tree
Version of the tree optimized to be low memory
"""
return Tree([deep_copy_node(x) for x in tree.nodes])
|
465259
|
import pytest
from .conftest import CollectingQueryRunner
from graphdatascience.graph_data_science import GraphDataScience
from graphdatascience.model.graphsage_model import GraphSageModel
from graphdatascience.model.model import Model
from graphdatascience.server_version.server_version import ServerVersion
MODEL_NAME = "dummy"
@pytest.fixture
def model(runner: CollectingQueryRunner, server_version: ServerVersion) -> Model:
return GraphSageModel(MODEL_NAME, runner, server_version)
def test_store_model(runner: CollectingQueryRunner, gds: GraphDataScience, model: Model) -> None:
gds.alpha.model.store(model, False)
assert runner.last_query() == "CALL gds.alpha.model.store($model_name, $fail_flag)"
assert runner.last_params() == {"model_name": MODEL_NAME, "fail_flag": False}
def test_list_models(runner: CollectingQueryRunner, gds: GraphDataScience, model: Model) -> None:
gds.beta.model.list(model)
assert runner.last_query() == "CALL gds.beta.model.list($model_name)"
assert runner.last_params() == {"model_name": MODEL_NAME}
gds.beta.model.list()
assert runner.last_query() == "CALL gds.beta.model.list()"
assert runner.last_params() == {}
def test_exists_model(runner: CollectingQueryRunner, gds: GraphDataScience) -> None:
gds.alpha.model.exists("my_model")
assert runner.last_query() == "CALL gds.alpha.model.exists($model_name)"
assert runner.last_params() == {"model_name": "my_model"}
def test_drop_model(runner: CollectingQueryRunner, gds: GraphDataScience, model: Model) -> None:
gds.alpha.model.drop(model)
assert runner.last_query() == "CALL gds.alpha.model.drop($model_name)"
assert runner.last_params() == {"model_name": MODEL_NAME}
def test_delete_model(runner: CollectingQueryRunner, gds: GraphDataScience, model: Model) -> None:
gds.alpha.model.delete(model)
assert runner.last_query() == "CALL gds.alpha.model.delete($model_name)"
assert runner.last_params() == {"model_name": MODEL_NAME}
|
465335
|
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScPresidenteGetulioSpider(FecamGazetteSpider):
name = "sc_presidente_getulio"
FECAM_QUERY = "cod_entidade:211"
TERRITORY_ID = "4214003"
|
465350
|
import numpy as np
import tensorflow as tf
def f_net(dimH, name):
    print('add in 1 hidden layer mlp...')
W1 = tf.Variable(tf.random_normal(shape=(dimH, ))*0.01, name = name + '_W1')
b1 = tf.Variable(tf.random_normal(shape=(dimH, ))*0.01, name = name + '_b1')
#W2 = tf.Variable(tf.random_normal(shape=(dimH, dimH))*0.01, name = name + '_W2')
#b2 = tf.Variable(tf.random_normal(shape=(dimH, ))*0.01, name = name + '_b2')
W3 = tf.Variable(tf.random_normal(shape=(dimH,))*0.01, name = name + '_W3')
b3 = tf.Variable(tf.random_normal(shape=())*0.01, name = name + '_b3')
def apply(z):
x = tf.expand_dims(z, 2)
x = tf.nn.relu(x * W1 + b1) # (K, dimX, dimH)
#x = tf.expand_dims(x, 3)
#x = tf.nn.relu(tf.reduce_sum(x * W2, 2) + b2)
x = tf.reduce_sum(x * W3, 2) + b3 # (K, dimX)
return x
return apply
def g_net(dimH, name):
    print('add in 1 hidden layer mlp...')
W1 = tf.Variable(tf.random_normal(shape=(dimH, ))*0.01, name = name + '_W1')
b1 = tf.Variable(tf.random_normal(shape=(dimH, ))*0.01, name = name + '_b1')
#W2 = tf.Variable(tf.random_normal(shape=(dimH, dimH))*0.01, name = name + '_W2')
#b2 = tf.Variable(tf.random_normal(shape=(dimH, ))*0.01, name = name + '_b2')
W3 = tf.Variable(tf.random_normal(shape=(dimH,))*0.01, name = name + '_W3')
b3 = tf.Variable(tf.random_normal(shape=())*0.01, name = name + '_b3')
def apply(z):
x = tf.expand_dims(z, 2)
x = tf.nn.relu(x * W1 + b1) # (K, dimX, dimH)
x = tf.reduce_sum(x, 1) # (K, dimH)
#x = tf.expand_dims(tf.reduce_sum(x, 1), 2) # (K, dimH, 1)
#x = tf.nn.relu(tf.reduce_sum(x * W2, 1) + b2) # (K, dimH)
x = tf.reduce_sum(x * W3, 1) + b3 # (K, )
return tf.expand_dims(x, 1)
return apply
def init_nn_sampler(dimH, name = 'nn_sampler'):
# parameters
    print('construct two MLPs with %d units...' % dimH)
f = f_net(dimH, name=name+'_f_net')
g = g_net(dimH, name=name+'_g_net')
def network_transform(z, grad_z, eps, data_N, compute_gamma=True):
# compute D matrix
f_out = f(z) # (K, dimX)
g_out = g(z) # (K, 1)
mean_g = (grad_z + z) / data_N # assume gaussian prior
square_grad = tf.reduce_sum(mean_g ** 2, 1, keep_dims=True) # (K, 1)
h_out = 1e-5 + tf.sqrt(square_grad)
D_matrix = tf.nn.relu(f_out + g_out) / h_out
direction = D_matrix * grad_z
if compute_gamma:
# now compute gamma vector
print "use gamma vector"
df = tf.gradients(f_out, z)[0]
dg = tf.gradients(g_out, z)[0]
dlogh = tf.gradients(tf.log(h_out), z)[0]
gamma_vector = (df + dg - dlogh * (f_out + g_out)) / h_out
gamma_vector = tf.where(D_matrix > 0, gamma_vector, tf.zeros(z.get_shape()))
direction += gamma_vector
# compute output
noise = tf.random_normal(z.get_shape())
delta = eps * direction + tf.sqrt(2 * eps * D_matrix) * noise
return delta
def nn_sampler(z, X, y, data_N, grad_logp_func, shapes = None,
eps = 1e-5, compute_gamma = True):
print "calling the sampler, shape(Theta)=", z.get_shape()
noise = tf.random_normal(z.get_shape())
grad_logp = grad_logp_func(X, y, z, data_N, shapes)
delta = network_transform(z, grad_logp, eps, data_N, compute_gamma)
z = z + delta
return z
return nn_sampler
|
465365
|
from stix2.v21 import Bundle

# Note: this fragment assumes that the objects referenced below (bundle, threat_actor,
# identity, malware, attack_pattern and relationship1-3) were created earlier in the
# original example; only the reporting loop over the bundle is shown here.
for obj in bundle.objects:
if obj == threat_actor:
print("------------------")
print("== THREAT ACTOR ==")
print("------------------")
print("ID: " + obj.id)
print("Created: " + str(obj.created))
print("Modified: " + str(obj.modified))
print("Name: " + obj.name)
print("Description: " + obj.description)
print("Threat Actor Types: " + str(obj.threat_actor_types))
elif obj == identity:
print("------------------")
print("== IDENTITY ==")
print("------------------")
print("ID: " + obj.id)
print("Created: " + str(obj.created))
print("Modified: " + str(obj.modified))
print("Name: " + obj.name)
print("Description: " + obj.description)
print("Identity Class: " + obj.identity_class)
elif obj == malware:
print("------------------")
print("== MALWARE ==")
print("------------------")
print("ID: " + obj.id)
print("Created: " + str(obj.created))
print("Modified: " + str(obj.modified))
print("Name: " + obj.name)
print("Type: " + obj.type)
print("Malware Types: " + str(obj.malware_types))
print("Is Family:" + str(obj.is_family))
print("Kill Chain: " + str(obj.kill_chain_phases))
elif obj == attack_pattern:
print("------------------")
print("== ATTACK PATTERN ==")
print("------------------")
print("ID: " + obj.id)
print("Created: " + str(obj.created))
print("Modified: " + str(obj.modified))
print("Name: " + obj.name)
print("Description: " + obj.description)
print("Type: " + obj.type)
print("Kill Chain Phases: " + str(obj.kill_chain_phases))
print("External References: " + str(obj.external_references))
    elif obj in (relationship1, relationship2, relationship3):
        print("------------------")
        print("== RELATIONSHIP ==")
        print("------------------")
        print("ID: " + obj.id)
        print("Created: " + str(obj.created))
        print("Modified: " + str(obj.modified))
        print("Type: " + obj.type)
        print("Relationship Type: " + obj.relationship_type)
        print("Source Ref: " + obj.source_ref)
        print("Target Ref: " + obj.target_ref)
|
465376
|
from django.utils.timezone import now
from model_bakery.recipe import Recipe
from tests.generic.models import Person
person = Recipe(
Person,
name="Uninstalled",
nickname="uninstalled",
age=18,
bio="Uninstalled",
blog="http://uninstalled.com",
days_since_last_login=4,
birthday=now().date(),
appointment=now(),
birth_time=now(),
)
|
465420
|
import dataclasses
import enum
import typing
# An account identifier may be a byte array of 33 bytes or
# a hexadecimal string of 66 characters.
AccountID = typing.Union[bytes, str]
# A block identifier may be a byte array of 32 bytes,
# a hexadecimal string of 64 characters or a positive integer.
BlockID = typing.Union[bytes, str, int]
# On chain contract identifier.
ContractID = typing.NewType("Static contract pointer", bytes)
# On chain contract version.
ContractVersion = typing.NewType("U32 integer representing a contract version", int)
# A deploy identifier is a 32 byte array or its hexadecimal string equivalent.
DeployID = typing.Union[bytes, str]
class GlobalStateIDType(enum.Enum):
"""Enumeration over set of CL type keys.
"""
STATE_ROOT = enum.auto()
BLOCK = enum.auto()
@dataclasses.dataclass
class GlobalStateID():
# 32 byte global state identifier, either a block or state root hash.
identifier: bytes
# Type of identifier.
id_type: GlobalStateIDType
# Root hash of a node's global state.
StateRootHash = typing.NewType(
"Cumulative hash of block execution effects over global state.",
bytes
)
@dataclasses.dataclass
class DictionaryID():
"""A set of variants for performation dictionary item state queries.
"""
pass
@dataclasses.dataclass
class DictionaryID_AccountNamedKey(DictionaryID):
"""Encapsulates information required to query a dictionary item via an Account's named keys.
"""
# The dictionary item key.
dictionary_item_key: str
# The named key under which the dictionary seed URef is stored.
dictionary_name: str
# The account key as a formatted string whose named keys contains dictionary_name.
key: str
def __eq__(self, other) -> bool:
return super().__eq__(other) and \
self.dictionary_item_key == other.dictionary_item_key and \
self.dictionary_name == other.dictionary_name and \
self.key == other.key
@dataclasses.dataclass
class DictionaryID_ContractNamedKey(DictionaryID):
"""Encapsulates information required to query a dictionary item via a Contract's named keys.
"""
# The dictionary item key.
dictionary_item_key: str
# The named key under which the dictionary seed URef is stored.
dictionary_name: str
# The contract key as a formatted string whose named keys contains dictionary_name.
key: str
def __eq__(self, other) -> bool:
return super().__eq__(other) and \
self.dictionary_item_key == other.dictionary_item_key and \
self.dictionary_name == other.dictionary_name and \
self.key == other.key
@dataclasses.dataclass
class DictionaryID_SeedURef(DictionaryID):
"""Encapsulates information required to query a dictionary item
    via its seed unforgeable reference.
"""
# The dictionary item key.
dictionary_item_key: str
# The dictionary's seed URef.
seed_uref: object
def __eq__(self, other) -> bool:
return super().__eq__(other) and \
self.dictionary_item_key == other.dictionary_item_key and \
self.seed_uref == other.seed_uref
@dataclasses.dataclass
class DictionaryID_UniqueKey(DictionaryID):
"""Encapsulates information required to query a dictionary item via it's unique key.
"""
# The globally unique dictionary key.
key: str
def __eq__(self, other) -> bool:
return super().__eq__(other) and self.key == other.key
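# A minimal sketch of constructing the identifier types defined above; the byte values
# and key are placeholders, not real block hashes or dictionary keys.
def _example_identifiers():
    state_id = GlobalStateID(identifier=bytes(32), id_type=GlobalStateIDType.BLOCK)
    dictionary_id = DictionaryID_UniqueKey(key="a-globally-unique-dictionary-key")
    return state_id, dictionary_id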
|
465433
|
from app.db.base_class import Base
from sqlalchemy import ForeignKey, Integer, Column, String, Boolean
from app.core.config import settings
class Role(Base):
"""
A world can have multiple roles.
"""
role_id = Column(Integer, primary_key=True, autoincrement=True)
world_id = Column(
Integer, ForeignKey(settings.SCHEMA_NAME + ".world.world_id"),
nullable=False
)
name = Column(String(30), nullable=False)
# alembic requires the attribute server default to work
is_default = Column(Boolean, server_default='f', default=False)
interact = Column(Boolean, server_default='f', default=False)
walk = Column(Boolean, server_default='f', default=False)
talk = Column(Boolean, server_default='f', default=False)
talk_conference = Column(Boolean, server_default='f', default=False)
world_mute = Column(Boolean, server_default='f', default=False)
role_manage = Column(Boolean, server_default='f', default=False)
conference_manage = Column(Boolean, server_default='f', default=False)
chat = Column(Boolean, server_default='f', default=False)
invite = Column(Boolean, server_default='f', default=False)
ban = Column(Boolean, server_default='f', default=False)
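# A minimal sketch of persisting a role; the session object, the role name and the
# permission values below are assumptions for illustration, and the keyword constructor
# is the default one provided by the declarative base.
def _example_create_role(db_session, world_id: int) -> "Role":
    moderator = Role(world_id=world_id, name="moderator",
                     interact=True, walk=True, talk=True)
    db_session.add(moderator)
    db_session.commit()
    return moderator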
|
465447
|
import unittest
import numpy
import pandas as pd
import scipy.sparse
import scipy.sparse.csr
import sklearn.linear_model
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import willump.evaluation.willump_executor as wexec
with open("tests/test_resources/simple_vocabulary.txt") as simple_vocab:
simple_vocab_dict = {word: index for index, word in
enumerate(simple_vocab.read().splitlines())}
vectorizer = CountVectorizer(analyzer='char', ngram_range=(3, 5), min_df=0.005, max_df=1.0,
lowercase=False, stop_words=None, binary=False, decode_error='replace',
vocabulary=simple_vocab_dict)
@wexec.willump_execute()
def sample_stack_sparse(array_one, input_vect):
df = pd.DataFrame()
df["strings"] = array_one
np_input = list(df["strings"].values)
transformed_result = input_vect.transform(np_input)
transformed_result = scipy.sparse.hstack([transformed_result, transformed_result], format="csr")
return transformed_result
model = sklearn.linear_model.LogisticRegression(solver='lbfgs')
model.intercept_ = numpy.array([0.2], dtype=numpy.float64)
model.classes_ = numpy.array([0, 1], dtype=numpy.int64)
@wexec.willump_execute(num_workers=1)
def stack_sparse_then_linear_regression(array_one, array_two, input_vect):
transformed_result_one = input_vect.transform(array_one)
transformed_result_two = input_vect.transform(array_two)
combined_result = scipy.sparse.hstack([transformed_result_one, transformed_result_two], format="csr")
predicted_result = model.predict(combined_result)
return predicted_result
@wexec.willump_execute(num_workers=1)
def stack_sparse_then_linear_regression_coalesce_parallel(array_one, array_two, array_three, input_vect):
transformed_result_one = input_vect.transform(array_one)
transformed_result_two = input_vect.transform(array_two)
transformed_result_three = input_vect.transform(array_three)
combined_result = scipy.sparse.hstack([transformed_result_one, transformed_result_two, transformed_result_three],
format="csr")
predicted_result = model.predict(combined_result)
return predicted_result
tf_idf_vec = \
TfidfVectorizer(analyzer='char', ngram_range=(2, 5), vocabulary=simple_vocab_dict,
lowercase=False)
tf_idf_vec.fit(["theaancatdog house", "bobthe builder", "dogisgooddog"])
@wexec.willump_execute()
def stack_sparse_tfidf(array_one, array_two, input_vect, tf_idf_vect):
transformed_result_one = input_vect.transform(array_one)
transformed_result_two = tf_idf_vect.transform(array_two)
combined_result = scipy.sparse.hstack([transformed_result_one, transformed_result_two], format="csr")
return combined_result
@wexec.willump_execute(num_workers=0)
def stack_sparse_then_linear_regression_tfidf(array_one, array_two, input_vect, tf_idf_vect):
transformed_result_one = input_vect.transform(array_one)
transformed_result_two = tf_idf_vect.transform(array_two)
combined_result = scipy.sparse.hstack([transformed_result_one, transformed_result_two], format="csr")
predicted_result = model.predict(combined_result)
return predicted_result
class StackingNodeTests(unittest.TestCase):
def test_sparse_stacking(self):
print("\ntest_sparse_stacking")
string_array = ["theaancatdog house", "bobthe builder"]
transformed_result = vectorizer.transform(string_array)
correct_result = scipy.sparse.hstack([transformed_result, transformed_result], format="csr").toarray()
sample_stack_sparse(string_array, vectorizer)
sample_stack_sparse(string_array, vectorizer)
weld_csr_matrix = sample_stack_sparse(string_array, vectorizer)
weld_matrix = weld_csr_matrix.toarray()
numpy.testing.assert_almost_equal(weld_matrix, correct_result)
def test_sparse_stacking_linear_model(self):
print("\ntest_sparse_stacking_linear_model")
model.coef_ = numpy.array([[0, 0.2, 0.3, 0.4, -0.5, 0.6, 0.2, 0.2, 0.3, 0.4, -0.5, 0.6]], dtype=numpy.float64)
array_one = ["dogdogdogdog house", "bobthe builder", "dog the the the the", "dog"]
array_two = ["dogdogdogdog house", "bobthe builder", "dog the the the", "dogthethe the the the the the"]
result_one = vectorizer.transform(array_one)
result_two = vectorizer.transform(array_two)
correct_result = model.predict(scipy.sparse.hstack([result_one, result_two], format="csr"))
stack_sparse_then_linear_regression(array_one, array_two, vectorizer)
stack_sparse_then_linear_regression(array_one, array_two, vectorizer)
weld_output = stack_sparse_then_linear_regression(array_one, array_two, vectorizer)
numpy.testing.assert_equal(weld_output, correct_result)
def test_sparse_stacking_linear_model_parallel_coalesce(self):
print("\ntest_sparse_stacking_linear_model_parallel_coalesce")
model.coef_ = numpy.array([[0, 0.2, 0.3, 0.4, -0.5, 0.6, 0.2, 0.2, 0.3, 0.4, -0.5, 0.6, 0.1,
0.2, 0.3, 0.4, 0.5, 0.6]], dtype=numpy.float64)
array_one = ["dogdogdogdog house", "bobthe builder", "dog the the the the", "dog"]
array_two = ["dogdogdogdog house", "bobthe builder", "dog the the the", "dogthethe the the the the the"]
array_three = ["dogdogdogdog house", "bobthe builder", "dog the the the the", "bbbbb"]
result_one = vectorizer.transform(array_one)
result_two = vectorizer.transform(array_two)
result_three = vectorizer.transform(array_three)
correct_result = model.predict(scipy.sparse.hstack([result_one, result_two, result_three], format="csr"))
stack_sparse_then_linear_regression_coalesce_parallel(array_one, array_two, array_three, vectorizer)
stack_sparse_then_linear_regression_coalesce_parallel(array_one, array_two, array_three, vectorizer)
weld_output = stack_sparse_then_linear_regression_coalesce_parallel(array_one, array_two, array_three,
vectorizer)
numpy.testing.assert_equal(weld_output, correct_result)
def test_sparse_stacking_tfidf(self):
print("\ntest_sparse_stacking_tfidf")
model.coef_ = numpy.array([[0, 0.2, 0.3, 0.4, -0.5, 0.6, 0.2, 0.2, 0.3, 0.4, -0.5, 0.6]], dtype=numpy.float64)
array_one = ["dogdogdogdog house", "bobthe builder", "dog the the the the", "dog", "bbbbb"]
array_two = ["dogdogdogdog house", "bobthe builder", "dog the the the", "dogthethe the the the the the", "bb"]
result_one = vectorizer.transform(array_one)
result_two = tf_idf_vec.transform(array_two)
correct_result = scipy.sparse.hstack([result_one, result_two], format="csr").toarray()
stack_sparse_tfidf(array_one, array_two, vectorizer, tf_idf_vec)
stack_sparse_tfidf(array_one, array_two, vectorizer, tf_idf_vec)
weld_csr_matrix = stack_sparse_tfidf(array_one, array_two, vectorizer, tf_idf_vec)
weld_matrix = weld_csr_matrix.toarray()
numpy.testing.assert_almost_equal(weld_matrix, correct_result)
def test_sparse_stacking_linreg_tfidf(self):
print("\ntest_sparse_stacking_linreg_tfidf")
model.coef_ = numpy.array([[0, 0.2, 0.3, 0.4, -0.5, 0.6, 0.2, 0.2, 0.3, 0.4, -0.5, 0.6]], dtype=numpy.float64)
array_one = ["dogdogdogdog house", "bobthe builder", "dog the the the the", "dog", "bbbbb"]
array_two = ["dogdogdogdog house", "bobthe builder", "dog the the the", "dogthethe the the the the the", "bb"]
result_one = vectorizer.transform(array_one)
result_two = tf_idf_vec.transform(array_two)
correct_result = model.predict(scipy.sparse.hstack([result_one, result_two], format="csr"))
stack_sparse_then_linear_regression_tfidf(array_one, array_two, vectorizer, tf_idf_vec)
stack_sparse_then_linear_regression_tfidf(array_one, array_two, vectorizer, tf_idf_vec)
weld_output = stack_sparse_then_linear_regression_tfidf(array_one, array_two, vectorizer, tf_idf_vec)
numpy.testing.assert_equal(weld_output, correct_result)
|
465450
|
import torch
import torch.nn as nn
from fewshots.models import register_model
from fewshots.models import r2d2, lrd2
from fewshots.utils import norm
def _norm(num_channels, bn_momentum, groupnorm=False):
if groupnorm:
return norm.GroupNorm(num_channels)
else:
return nn.BatchNorm2d(num_channels, momentum=bn_momentum)
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
return x.view(x.size(0), -1)
class RRFeatures(nn.Module):
def __init__(self, x_dim, parameters, lrelu_slope, drop, groupnorm, bn_momentum):
super(RRFeatures, self).__init__()
self.features1 = nn.Sequential(
nn.Conv2d(x_dim, parameters[0], 3, padding=1),
_norm(parameters[0], bn_momentum, groupnorm=groupnorm),
nn.MaxPool2d(2, stride=2),
nn.LeakyReLU(lrelu_slope))
self.features2 = nn.Sequential(
nn.Conv2d(parameters[0], parameters[1], 3, padding=1),
_norm(parameters[1], bn_momentum, groupnorm=groupnorm),
nn.MaxPool2d(2, stride=2),
nn.LeakyReLU(lrelu_slope))
self.features3 = nn.Sequential(
nn.Conv2d(parameters[1], parameters[2], 3, padding=1),
_norm(parameters[2], bn_momentum, groupnorm=groupnorm),
nn.MaxPool2d(2, stride=2),
nn.LeakyReLU(lrelu_slope),
nn.Dropout(drop))
self.features4 = nn.Sequential(
nn.Conv2d(parameters[2], parameters[3], 3, padding=1),
_norm(parameters[3], bn_momentum, groupnorm=groupnorm),
nn.MaxPool2d(2, stride=1),
nn.LeakyReLU(lrelu_slope),
nn.Dropout(drop))
self.pool3 = nn.MaxPool2d(2, stride=1)
def forward(self, x):
x = self.features1(x)
x = self.features2(x)
x = self.features3(x)
x3 = self.pool3(x)
x3 = x3.view(x3.size(0), -1)
x = self.features4(x)
x4 = x.view(x.size(0), -1)
x = torch.cat((x3, x4), 1)
return x
@register_model('RRNet')
def load_rrnet(**kwargs):
lrelu = kwargs['lrelu']
drop = kwargs['drop']
groupnorm = kwargs['groupnorm']
bn_momentum = kwargs['bn_momentum']
out_dim = kwargs['out_dim']
debug = kwargs['debug']
learn_lambda = kwargs['learn_lambda']
init_lambda = kwargs['init_lambda']
init_adj_scale = kwargs['init_adj_scale']
lambda_base = kwargs['lambda_base']
adj_base = kwargs['adj_base']
n_augment = kwargs['n_augment']
linsys = kwargs['linsys']
method = kwargs['method']
iterations = kwargs['iterations']
dataset = kwargs['dataset']
if dataset == 'omniglot':
x_dim = 1
else:
x_dim = 3
parameters = [96, 192, 384, 512]
encoder = RRFeatures(x_dim, parameters, lrelu, drop, groupnorm, bn_momentum)
if method == 'R2D2':
return r2d2.RRNet(encoder, debug, out_dim, learn_lambda, init_lambda, init_adj_scale, lambda_base, adj_base,
n_augment, linsys)
else:
return lrd2.LRD2(encoder, debug, out_dim, learn_lambda, init_lambda, init_adj_scale, lambda_base, adj_base,
n_augment, iterations, linsys)
@register_model('RRNet_small')
def load_rrnet_small(**kwargs):
lrelu = kwargs['lrelu']
drop = kwargs['drop']
groupnorm = kwargs['groupnorm']
bn_momentum = kwargs['bn_momentum']
out_dim = kwargs['out_dim']
debug = kwargs['debug']
learn_lambda = kwargs['learn_lambda']
init_lambda = kwargs['init_lambda']
init_adj_scale = kwargs['init_adj_scale']
lambda_base = kwargs['lambda_base']
adj_base = kwargs['adj_base']
n_augment = kwargs['n_augment']
linsys = kwargs['linsys']
method = kwargs['method']
iterations = kwargs['iterations']
dataset = kwargs['dataset']
if dataset == 'omniglot':
x_dim = 1
else:
x_dim = 3
parameters = [64, 64, 64, 64]
encoder = RRFeatures(x_dim, parameters, lrelu, drop, groupnorm, bn_momentum)
if method == 'R2D2':
return r2d2.RRNet(encoder, debug, out_dim, learn_lambda, init_lambda, init_adj_scale, lambda_base, adj_base,
n_augment, linsys)
else:
return lrd2.LRD2(encoder, debug, out_dim, learn_lambda, init_lambda, init_adj_scale, lambda_base, adj_base,
n_augment, iterations, linsys)
|
465467
|
from dataclasses import dataclass
from typing import List
from osbenchmark.telemetry import Telemetry
@dataclass
class Node:
"""A representation of a node within a host"""
name: str
port: int
pid: int
root_dir: str
binary_path: str
log_path: str
heap_dump_path: str
data_paths: List[str]
telemetry: Telemetry
|
465486
|
from AC3utils import PLUGIN_BASE, PLUGIN_VERSION
from Components.ActionMap import NumberActionMap
from Components.Button import Button
from Components.ConfigList import ConfigListScreen
from Components.Label import Label
from Components.config import config, getConfigListEntry
from Screens.Screen import Screen
class AC3LipSyncSetup(ConfigListScreen, Screen):
skin = """
<screen position="center,center" size="560,400" title="AC3 Lip Sync Setup">
<ePixmap pixmap="~/img/button-red.png" position="0,0" zPosition="0" size="140,40" transparent="1" alphatest="on" />
<ePixmap pixmap="~/img/button-green.png" position="140,0" zPosition="0" size="140,40" transparent="1" alphatest="on" />
<ePixmap pixmap="~/img/button-yellow.png" position="280,0" zPosition="0" size="140,40" transparent="1" alphatest="on" />
<ePixmap pixmap="~/img/button-blue.png" position="420,0" zPosition="0" size="140,40" transparent="1" alphatest="on" />
<widget name="key_red" position="0,0" zPosition="1" size="140,40"
font="Regular;20" valign="center" halign="center" backgroundColor="#9f1313" transparent="1"
shadowColor="#000000" shadowOffset="-1,-1" />
<widget name="key_green" position="140,0" zPosition="1" size="140,40"
font="Regular;20" valign="center" halign="center" backgroundColor="#1f771f" transparent="1"
shadowColor="#000000" shadowOffset="-1,-1" />
<widget name="key_yellow" position="280,0" zPosition="1" size="140,40"
font="Regular;20" valign="center" halign="center" backgroundColor="#a08500" transparent="1"
shadowColor="#000000" shadowOffset="-1,-1" />
<widget name="key_blue" position="420,0" zPosition="1" size="140,40"
font="Regular;20" valign="center" halign="center" backgroundColor="#18188b" transparent="1"
shadowColor="#000000" shadowOffset="-1,-1" />
<widget name="config" position="10,40" size="540,320" scrollbarMode="showOnDemand" />
<widget name="PluginInfo" position="10,370" size="540,20" zPosition="4" font="Regular;18" foregroundColor="#cccccc" />
</screen>"""
def __init__(self, session, plugin_path):
Screen.__init__(self, session)
# Lets get a list of elements for the config list
self.list = [
getConfigListEntry(_("Outer Bound (+/-)"), config.plugins.AC3LipSync.outerBounds),
getConfigListEntry(_("Step in ms for arrow keys"), config.plugins.AC3LipSync.arrowStepSize),
getConfigListEntry(_("Wait time in ms before activation:"), config.plugins.AC3LipSync.activationDelay),
getConfigListEntry(_("Step in ms for keys '%s'") % ("1/3"), config.plugins.AC3LipSync.stepSize13),
getConfigListEntry(_("Step in ms for keys '%s'") % ("4/6"), config.plugins.AC3LipSync.stepSize46),
getConfigListEntry(_("Step in ms for keys '%s'") % ("7/9"), config.plugins.AC3LipSync.stepSize79),
getConfigListEntry(_("Step in ms for key %i") % (2), config.plugins.AC3LipSync.absoluteStep2),
getConfigListEntry(_("Step in ms for key %i") % (5), config.plugins.AC3LipSync.absoluteStep5),
getConfigListEntry(_("Step in ms for key %i") % (8), config.plugins.AC3LipSync.absoluteStep8)
]
ConfigListScreen.__init__(self, self.list)
self["config"].list = self.list
self.skin_path = plugin_path
# Plugin Information
self["PluginInfo"] = Label(_("Plugin: %(plugin)s , Version: %(version)s") %dict(plugin=PLUGIN_BASE,version=PLUGIN_VERSION))
# BUTTONS
self["key_red"] = Button(_("Cancel"))
self["key_green"] = Button(_("Save"))
self["key_yellow"] = Button(_(" "))
self["key_blue"] = Button(" ")
self["setupActions"] = NumberActionMap(["SetupActions", "ColorActions"],
{
"save": self.save,
"cancel": self.cancel,
"green": self.save,
"red": self.cancel,
"ok": self.save,
}, -2)
def save(self):
for x in self.list:
x[1].save()
self.close()
def cancel(self):
for x in self["config"].list:
x[1].cancel()
self.close()
|
465537
|
from lib.mmonit import MmonitBaseAction
class MmonitDismissEvent(MmonitBaseAction):
def run(self, event_id):
self.login()
data = {"id": event_id}
self.session.post("{}/reports/events/dismiss".format(self.url), data=data)
self.logout()
return True
|
465561
|
import os
import subprocess
import pptx
from pptx.chart.data import ChartData
from pptx.enum.chart import XL_CHART_TYPE
from pptx.util import Inches
import pdfrw
from pd2ppt import df_to_table
import pandas as pd
def load_excel_files():
df_times = pd.read_excel("input_data/project_hours.xlsx")
df_expenses = pd.read_excel("input_data/project_expenses.xlsx")
df_rates = pd.read_excel("input_data/project_rates.xlsx")
return df_times, df_expenses, df_rates
def transform_excel(df_times, df_expenses, df_rates):
df_times_rate = df_times.merge(df_rates, how="outer", on="Person")
times_diff = df_times_rate["TimeStop"] - df_times_rate["TimeStart"]
df_times_rate["Cost"] = times_diff * df_times_rate["Rate"]
df_times_cost_pivot = df_times_rate.pivot_table(
values="Cost", index=["Project", "Person"]).reset_index()
df_times_cost_pivot["Cost Type"] = "hours"
df_expenses_pivot = df_expenses.pivot_table(
values="Cost", index=["Project", "Person"]).reset_index()
df_expenses_pivot["Cost Type"] = "expenses"
df_all_costs = pd.concat([df_expenses_pivot, df_times_cost_pivot], sort=False)
return df_times_cost_pivot, df_expenses_pivot, df_all_costs
def create_introsheet(workbook):
introsheet = workbook.add_worksheet("Introduction")
bold = workbook.add_format(
{'bold': True, "align": "right", "font_color": "blue"})
introsheet.write(0, 0, 'Title', bold)
intro_text = 'Overall Costs'
    introsheet.write(0, 1, intro_text)
introsheet.set_column(1, 1, len(intro_text) + 5)
introsheet.insert_image(1, 0, "input_data/logo.jpg",
{'x_scale': 0.5, 'y_scale': 0.5})
def export_to_xlsx_sheets(df_times_cost_pivot, df_expenses_pivot, df_all_costs):
writer = pd.ExcelWriter('scrap_data/pandas_simple.xlsx')
df_all_costs.to_excel(writer, index=False, sheet_name='df_all_costs')
df_expenses_pivot.to_excel(writer, index=False, sheet_name='df_expenses_pivot')
df_times_cost_pivot.to_excel(writer, index=False, sheet_name='df_times_cost_pivot')
writer.close()
def create_sheets_from_pandas_intro(df_times_cost_pivot, df_expenses_pivot, df_all_costs):
writer = pd.ExcelWriter('scrap_data/pandas_simple_intro.xlsx',
engine='xlsxwriter')
workbook = writer.book
create_introsheet(workbook)
df_all_costs.to_excel(writer, index=False,
sheet_name='df_all_costs')
df_expenses_pivot.to_excel(writer, index=False,
sheet_name='df_expenses_pivot')
    df_times_cost_pivot.to_excel(writer, index=False,
                                 sheet_name='df_times_cost_pivot')
    writer.close()
def create_pandas_by_hand_1(workbook, sheet_title, dataframe):
sheet = workbook.add_worksheet(sheet_title)
sheet.write_row(0, 0, dataframe.columns)
for i, row in enumerate(dataframe.values):
sheet.write_row(i + 1, 0, row)
def create_pandas_by_hand_2(workbook, sheet_title, dataframe):
sheet = workbook.add_worksheet(sheet_title)
large_text = workbook.add_format({'bold': True, "font_size": 14})
red_bold = workbook.add_format({'bold': True, "font_color": "red"})
sheet.write_row(0, 0, dataframe.columns, large_text)
for i, header in enumerate(dataframe.columns):
sheet.set_column(i, i, len(header) * 1.2 + 5)
percentile75 = dataframe["Cost"].describe()["75%"]
for i, row in enumerate(dataframe.values):
for i2, value in enumerate(row):
if i2 == 0:
if value > percentile75:
sheet.write_number(i + 1, i2, value, red_bold)
else:
sheet.write_number(i + 1, i2, value)
else:
sheet.write_string(i + 1, i2, value)
def create_pandas_by_hand_3(workbook, sheet_title, dataframe):
num_format = workbook.add_format({'num_format': "####.#"})
sheet = workbook.add_worksheet(sheet_title)
nrows, ncols = dataframe.shape
columns_desc = [{"header": v} for v in dataframe.columns]
sheet.add_table(0, 0, nrows, ncols - 1, {"data": dataframe.values,
"columns": columns_desc})
sheet.set_column(0, 0, 10, num_format)
conditional_options = {
'type': '3_color_scale',
"min_color": "green",
"mid_color": "yellow",
"max_color": "red"
}
sheet.conditional_format(1, 0, nrows, 0, conditional_options)
def create_chart_1(workbook, sheet_title, df_all_costs):
sheet = workbook.add_worksheet(sheet_title)
df_chart = df_all_costs.pivot_table(
values="Cost", index="Person", columns="Cost Type")
df_chart.reset_index(inplace=True)
sheet.write_row(0, 0, [s.upper() for s in df_chart.columns])
sheet.write_column(1, 0, df_chart['Person'])
sheet.write_column(1, 1, df_chart['expenses'])
sheet.write_column(1, 2, df_chart['hours'])
chart = workbook.add_chart({'type': 'column', 'subtype': 'stacked'})
chart.set_style(12)
nrows = df_chart.shape[0]
for i in [1, 2]:
chart.add_series({
'name': [sheet.get_name(), 0, i],
'categories': [sheet.get_name(), 1, 0, nrows, 0],
'values': [sheet.get_name(), 1, i, nrows, i]})
sheet.insert_chart('A8', chart, {'x_offset': 25, 'y_offset': 10})
def prepare_excel_xlsxwriter(df_all_costs, df_expenses_pivot, df_times_cost_pivot):
export_to_xlsx_sheets(df_times_cost_pivot, df_expenses_pivot, df_all_costs)
create_sheets_from_pandas_intro(df_times_cost_pivot, df_expenses_pivot, df_all_costs)
writer = pd.ExcelWriter('scrap_data/pandas_complex.xlsx', engine='xlsxwriter')
workbook = writer.book
create_pandas_by_hand_1(workbook, "All Costs", df_all_costs)
create_pandas_by_hand_2(workbook, "All Costs 2", df_all_costs)
create_pandas_by_hand_3(workbook, "All Costs 3", df_all_costs)
    create_chart_1(workbook, "Sheet with Chart 1", df_all_costs)
    writer.close()
def create_slide(presentation, title, layout=5):
layout = presentation.slide_layouts[5]
slide = presentation.slides.add_slide(layout)
if title is not None:
slide_title = slide.shapes.title
slide_title.text = title
return slide
def prepare_pptx(df_all_costs):
create_presentation_1()
presentation = pptx.Presentation("input_data/template.pptx")
slide = create_slide(presentation, "Introduction")
create_intro_slide_with_graphic(slide)
slide = create_slide(presentation, "Data Table")
create_table_slide(df_all_costs, slide)
slide = create_slide(presentation, "Charts")
create_chart_slide(df_all_costs, slide)
presentation.save('./output_data/test.pptx')
def prepare_pptx_and_convert(df_all_costs, pptx_filename, export_format="pdf"):
presentation_plain = pptx.Presentation("input_data/template_plain.pptx")
slide = create_slide(presentation_plain, "Charts")
create_chart_slide(df_all_costs, slide)
presentation_plain.save(pptx_filename)
libre_office_binary = "/Applications/LibreOffice.app/Contents/MacOS/soffice"
cmd = [libre_office_binary, "--headless", "--convert-to", export_format,
"--outdir", os.path.dirname(pptx_filename),
pptx_filename]
subprocess.run(cmd, check=True)
def combine_pdf(pptx_filename):
pdf_filename = pptx_filename.replace(".pptx", ".pdf")
pdf_report_pages = pdfrw.PdfReader(pdf_filename).pages
pdf_template_pages = pdfrw.PdfReader('input_data/pdf_template.pdf').pages
outdata = pdfrw.PdfWriter('output_data/plain_with_template.pdf')
outdata.addpage(pdf_template_pages[0])
outdata.addpages(pdf_report_pages)
outdata.addpage(pdf_template_pages[1])
outdata.write()
def create_chart_slide(df_all_costs, slide):
df_chart = df_all_costs.pivot_table(values="Cost",
index="Person", columns="Cost Type")
df_chart.reset_index(inplace=True)
chart_data = ChartData()
chart_data.categories = list(df_chart['Person'])
chart_data.add_series('Expenses', list(df_chart["expenses"]))
chart_data.add_series('Hours', list(df_chart["hours"]))
CHART_TYPE = XL_CHART_TYPE.COLUMN_CLUSTERED
    chart_left = Inches(1)
    chart_top = Inches(2)
    chart_width = Inches(12)
    chart_height = Inches(4)
chart = slide.shapes.add_chart(CHART_TYPE, chart_left, chart_top,
chart_width, chart_height, chart_data).chart
chart.has_legend = True
chart.legend.include_in_layout = False
def create_table_slide(df_all_costs, slide):
    table_left = Inches(1)
    table_top = Inches(2)
    table_width = Inches(12)
    table_height = Inches(4)
df_to_table(slide, df_all_costs, table_left, table_top,
table_width, table_height)
def create_intro_slide_with_graphic(slide):
left = width = height = Inches(1)
top = Inches(2)
txBox = slide.shapes.add_textbox(left, top, width, height)
tf = txBox.text_frame
tf.text = "A Short but meaningful text for the slide"
top = Inches(4)
slide.shapes.add_picture("./input_data/logo.jpg", left, top)
def create_presentation_1():
presentation = pptx.Presentation("input_data/template.pptx")
title_slide_layout = presentation.slide_layouts[0]
slide = presentation.slides.add_slide(title_slide_layout)
title = slide.shapes.title
title.text = "Meaningful Title"
subtitle = slide.placeholders[1]
subtitle.text = "Some text for the placeholder defined in the layout"
presentation.save("./output_data/presentation_1.pptx")
def prepare_pdf(df_all_costs):
pptx_filename = './output_data/plain.pptx'
prepare_pptx_and_convert(df_all_costs, pptx_filename)
combine_pdf(pptx_filename)
def main():
# Just load the data from Excel files and rename some columns
df_times, df_expenses, df_rates = load_excel_files()
# Build some Pivot tables, because everybody _loves_ pivot tables
df_times_cost_pivot, df_expenses_pivot, df_all_costs = transform_excel(df_times, df_expenses, df_rates)
# Create the different versions of Excel file, in increasing order of colorfulness...
prepare_excel_xlsxwriter(df_all_costs, df_expenses_pivot, df_times_cost_pivot)
# Prepare a PPTX, based on the pivots and an existing PPTX 'template'
prepare_pptx(df_all_costs)
# Finally, create a version of the PPTX to turn into a PDF via Libreoffice, and process the resulting file
# with Python
prepare_pdf(df_all_costs)
if __name__ == '__main__':
main()
|
465632
|
import os
import math
import numpy as np
root_path = '/home/project/I3D/data/Kinetics/train_256'
num_frames = 16
data_list = []
id_list = []
label_list = []
erro_data = []
label = 0
id = 0
for file_path in sorted(os.listdir(root_path)):
for video_path in sorted(os.listdir(os.path.join(root_path, file_path))):
frame_num = len(os.listdir(os.path.join(root_path, file_path, video_path)))
print('Process: ' + os.path.join(root_path, file_path, video_path), frame_num)
if frame_num > 0:
data_list.append(os.path.join(root_path, file_path, video_path))
id_list.append(id)
label_list.append(label)
id += 1
else:
erro_data.append(os.path.join(root_path, file_path, video_path))
label += 1
if label == 100:
break
print(erro_data)
print(len(data_list))
print(len(id_list))
print(len(label_list))
np.save('./train_data_list_%d.npy'%label, data_list)
np.save('./train_label_list_%d.npy'%label, label_list)
|
465646
|
r"""Distributed TensorFlow with Monitored Training Session.
This implements the 1a image recognition benchmark task, see https://mlbench.readthedocs.io/en/latest/benchmark-tasks.html#a-image-classification-resnet-cifar-10
for more details
Adapted from official tutorial::
https://www.tensorflow.org/deploy/distributed
Launch::
mpirun -n 3 --allow-run-as-root python ....
"""
import argparse
import logging
import os
import tensorflow as tf
from mlbench_core.controlflow.tensorflow.train_validation import train_round, \
validation_round
from mlbench_core.dataset.imagerecognition.tensorflow.cifar10 import \
DatasetCifar
from mlbench_core.evaluation.goals import task1_time_to_accuracy_light_goal, \
task1_time_to_accuracy_goal
from mlbench_core.evaluation.tensorflow.criterion import \
softmax_cross_entropy_with_logits_v2_l2_regularized
from mlbench_core.evaluation.tensorflow.metrics import TopKAccuracy
from mlbench_core.lr_scheduler.tensorflow.lr import manual_stepping
from mlbench_core.models.tensorflow.resnet_model import Cifar10Model
from mlbench_core.utils import Tracker
def define_graph(inputs, labels, is_training, batch_size, replicas_to_aggregate, is_chief):
"""
Define graph for synchronized training.
"""
model = Cifar10Model(
resnet_size=20,
data_format='channels_last',
resnet_version=2,
dtype=tf.float32)
logits = model(inputs, is_training)
loss = softmax_cross_entropy_with_logits_v2_l2_regularized(
logits=logits,
labels=labels,
l2=2e-4,
# Exclude BN weights from L2 regularizer
loss_filter_fn=lambda name: 'batch_normalization' not in name)
# Use Top K accuracy as metrics
metrics = [
TopKAccuracy(logits, labels, topk=1),
TopKAccuracy(logits, labels, topk=5),
]
global_step = tf.train.get_or_create_global_step()
# scheduling learning steps.
lr_scheduler = manual_stepping(
global_step=global_step,
boundaries=[32000 // replicas_to_aggregate,
48000 // replicas_to_aggregate],
rates=[0.1, 0.01, 0.001],
warmup=False)
# Define the optimizer
optimizer_ = tf.train.MomentumOptimizer(
learning_rate=lr_scheduler,
momentum=0.9,
use_nesterov=True)
# Wrap optimizer with `SyncReplicasOptimizer`
optimizer = tf.train.SyncReplicasOptimizer(
optimizer_,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=replicas_to_aggregate)
hooks = [
        # only the chief worker handles the SyncReplicas initialization and token logic
        optimizer.make_session_run_hook(is_chief, num_tokens=0)
]
# The update for batch normalization.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# Not all of the processes contribute one update. Some faster procs can push more updates.
grads_and_vars = list(optimizer.compute_gradients(
loss, tf.trainable_variables()))
train_op = optimizer.apply_gradients(
grads_and_vars, global_step=global_step)
return train_op, loss, metrics, hooks
def main(is_ps, run_id, rank, world_size, cluster_spec, batch_size,
replicas_to_aggregate, light_target=False):
logging.info("Initial.")
job_name = "ps" if is_ps else "worker"
cluster = tf.train.ClusterSpec(cluster_spec)
gpu_options = tf.GPUOptions(allow_growth=True,
per_process_gpu_memory_fraction=0.2)
session_conf = tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True,
log_device_placement=False)
server = tf.train.Server(
cluster, job_name=job_name, task_index=rank, config=session_conf)
if is_ps:
server.join()
else:
# Pin variables to parameter server.
device_fn = tf.train.replica_device_setter(
ps_tasks=None,
ps_device="/job:ps",
worker_device="/job:{}/task:{}/device:GPU:{}".format(
job_name, rank, rank),
merge_devices=True,
cluster=cluster,
ps_ops=None,
ps_strategy=None)
with tf.Graph().as_default():
with tf.device(device_fn):
data_loader = DatasetCifar(
dataset='cifar-10',
dataset_root='/datasets',
batch_size=batch_size,
world_size=world_size,
rank=rank,
seed=42,
tf_dtype=tf.float32)
train_op, loss, metrics, hooks = define_graph(
data_loader.inputs,
data_loader.labels,
data_loader.training,
batch_size,
replicas_to_aggregate)
local_init_op = tf.group(
tf.local_variables_initializer(),
data_loader.train_init_op,
data_loader.validation_init_op)
scaffold = tf.train.Scaffold(
init_op=None,
init_feed_dict=None,
init_fn=None,
ready_op=None,
ready_for_local_init_op=None,
local_init_op=local_init_op)
lr_tensor_name = tf.get_default_graph().get_tensor_by_name("learning_rate:0")
with tf.train.MonitoredTrainingSession(config=session_conf,
master=server.target,
scaffold=scaffold,
is_chief=(rank == 0),
checkpoint_dir=None,
save_checkpoint_secs=None,
save_summaries_steps=None,
stop_grace_period_secs=5,
hooks=hooks) as sess:
logging.info("Begin training.")
final_epoch = 164
if light_target:
goal = task1_time_to_accuracy_light_goal()
else:
goal = task1_time_to_accuracy_goal()
tracker = Tracker(metrics, run_id, rank, goal=goal)
tracker.start()
for i_epoch in range(final_epoch):
logging.debug("=> Epoch {}".format(i_epoch))
train_round(sess, data_loader.train_init_op, train_op,
loss, metrics, batch_size,
data_loader.num_batches_per_epoch_for_train,
tracker, lr_tensor=lr_tensor_name,
lr_scheduler_level='epoch')
validation_round(sess, data_loader.validation_init_op,
loss, metrics, batch_size,
data_loader.num_batches_per_epoch_for_eval,
tracker)
tracker.epoch_end()
if tracker.goal_reached:
print("Goal Reached!")
return
logging.info("Finish.")
def configure_logger(log_dir, is_ps, rank):
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'{:6} rank={} : %(message)s'.format("ps" if is_ps else "worker", rank),
"%Y-%m-%d %H:%M:%S")
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
log_name = '{}-{}.log'.format("ps" if is_ps else "worker", rank)
log_name = os.path.join(log_dir, log_name)
if os.path.exists(log_name):
os.remove(log_name)
fh = logging.FileHandler(log_name)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process run parameters')
parser.add_argument('--run_id', type=str, help='The id of the run')
parser.add_argument('--hosts', type=str, help='The hosts participating in this run')
parser.add_argument('--light', action='store_true', default=False,
help='Train to light target metric goal')
args = parser.parse_args()
rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
hosts = args.hosts.split(",")
if len(hosts) < 2:
raise ValueError("At least 2 pods are needed for this benchmark (1 parameter server, 1 worker)")
workers = [h + ":22222" for h in hosts[1:]]
    ps = hosts[0] + ":22222"  # The first host acts as the parameter server
cluster_spec = {"worker": workers,
"ps": [ps]}
# Parse role in the cluster by rank.
is_ps = rank < len(cluster_spec['ps'])
rank = rank if is_ps else rank - len(cluster_spec['ps'])
world_size = size - len(cluster_spec['ps'])
# Configure Logging
if not os.path.exists('/mlbench'):
os.makedirs('/mlbench')
configure_logger('/mlbench', is_ps, rank)
batch_size = 128
replicas_to_aggregate = len(cluster_spec['worker'])
main(is_ps, args.run_id, rank, world_size, cluster_spec,
batch_size, replicas_to_aggregate, light_target=args.light)
|
465674
|
import pandas as pd
from pyarc.qcba.data_structures import QuantitativeDataFrame
from pyids.ids_classifier import IDS, mine_IDS_ruleset
from pyids.ids_ruleset import IDSRuleSet
from pyids.rule_mining import RuleMiner
from pyids.model_selection import CoordinateAscentOptimizer, train_test_split_pd
df = pd.read_csv("../../data/titanic.csv")
# The original lines referenced an undefined `cars` variable; assuming here that
# mine_IDS_ruleset mines the candidate rule set directly from the dataframe
# (the exact signature depends on the installed pyids version).
ids_ruleset = mine_IDS_ruleset(df)
df_train, df_test = train_test_split_pd(df, prop=0.25)
quant_df_train, quant_df_test = QuantitativeDataFrame(df_train), QuantitativeDataFrame(df_test)
coordinate_ascent = CoordinateAscentOptimizer(IDS(), maximum_delta_between_iterations=200, maximum_score_estimation_iterations=10, ternary_search_precision=20, maximum_consecutive_iterations=20)
lambda_array = coordinate_ascent.fit(ids_ruleset, quant_df_train, quant_df_test)
print(lambda_array)
with open("results/coordinate_ascent_lambda_array.txt", "w") as file:
file.write(str(lambda_array))
|
465718
|
import time
from collections import deque
import gym
class RecordEpisodeStatistics(gym.Wrapper):
    """Gym wrapper that records per-episode return, horizon and wall-clock time in ``info['episode']``."""
def __init__(self, env, deque_size=100):
super().__init__(env)
self.t0 = time.perf_counter()
self.episode_return = 0.0
self.episode_horizon = 0
self.return_queue = deque(maxlen=deque_size)
self.horizon_queue = deque(maxlen=deque_size)
def reset(self, **kwargs):
observation = super().reset(**kwargs)
self.episode_return = 0.0
self.episode_horizon = 0
return observation
def step(self, action):
observation, reward, done, info = super().step(action)
self.episode_return += reward
self.episode_horizon += 1
if done:
info['episode'] = {'return': self.episode_return,
'horizon': self.episode_horizon,
'time': round(time.perf_counter() - self.t0, 4)}
self.return_queue.append(self.episode_return)
self.horizon_queue.append(self.episode_horizon)
self.episode_return = 0.0
self.episode_horizon = 0
return observation, reward, done, info
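# Usage sketch (assumption: a registered Gym environment such as 'CartPole-v1' and the
# classic 4-tuple step API are available; illustrative only). The wrapper is transparent:
# step/reset as usual and read the aggregated statistics from info['episode'] at episode end.
if __name__ == '__main__':
    env = RecordEpisodeStatistics(gym.make('CartPole-v1'))
    obs = env.reset()
    done = False
    while not done:
        obs, reward, done, info = env.step(env.action_space.sample())
    print(info['episode'])           # {'return': ..., 'horizon': ..., 'time': ...}
    print(list(env.return_queue))    # rolling window of recent episode returns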
|
465740
|
import asyncio
from asyncio.futures import Future
from typing import List
from PIL import Image
from io import BytesIO
from importlib import resources
from datetime import datetime
from collections import defaultdict
from .base_rust_api import BaseRustSocket
from .structures import RustTime, RustInfo, RustMap, RustMarker, RustChatMessage, RustTeamInfo, RustTeamMember, RustTeamNote, RustEntityInfo, RustContents, RustItem
from .remote.rustplus_pb2 import *
from .remote import HeartBeat
from ..commands import CommandOptions
from ..exceptions import *
from ..utils import *
class RustSocket(BaseRustSocket):
def __init__(self, ip: str = None, port: str = None, steamid: int = None, playertoken: int = None, command_options : CommandOptions = None, raise_ratelimit_exception : bool = True, ratelimit_limit : int = 25, ratelimit_refill : int = 3) -> None:
super().__init__(ip=ip, port=port, steamid=steamid, playertoken=playertoken, command_options=command_options, raise_ratelimit_exception=raise_ratelimit_exception, ratelimit_limit=ratelimit_limit, ratelimit_refill=ratelimit_refill, heartbeat=HeartBeat(self))
def entity_event(self, eid):
"""
Decorator to register a smart device listener
"""
def wrap_func(coro):
def entity_event_callback(future : Future):
try:
entity_info : RustEntityInfo = future.result()
self.remote.event_handler.register_event(eid, (coro, loop, entity_info.type))
                except Exception:
                    raise SmartDeviceRegistrationError("Not Found")
loop = asyncio.get_event_loop()
future = asyncio.run_coroutine_threadsafe(self.get_entity_info(eid), loop)
future.add_done_callback(entity_event_callback)
return wrap_func
async def get_time(self) -> RustTime:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getTime.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
response = await self.remote.get_response(app_request.seq, app_request)
return format_time(response)
async def send_team_message(self, message: str) -> None:
await self._handle_ratelimit(2)
app_send_message = AppSendMessage()
app_send_message.message = message
app_request = self._generate_protobuf()
app_request.sendTeamMessage.CopyFrom(app_send_message)
self.remote.ignored_responses.append(app_request.seq)
await self.remote.send_message(app_request)
async def get_info(self) -> RustInfo:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getInfo.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
response = await self.remote.get_response(app_request.seq, app_request)
return RustInfo(response.response.info)
async def get_team_chat(self) -> List[RustChatMessage]:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getTeamChat.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
messages = (await self.remote.get_response(app_request.seq, app_request)).response.teamChat.messages
return [RustChatMessage(message) for message in messages]
async def get_team_info(self):
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getTeamInfo.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return RustTeamInfo(app_message.response.teamInfo)
async def get_markers(self) -> List[RustMarker]:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getMapMarkers.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return [RustMarker(marker) for marker in app_message.response.mapMarkers.markers]
async def get_raw_map_data(self) -> RustMap:
await self._handle_ratelimit(5)
app_request = self._generate_protobuf()
app_request.getMap.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return RustMap(app_message.response.map)
async def get_map(self, add_icons: bool = False, add_events: bool = False, add_vending_machines: bool = False, override_images: dict = {}) -> Image:
MAPSIZE = int((await self.get_info()).size)
        await self._handle_ratelimit(5 + (1 if [add_icons, add_events, add_vending_machines].count(True) >= 1 else 0))
app_request = self._generate_protobuf()
app_request.getMap.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
map = app_message.response.map
monuments = list(map.monuments)
try:
image = Image.open(BytesIO(map.jpgImage))
        except Exception:
            raise ImageError("Invalid bytes for the image")
image = image.crop((500,500,map.height-500,map.width-500))
map = image.resize((MAPSIZE,MAPSIZE), Image.ANTIALIAS)
if add_icons or add_events or add_vending_machines:
mapMarkers = await self.get_markers()
if add_icons:
for monument in monuments:
if str(monument.token) == "DungeonBase":
continue
icon = convert_monument(monument.token, override_images)
if monument.token in override_images:
icon = icon.resize((150, 150))
if str(monument.token) == "train_tunnel_display_name":
icon = icon.resize((100, 125))
map.paste(icon, (format_cood(int(monument.x), int(monument.y), MAPSIZE)), icon)
if add_vending_machines:
with resources.path("rustplus.api.icons", "vending_machine.png") as path:
vendingMachine = Image.open(path).convert("RGBA")
vendingMachine = vendingMachine.resize((100, 100))
for marker in mapMarkers:
if add_events:
                    if marker.type in (2, 4, 5, 6):
icon = convert_marker(str(marker.type), marker.rotation)
if marker.type == 6:
x = marker.x
y = marker.y
if y > MAPSIZE: y = MAPSIZE
if y < 0: y = 100
if x > MAPSIZE: x = MAPSIZE - 75
if x < 0: x = 50
map.paste(icon, (int(x), MAPSIZE - int(y)), icon)
else:
map.paste(icon, (format_cood(int(marker.x), int(marker.y), MAPSIZE)), icon)
if add_vending_machines and marker.type == 3:
map.paste(vendingMachine, (int(marker.x) - 50, MAPSIZE - int(marker.y) - 50), vendingMachine)
return map.resize((2000, 2000), Image.ANTIALIAS)
async def get_entity_info(self, eid: int = None) -> RustEntityInfo:
await self._handle_ratelimit()
if eid is None:
raise ValueError("EID cannot be None")
app_request = self._generate_protobuf()
app_request.entityId = eid
app_request.getEntityInfo.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return RustEntityInfo(app_message.response.entityInfo)
async def _update_smart_device(self, eid : int, value : bool) -> None:
await self._handle_ratelimit()
entityValue = AppSetEntityValue()
entityValue.value = value
app_request = self._generate_protobuf()
app_request.entityId = eid
app_request.setEntityValue.CopyFrom(entityValue)
self.remote.ignored_responses.append(app_request.seq)
await self.remote.send_message(app_request)
async def turn_on_smart_switch(self, eid: int = None) -> None:
if eid is None:
raise ValueError("EID cannot be None")
await self._update_smart_device(eid, True)
async def turn_off_smart_switch(self, eid: int = None) -> None:
if eid is None:
raise ValueError("EID cannot be None")
await self._update_smart_device(eid, False)
async def promote_to_team_leader(self, steamid: int = None) -> None:
if steamid is None:
raise ValueError("SteamID cannot be None")
await self._handle_ratelimit()
leaderPacket = AppPromoteToLeader()
leaderPacket.steamId = steamid
app_request = self._generate_protobuf()
app_request.promoteToLeader.CopyFrom(leaderPacket)
self.remote.ignored_responses.append(app_request.seq)
await self.remote.send_message(app_request)
async def get_current_events(self) -> List[RustMarker]:
        return [marker for marker in (await self.get_markers()) if marker.type in (2, 4, 5, 6)]
async def get_tc_storage_contents(self, eid: int = None, combine_stacks: bool = False) -> RustContents:
if eid is None:
raise ValueError("EID cannot be None")
returnedData = await self.get_entity_info(eid)
targetTime = datetime.utcfromtimestamp(int(returnedData.protectionExpiry))
difference = targetTime - datetime.utcnow()
items = []
for item in returnedData.items:
items.append(RustItem(translate_id_to_stack(item.itemId), item.itemId, item.quantity, item.itemIsBlueprint))
if combine_stacks:
mergedMap = defaultdict(tuple)
for item in items:
data = mergedMap[str(item.itemId)]
if data:
count = int(data[0]) + int(item.quantity)
mergedMap[str(item.itemId)] = (count, bool(item.isBlueprint))
else:
mergedMap[str(item.itemId)] = (int(item.quantity), bool(item.isBlueprint))
items = []
for key in mergedMap.keys():
items.append(RustItem(translate_id_to_stack(key), key, int(mergedMap[key][0]), bool(mergedMap[key][1])))
return RustContents(difference, bool(returnedData.hasProtection), items)
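# Usage sketch (assumptions: BaseRustSocket exposes async connect()/disconnect() as in the
# public rustplus examples, and the connection details below are placeholders). Defined but
# not invoked so importing this module has no side effects.
async def _example_usage():
    socket = RustSocket("ip", "port", 1234567890, 1234567890)
    await socket.connect()
    print(await socket.get_time())
    await socket.send_team_message("Hello from rustplus")
    await socket.disconnect()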
|
465752
|
class Node:
def __init__(self, parent, rank=0, size=1):
self.parent = parent
self.rank = rank
self.size = size
def __repr__(self):
return '(parent=%s, rank=%s, size=%s)' % (self.parent, self.rank, self.size)
class Forest:
    """Disjoint-set forest (union-find) with union by rank."""
    def __init__(self, num_nodes):
self.nodes = [Node(i) for i in range(num_nodes)]
self.num_sets = num_nodes
def size_of(self, i):
return self.nodes[i].size
def find(self, n):
temp = n
while temp != self.nodes[temp].parent:
temp = self.nodes[temp].parent
self.nodes[n].parent = temp
return temp
def merge(self, a, b):
if self.nodes[a].rank > self.nodes[b].rank:
self.nodes[b].parent = a
self.nodes[a].size = self.nodes[a].size + self.nodes[b].size
else:
self.nodes[a].parent = b
self.nodes[b].size = self.nodes[b].size + self.nodes[a].size
if self.nodes[a].rank == self.nodes[b].rank:
self.nodes[b].rank = self.nodes[b].rank + 1
self.num_sets = self.num_sets - 1
def print_nodes(self):
for node in self.nodes:
print(node)
def create_edge(img, width, x, y, x1, y1, diff):
vertex_id = lambda x, y: y * width + x
w = diff(img, x, y, x1, y1)
return (vertex_id(x, y), vertex_id(x1, y1), w)
def build_graph(img, width, height, diff, neighborhood_8=False):
graph_edges = []
for y in range(height):
for x in range(width):
if x > 0:
graph_edges.append(create_edge(img, width, x, y, x - 1, y, diff))
if y > 0:
graph_edges.append(create_edge(img, width, x, y, x, y - 1, diff))
if neighborhood_8:
if x > 0 and y > 0:
graph_edges.append(create_edge(img, width, x, y, x - 1, y - 1, diff))
if x > 0 and y < height - 1:
graph_edges.append(create_edge(img, width, x, y, x - 1, y + 1, diff))
return graph_edges
def remove_small_components(forest, graph, min_size):
for edge in graph:
a = forest.find(edge[0])
b = forest.find(edge[1])
if a != b and (forest.size_of(a) < min_size or forest.size_of(b) < min_size):
forest.merge(a, b)
return forest
def segment_graph(graph_edges, num_nodes, const, min_size, threshold_func):
# Step 1: initialization
forest = Forest(num_nodes)
weight = lambda edge: edge[2]
sorted_graph = sorted(graph_edges, key=weight)
threshold = [ threshold_func(1, const) for _ in range(num_nodes) ]
# Step 2: merging
for edge in sorted_graph:
parent_a = forest.find(edge[0])
parent_b = forest.find(edge[1])
a_condition = weight(edge) <= threshold[parent_a]
b_condition = weight(edge) <= threshold[parent_b]
if parent_a != parent_b and a_condition and b_condition:
forest.merge(parent_a, parent_b)
a = forest.find(parent_a)
threshold[a] = weight(edge) + threshold_func(forest.nodes[a].size, const)
return remove_small_components(forest, sorted_graph, min_size)
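# Usage sketch (assumptions: `img` is a 2-D grayscale image indexable as img[y][x], the pixel
# difference is the absolute intensity difference, and the threshold is the classic
# Felzenszwalb k/size rule; none of these helpers exist in this module).
def example_segmentation(img, width, height, k=300, min_size=20):
    diff = lambda im, x, y, x1, y1: abs(float(im[y][x]) - float(im[y1][x1]))
    threshold_func = lambda size, const: const / size
    edges = build_graph(img, width, height, diff, neighborhood_8=True)
    forest = segment_graph(edges, width * height, k, min_size, threshold_func)
    # forest.find(y * width + x) now yields the component id of pixel (x, y)
    return forest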
|
465755
|
from __future__ import (
absolute_import,
unicode_literals,
)
import functools
def decorated(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
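# Usage sketch (illustrative only): functools.wraps preserves the wrapped function's
# name and docstring, so introspection still sees the original function.
if __name__ == '__main__':
    @decorated
    def greet(name):
        """Return a greeting."""
        return 'Hello, {0}'.format(name)

    print(greet.__name__)   # 'greet', not 'wrapper'
    print(greet('world'))   # 'Hello, world'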
|
465793
|
import asyncio
from ..pool import ConnectionPool, ClosedPool, EmptyPool
from .aioconnection import AIOLDAPConnection
MYPY = False
if MYPY:
from ..ldapclient import LDAPClient
class AIOPoolContextManager:
def __init__(self, pool, *args, **kwargs):
self.pool = pool
self.__conn = None
async def __aenter__(self):
if self.pool.closed:
await self.pool.open()
self.__conn = await self.pool.get()
return self.__conn
async def __aexit__(self, type, value, traceback):
await self.pool.put(self.__conn)
class AIOConnectionPool(ConnectionPool):
"""
    A connection pool that can be used with asyncio tasks. It's inherited from
:class:`bonsai.pool.ConnectionPool`.
:param LDAPClient client: the :class:`bonsai.LDAPClient` that's used to create
connections.
:param int minconn: the minimum number of connections that's created
after the pool is opened.
:param int maxconn: the maximum number of connections in the pool.
:param \\*\\*kwargs: additional keyword arguments that are passed to
the :meth:`bonsai.LDAPClient.connect` method.
:raises ValueError: when the minconn is negative or the maxconn is less
than the minconn.
"""
def __init__(
self,
client: "LDAPClient",
minconn: int = 1,
maxconn: int = 10,
loop=None,
**kwargs
):
super().__init__(client, minconn, maxconn, **kwargs)
self._loop = loop
try:
# The loop parameter is deprecated since 3.8, removed in 3.10
# and it raises TypeError.
self._lock = asyncio.Condition(loop=self._loop)
except TypeError:
self._lock = asyncio.Condition()
async def open(self) -> None:
async with self._lock:
for _ in range(
self._minconn - self.idle_connection - self.shared_connection
):
conn = await self._client.connect(
is_async=True, loop=self._loop, **self._kwargs
)
self._idles.add(conn)
self._closed = False
async def get(self) -> AIOLDAPConnection:
async with self._lock:
if self._closed:
raise ClosedPool("The pool is closed.")
await self._lock.wait_for(lambda: not self.empty or self._closed)
try:
conn = self._idles.pop()
except KeyError:
if len(self._used) < self._maxconn:
conn = await self._client.connect(
is_async=True, loop=self._loop, **self._kwargs
)
else:
raise EmptyPool("Pool is empty.") from None
self._used.add(conn)
self._lock.notify()
return conn
async def put(self, conn: AIOLDAPConnection) -> None:
async with self._lock:
super().put(conn)
self._lock.notify()
async def close(self) -> None:
async with self._lock:
super().close()
self._lock.notify_all()
def spawn(self, *args, **kwargs):
return AIOPoolContextManager(self, *args, **kwargs)
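# Usage sketch (assumptions: a reachable LDAP server, bonsai's LDAPClient/LDAPSearchScope,
# and the AIOLDAPConnection.search coroutine; defined but not invoked so importing this
# module stays side-effect free).
async def _example_usage(url="ldap://localhost", base="ou=people,dc=example,dc=com"):
    from bonsai import LDAPClient, LDAPSearchScope
    pool = AIOConnectionPool(LDAPClient(url), minconn=1, maxconn=5)
    async with pool.spawn() as conn:
        entries = await conn.search(base, LDAPSearchScope.SUBTREE)
    await pool.close()
    return entries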
|
465805
|
import json
import copy
import requests
import datetime
from logUtils import *
# Log file location
_logFilePath = r"D:/Temp/Logging/createViews_[date].log"
# ArcGIS Online
_sourceFLUrl = "[YOUR-FEATURE-LAYER-URL]"
_uniqueValueField = "PROVINCIE"
_username = "[YOUR-USERNAME]"
_password = "[<PASSWORD>]"
# Script parameters
_viewServiceProperties = ["serviceDescription", "hasStaticData","maxRecordCount", "supportedQueryFormats", "capabilities", "description",
"copyrightText", "spatialReference", "initialExtent", "allowGeometryUpdates", "units", "xssPreventionInfo"]
_viewLayerProperties = ["currentVersion", "id", "name", "type", "displayField", "extent"]
# Global parameters
_token = None
_tokenExpires = None
_sourceService = None
_sourceServiceData = None
_sourceLayer = None
_sourceItem = None
def main():
# Start logging
ConfigureLogging(_logFilePath, level="INFO")
LogInfo("Script started")
# Read source service information
readSourceData()
# Get unique values
uniqueValues = getUniqueValues()
# Loop through all unique values and create a view for it
for uniqueValue in uniqueValues:
createViewForUniqueValue(uniqueValue)
LogInfo("Script completed")
def getUniqueValues():
"""Get the unique values from the Feature Service"""
queryUrl = f"{_sourceFLUrl}/query"
queryParams = {}
queryParams["where"] = "1 = 1"
queryParams["outFields"] = _uniqueValueField
queryParams["returnDistinctValues"] = True
queryParams["returnGeometry"] = False
queryResponse = sendRequest(queryUrl, queryParams)
uniqueValues = [feature["attributes"][_uniqueValueField] for feature in queryResponse["features"]]
return uniqueValues
def createViewForUniqueValue(uniqueValue):
"""Create a new View from the source layer"""
# Create json with view information using source service
viewJson = {}
# Generate a unique name for the view (fs name + unique value)
viewJson["name"] = f"{_sourceFLUrl.split('/')[-3]} {uniqueValue}"
for serviceProperty in _viewServiceProperties:
viewJson[serviceProperty] = _sourceService[serviceProperty]
# CreateService
LogInfo(f"Creating Service for {uniqueValue}")
createServiceUrl = f"https://www.arcgis.com/sharing/rest/content/users/{_username}/createService"
createServiceParams = {}
createServiceParams["isView"] = True
createServiceParams["outputType"] = "featureService"
createServiceParams["createParameters"] = json.dumps(viewJson)
createServiceResponse = sendRequest(createServiceUrl, createServiceParams)
    # If the service was created successfully
if createServiceResponse["success"] == True:
viewItemID = createServiceResponse["itemId"]
viewItemUrl = f"https://www.arcgis.com/sharing/rest/content/users/{_username}/items/{viewItemID}"
viewServiceUrl = createServiceResponse["serviceurl"]
# Add layer to View definition
LogInfo("AddToDefinition View")
adminServiceUrl = viewServiceUrl.replace("rest/services", "rest/admin/services")
addToDefinitionUrl = f"{adminServiceUrl}/addToDefinition"
sourceLayerJson = {}
for layerProperty in _viewLayerProperties:
sourceLayerJson[layerProperty] = json.dumps(_sourceLayer[layerProperty])
sourceLayerJson["url"] = _sourceFLUrl
sourceLayerJson["adminLayerInfo"] = {
"viewLayerDefinition": {
"sourceServiceName": sourceLayerJson["url"].split("/")[-3],
"sourceLayerId": 0,
"sourceLayerFields": "*"
}
}
addToDefinitionParams = {}
addToDefinitionParams["addToDefinition"] = json.dumps({
"layers": [sourceLayerJson]
})
addToDefinitionResponse = sendRequest(addToDefinitionUrl, addToDefinitionParams)
if addToDefinitionResponse["success"] == False:
LogException(f"Unable to add layer to view for {uniqueValue}: {addToDefinitionResponse['error']['message']}")
# Update _sourceServiceData for current unique value
uniqueValueData = editViewData(uniqueValue)
# Update View
LogInfo("Update View")
updateViewUrl = f"{viewItemUrl}/update"
updateViewParams = {}
updateViewParams["title"] = viewJson["name"]
updateViewParams["id"] = viewItemID
updateViewParams["text"] = json.dumps(uniqueValueData)
updateViewResponse = sendRequest(updateViewUrl, updateViewParams)
if updateViewResponse["success"] == False:
LogException(f"Unable to update view for {uniqueValue}: {updateViewResponse['error']['message']}")
# Share View
LogInfo("Share View")
shareViewUrl = f"{viewItemUrl}/share"
shareViewParams = {}
shareViewParams["everyone"] = True
shareViewResponse = sendRequest(shareViewUrl, shareViewParams)
if len(shareViewResponse["notSharedWith"]) > 0:
LogException(f"Unable to share view for {uniqueValue}")
    else:
        LogException(f"Could not create view for {uniqueValue}: {createServiceResponse['error']['message']}")
        return None, None
    return viewItemID, viewServiceUrl
def editViewData(uniqueValue):
"""Edit the source service data for the current unique value"""
# Create a copy from the source service data
uniqueValueData = copy.deepcopy(_sourceServiceData)
# Change service data to use current unique value information (assumes a string value )
uniqueValueData["layers"][0]["layerDefinition"]["definitionExpression"] = f"{_uniqueValueField} = '{uniqueValue}'"
return uniqueValueData
def readSourceData():
"""Read information from the source Feature Service"""
global _sourceService
global _sourceServiceData
global _sourceLayer
global _sourceItem
LogInfo("Loading source data and information into memory")
# Feature Service Definition
_sourceService = sendRequest(_sourceFLUrl[:-2])
# Feature Layer (0) Definition
sourceLayerUrl = _sourceFLUrl
_sourceLayer = sendRequest(sourceLayerUrl)
# Item Description
sourceItemUrl = f"https://www.arcgis.com/sharing/rest/content/items/{_sourceService['serviceItemId']}"
_sourceItem = sendRequest(sourceItemUrl)
# Item Data
sourceDataUrl = f"{sourceItemUrl}/data"
_sourceServiceData = sendRequest(sourceDataUrl)
def checkToken():
"""Check if ArcGIS token is still valid, if not, retrieve new one"""
global _token
global _tokenExpires
# If token is expired, or not yet created, generate a new token
if not _tokenExpires or _tokenExpires < (datetime.datetime.now().timestamp() * 1000) + 3600:
portalUrl = "https://www.arcgis.com"
tokenURL = "{}/sharing/generateToken".format(portalUrl)
        tokenParams = {'username': _username, 'password': _password, 'referer': portalUrl, 'f': 'json', 'expiration': 60}
r = requests.post(tokenURL, tokenParams)
tokenObject = r.json()
_token = tokenObject['token']
_tokenExpires = tokenObject["expires"]
def sendRequest(requestUrl, params=None):
    """Send a request to the given url to retrieve data"""
    # Avoid a mutable default argument; the dict is mutated below
    if params is None:
        params = {}
# Check if the token is still valid
checkToken()
# Set token and output type
params["token"] = _token
params["f"] = "json"
# Send the request
r = requests.post(requestUrl, params)
results = r.json()
return results
if __name__ == "__main__":
main()
|
465840
|
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import TYPE_CHECKING, Iterator, Optional
if TYPE_CHECKING:
from dvc.fs.ssh import SSHFileSystem
from dvc.types import StrPath
class BaseMachineBackend(ABC):
def __init__(self, tmp_dir: "StrPath", **kwargs):
self.tmp_dir = tmp_dir
@abstractmethod
def create(self, name: Optional[str] = None, **config):
"""Create and start an instance of the specified machine."""
@abstractmethod
def destroy(self, name: Optional[str] = None, **config):
"""Stop and destroy all instances of the specified machine."""
@abstractmethod
def instances(
self, name: Optional[str] = None, **config
) -> Iterator[dict]:
"""Iterate over status of all instances of the specified machine."""
def close(self):
pass
@abstractmethod
def run_shell(self, name: Optional[str] = None, **config):
"""Spawn an interactive SSH shell for the specified machine."""
@abstractmethod
def get_executor_kwargs(self, name: str, **config) -> dict:
"""Return SSHExecutor kwargs which can be used for DVC
experiment/pipeline execution on the specified machine.
"""
@abstractmethod
@contextmanager
def get_sshfs(
self, name: Optional[str] = None, **config
) -> Iterator["SSHFileSystem"]:
"""Return an sshfs instance for the default directory on the
specified machine."""
@abstractmethod
def rename(self, name: str, new: str, **config):
"""Rename a machine instance."""
|
465847
|
import moderngl
import numpy as np
ctx = moderngl.create_standalone_context()
prog = ctx.program(
vertex_shader='''
#version 330
in vec2 in_vert;
in vec3 in_color;
out vec3 v_color;
void main() {
v_color = in_color;
gl_Position = vec4(in_vert, 0.0, 1.0);
}
''',
fragment_shader='''
#version 330
in vec3 v_color;
out vec3 f_color;
void main() {
f_color = v_color;
}
''',
)
x = np.linspace(-1.0, 1.0, 50)
y = np.random.rand(50) - 0.5
r = np.ones(50)
g = np.zeros(50)
b = np.zeros(50)
vertices = np.dstack([x, y, r, g, b])
vbo = ctx.buffer(vertices.astype('f4').tobytes())
vao = ctx.simple_vertex_array(prog, vbo, 'in_vert', 'in_color')
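# Rendering sketch (assumption: the original script stops after building the VAO; this follows
# the standard moderngl standalone pattern of drawing into an offscreen framebuffer and saving
# the result with Pillow).
from PIL import Image

fbo = ctx.simple_framebuffer((512, 384))
fbo.use()
fbo.clear(0.0, 0.0, 0.0, 1.0)
vao.render(moderngl.LINE_STRIP)
Image.frombytes('RGB', fbo.size, fbo.read(), 'raw', 'RGB', 0, -1).save('output.png')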
|
465886
|
from copy import deepcopy
import json
class Encoding(object):
CHANNELS = ['x', 'y', 'row', 'column', 'color', 'size', 'shape', 'text']
def __init__(self, **condition):
if (len(condition.keys()) != 1):
raise ValueError('only one condition allowed')
self._condition = next(iter(condition.items()))
if (self._condition[0] == 'field'):
self._condition = (self._condition[0], '\"{0}\"'.format(self._condition[1]))
self._field = False
self._type = False
self._aggregate = False
self._channel = False
self._bin = None
self._maxbins = False
self._scale = False
def clone(self):
return deepcopy(self)
def type(self, value):
self._type = value
return self
def field(self, value):
self._field = value
return self
def aggregate(self, value):
self._aggregate = value
return self
def channel(self, value):
self._channel = value
return self
def bin(self, value):
self._bin = value
return self
def maxbins(self, value):
self._bin = True
self._maxbins = value
return self
def scale(self, value):
self._scale = value
return self
def to_asp(self, vid):
asp = []
facts = []
eid = 'E'
condition_key = self._condition[0]
condition_value = self._condition[1]
declare = None
condition = None
        if (condition_key == 'aggregate' and condition_value is None):
declare = ':- aggregate({0},_,count).'.format(vid)
else:
condition = '{0}({1},{2},{3})'.format(condition_key, vid, eid, condition_value)
declare = ':- not {0} : encoding({1},E).'.format(condition, vid)
asp.append(declare)
template = '{0}({1},{2},{3})'
none_template = 'not {0}({1},{2},_)'
if (self._field is not False):
field = None
if (self._field is None):
field = none_template.format('field', vid, eid)
else:
field = template.format('field', vid, eid, '\"{0}\"'.format(self._field))
facts.append(field)
if (self._type is not False):
type = None
if (self._type is None):
type = none_template.format('type', vid, eid)
else:
type = template.format('type', vid, eid, self._type)
facts.append(type)
if (self._aggregate is not False):
aggregate = None
if (self._aggregate is None):
aggregate = none_template.format('aggregate', vid, eid)
else:
aggregate = template.format('aggregate', vid, eid, self._aggregate)
facts.append(aggregate)
if (self._channel is not False):
channel = None
if (self._channel is None):
channel = none_template.format('channel', vid, eid)
else:
channel = template.format('channel', vid, eid, self._channel)
facts.append(channel)
if (self._bin is not None):
bin = None
if (self._bin is False):
bin = none_template.format('bin', vid, eid)
else:
bins = 10 if self._maxbins is False else self._maxbins
bin = template.format('bin', vid, eid, bins)
facts.append(bin)
        if (self._scale is not False):
            if (self._scale == 'log'):
                scale = 'log({0},{1})'.format(vid, eid)
            elif (self._scale == 'zero'):
                scale = 'zero({0},{1})'.format(vid, eid)
            else:
                raise ValueError('unsupported scale: {0}'.format(self._scale))
            facts.append(scale)
conditioned_facts = [':- {0}, not {1}.'.format(condition, f) for f in facts]
asp += conditioned_facts
return asp
def __repr__(self):
return json.dumps(self.__dict__)
def __str__(self):
return json.dumps(self.__dict__)
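# Usage sketch (illustrative only; 'v1' is an arbitrary view id following the convention
# used by to_asp above).
if __name__ == '__main__':
    enc = Encoding(channel='x').field('horsepower').type('quantitative').aggregate('mean')
    for fact in enc.to_asp('v1'):
        print(fact)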
|
465905
|
import torch
from torch import nn
import torch.nn.functional as F
from tqdm import tqdm, trange
import numpy as np
from math import *
import laplace.util as lutil
from util.evaluation import get_calib
class DiagLaplace(nn.Module):
"""
Taken, with modification, from:
https://github.com/wjmaddox/swa_gaussian/blob/master/swag/posteriors/diag_laplace.py
"""
def __init__(self, base_model):
super().__init__()
self.net = base_model
self.params = []
self.net.apply(lambda module: dla_parameters(module, self.params))
self.hessians = None
self.n_params = sum(p.numel() for p in base_model.parameters())
def forward(self, x):
return self.net.forward(x)
def forward_sample(self, x):
self.sample()
return self.net.forward(x)
    def sample(self, scale=1, require_grad=False):
        # Sample w ~ N(mean, scale^2 * var) for every tracked parameter and install it on
        # the module (mirrors sample_raw below; the original only set the weights when
        # require_grad was True and carried a dead for/else branch).
        for module, name in self.params:
            mean = module.__getattr__(f'{name}_mean')
            var = module.__getattr__(f'{name}_var')
            eps = torch.randn(*mean.shape, device='cuda')
            w = mean + scale * torch.sqrt(var) * eps
            if require_grad:
                w.requires_grad_()
            module.__setattr__(name, w)
def sample_raw(self, var0, scale=1, require_grad=False):
tau = 1/var0
for module, name in self.params:
mean = module.__getattr__(f'{name}_mean')
var = module.__getattr__(f'{name}_var')
eps = torch.randn(*mean.shape, device='cuda')
w = mean + scale * torch.sqrt(1/(tau + var)) * eps
if require_grad:
w.requires_grad_()
module.__setattr__(name, w)
def estimate_variance(self, var0, invert=True):
tau = 1/var0
for module, name in self.params:
h = self.hessians[(module, name)].clone()
var = (1 / (h + tau)) if invert else h
module.__getattr__(f'{name}_var').copy_(var)
def get_hessian(self, train_loader, binary=False):
criterion = nn.BCEWithLogitsLoss(reduction='mean') if binary else nn.CrossEntropyLoss(reduction='mean')
diag_hess = dict()
for module, name in self.params:
var = module.__getattr__(f'{name}_var')
diag_hess[(module, name)] = torch.zeros_like(var)
# Populate parameters with the means
self.sample(scale=0, require_grad=True)
for x, y in tqdm(train_loader):
x = x.cuda()
self.net.zero_grad()
out = self(x).squeeze()
if binary:
distribution = torch.distributions.Binomial(logits=out)
else:
distribution = torch.distributions.Categorical(logits=out)
y = distribution.sample()
loss = criterion(out, y)
loss.backward()
for module, name in self.params:
grad = module.__getattr__(name).grad
diag_hess[(module, name)] += grad**2
n_data = len(train_loader.dataset)
self.hessians = diag_hess
return diag_hess
def gridsearch_var0(self, val_loader, ood_loader, interval, n_classes=10, lam=1):
vals, var0s = [], []
pbar = tqdm(interval)
for var0 in pbar:
self.estimate_variance(var0)
if n_classes == 2:
preds_in, y_in = lutil.predict_binary(val_loader, self, 10, return_targets=True)
preds_out = lutil.predict_binary(ood_loader, self, 10)
loss_in = F.binary_cross_entropy(preds_in.squeeze(), y_in.float())
loss_out = F.binary_cross_entropy(preds_out.squeeze(), torch.ones_like(y_in)*0.5)
else:
preds_in, y_in = lutil.predict(val_loader, self, n_samples=5, return_targets=True)
preds_out = lutil.predict(ood_loader, self, n_samples=5)
loss_in = F.nll_loss(torch.log(preds_in + 1e-8), y_in)
loss_out = -torch.log(preds_out + 1e-8).mean()
loss = loss_in + lam * loss_out
vals.append(loss)
var0s.append(var0)
pbar.set_description(f'var0: {var0:.5f}, Loss-in: {loss_in:.3f}, Loss-out: {loss_out:.3f}, Loss: {loss:.3f}')
best_var0 = var0s[np.argmin(vals)]
return best_var0
def dla_parameters(module, params):
for name in list(module._parameters.keys()):
if module._parameters[name] is None:
# print(module, name)
continue
data = module._parameters[name].data
module._parameters.pop(name)
module.register_buffer(f'{name}_mean', data)
module.register_buffer(f'{name}_var', data.new(data.size()).zero_())
module.register_buffer(name, data.new(data.size()).zero_())
params.append((module, name))
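# Usage sketch (assumptions: a CUDA device, a pretrained `base_model`, and PyTorch DataLoaders
# `train_loader` / `test_loader`; kept as comments because none of these objects exist here).
# la = DiagLaplace(base_model.cuda())
# la.get_hessian(train_loader)          # accumulate the diagonal curvature estimate
# la.estimate_variance(var0=1e-3)       # posterior variance from prior variance var0
# probs = torch.stack([torch.softmax(la.forward_sample(x.cuda()), dim=1)
#                      for _ in range(10)]).mean(0)   # MC average over weight samples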
|
465943
|
from typing import Union
from django.shortcuts import render
from django.http import HttpResponse
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework.authentication import TokenAuthentication, SessionAuthentication, BasicAuthentication
from rest_framework.renderers import JSONRenderer
import json
from navigation import models as navigation_models
from navigation.utilities import level as navigation_level
from navigation.utilities import query as navigation_query
from navigation.utilities import scorecard as navigation_scorecard
from PIL import Image
import imageio
import numpy as np
# Create your views here.
class UnrealEngineDataInterface:
def __init__(self):
self.project_id = 0
self.level_id = 0
self.level: Union[navigation_level.Level, None] = None
def get_level(self, project_id: int, level_id: int):
if not self.level:
level = navigation_models.Level.objects.filter(project_id=project_id, level_id=level_id).first()
self.level = navigation_query.decode_level(level)
if self.level.model.level_id != level_id or self.level.model.project_id != project_id:
level = navigation_models.Level.objects.filter(project_id=project_id, level_id=level_id).first()
self.level = navigation_query.decode_level(level)
return self.level
UEDataIFace = UnrealEngineDataInterface()
#@ PRIMARY UTILITY
@api_view(['GET'])
@permission_classes([AllowAny])
def uei_gtm(request: Request, project_id: int, level_id: int) -> Response:
global UEDataIFace
level = UEDataIFace.get_level(project_id, level_id)
lm = level.elevation.copy()[0]
navigation_scorecard.mask_replace_with(
lm, level.feature[0], 1, 0.0
)
r = ((lm*255) / np.nanmax(lm[lm != np.inf]))
# image = Image.fromarray(((lm*255) / np.nanmax(lm[lm != np.inf])).astype(np.uint32), mode="L")
image = Image.fromarray(r, mode="I")
response = HttpResponse(content_type='image/png')
image.save('../rasters/test.tif')
imageio.imwrite('../rasters/test.png', r.astype(np.uint16))
image.save(response, "PNG")
return response
@api_view(['POST'])
@permission_classes([AllowAny])
@authentication_classes([TokenAuthentication, SessionAuthentication])
def uei_gtp(request: Request, project_id: int, level_id: int) -> Response:
global UEDataIFace
level = UEDataIFace.get_level(project_id, level_id)
data = request.data
print(data)
start_grid_pos = level.transform.transform_to_grid(
(
data['start'][0]/100, data['start'][1]/100
)
)
end_grid_pos = level.transform.transform_to_grid(
(
data['end'][0]/100, data['end'][1]/100
)
)
print(start_grid_pos, end_grid_pos)
path = list(level.find_path_safe(start_grid_pos, end_grid_pos, 0, 0, True)[0])
new_path = []
for p in path:
print(p)
w = level.transform.transform_to_world((int(p[1]), int(p[0])))
new_path.append(
(w[0], w[1])
)
new_path.reverse()
data = {'path': new_path}
return Response(data, content_type='application/json')
|
465952
|
import json
import requests
import urllib.parse as urlparse
def check_web_exception(host_name):
if "http" not in host_name:
host_name = "https://" + host_name
try:
res = requests.get(host_name, timeout=3)
res.raise_for_status()
except Exception:
return 2
try:
res = requests.post(
urlparse.urljoin(host_name, "hello"), json={"name": 1}, timeout=3
)
if res.status_code != 500:
return 1
res = res.json()
if res["flag"] == "FLAG{b4d_excep7ion_handl1ng}":
return 0
else:
return 1
except Exception:
return 1
if __name__ == "__main__":
print(check_web_exception("localhost"))
|
465974
|
import pandas as pd
import geopandas as gpd
import re
import textdistance
import numpy as np
import math
def make_ordinal(s):
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(math.floor(n/10)%10!=1)*(n%10<4)*n%10::4])
name_ord = []
for x in s:
x = x.title()
m = re.findall(r'\d+', x)
if(len(m) > 0):
num = m[0]
t = re.sub('{}'.format(num), ordinal(int(num)), x)
name_ord.append(t)
else:
t = x
name_ord.append(t)
return name_ord
def main():
elevator_list = pd.read_csv('../../data/raw/EE_master_list.csv')
stations = gpd.read_file('../../data/raw/subway_stations.geojson')
turnstile_remotes = pd.read_excel('../../data/raw/Remote-Booth-Station.xls')
gtfs = pd.read_csv('../../data/raw/google_transit/stops.txt')
turnstile_remotes['Line Name'] = turnstile_remotes['Line Name'].astype(str)
gtfs = gtfs[gtfs.location_type == 1]
gtfs_routes = pd.read_csv('../../data/raw/google_transit/routes.txt')
gtfs_trips = pd.read_csv('../../data/raw/google_transit/trips.txt')
gtfs_stop_times = pd.read_csv('../../data/raw/google_transit/stop_times.txt')
## Getting lines for each GTFS Stop ID
gtfs_stop_times = gtfs_stop_times[gtfs_stop_times.trip_id.str.contains('Weekday')]
gtfs_lines = gtfs_stop_times.merge(gtfs_trips,on="trip_id")
gtfs_lines = gtfs_lines.merge(gtfs_routes,on='route_id')
gtfs_lines['stop_id'] = [re.sub('N$|S$','',x) for x in gtfs_lines.stop_id]
gtfs_lines['lines'] = gtfs_lines[['stop_id','route_short_name']].groupby(['stop_id'])['route_short_name'].transform(lambda x:
','.join(x.unique()))
gtfs_lines = gtfs_lines[['stop_id','lines']]
gtfs_lines = gtfs_lines.drop_duplicates()
gtfs = gtfs.merge(gtfs_lines[['stop_id','lines']],how='left',on='stop_id')
gtfs = gtfs[~gtfs.lines.isnull()]
## Standardization
stations = pd.DataFrame(stations.drop('geometry',axis=1))
# Standardizing names
stations['name_ord'] = stations.name
turnstile_remotes['name_ord'] = make_ordinal(turnstile_remotes.Station)
elevator_list['name_ord'] = make_ordinal(elevator_list.station_name)
gtfs['name_ord'] = make_ordinal(gtfs.stop_name)
# Standardizing lines
stations["clean_lines"] = [re.sub('-','',re.sub('-\d+ Express','',x)) for x in stations.line]
turnstile_remotes['clean_lines'] = [re.sub('-','',re.sub(r'(\w)(?!$)',r'\1-',str(x))) for x in turnstile_remotes['Line Name']]
elevator_list['clean_lines'] = [re.sub('-','',re.sub('/', '-',re.sub('(/METRO-NORTH)|(/LIRR)','', x))) for x in
elevator_list.subway_lines]
gtfs['clean_lines'] = [re.sub('-','',re.sub(',','-',re.sub(',((\d)|(\w))X','',x))) for x in gtfs.lines]
# Dropping unnecessary columns
stations = stations[['name','name_ord','clean_lines','line']]
elevator_list = elevator_list[['equipment_id','station_name','name_ord','clean_lines','subway_lines']]
turnstile_remotes = turnstile_remotes[['Remote','Station','name_ord','clean_lines','Line Name']]
gtfs = gtfs[['stop_id','stop_name','stop_lat','stop_lon','name_ord','clean_lines','lines']]
###### Text Matching
elevator_list.reset_index(drop=True,inplace=True)
elevator_list['station_match'] = ''
elevator_list['station_lines'] = ''
for i,row in elevator_list.iterrows():
## station matching lines
st_line_matches = [y if len(textdistance.lcsstr(row.clean_lines,y)) > 0 else None for y in stations.clean_lines]
st_line_matches = [x for x in st_line_matches if x is not None]
st_subset = stations[stations.clean_lines.isin(st_line_matches)]
## Fails to find the right match for just 59th St
if row.station_name == '59 St':
continue
## elevator
if st_subset.shape[0] > 0:
st_dist = [textdistance.jaccard(row.name_ord,y) for y in st_subset.name_ord]
st_match = st_subset.iloc[np.argmax(st_dist),]
st_score = max(st_dist)
if st_score > 0.75:
elevator_list.iloc[i,][['station_match','station_lines']] = st_match[['name_ord','line']]
else:
st_dist = [textdistance.jaro_winkler(row.name_ord,y) for y in st_subset.name_ord]
st_match = st_subset.iloc[np.argmax(st_dist),]
st_score = max(st_dist)
elevator_list.iloc[i,][['station_match','station_lines']] = st_match[['name_ord','line']]
## Manual overrides
elevator_list.loc[(elevator_list.station_name == '57 St - 7 Av')&(elevator_list.station_match == ''),
['clean_lines','station_match','station_lines']] = ['NQRW','57th St','N-Q-R-W']
elevator_list.loc[(elevator_list.station_name == '59 St')&(elevator_list.station_match == ''),
['clean_lines','station_match','station_lines']] = ['456','Lexington Ave - 59th St','4-5-6-6 Express']
elevator_list.loc[(elevator_list.station_name == '68 St / Hunter College')&(elevator_list.station_match == ''),
['clean_lines','station_match','station_lines']] = ['46','68th St - Hunter College','4-6-6 Express']
elevator_list.loc[(elevator_list.station_name == '86 St')&(elevator_list.station_match == ''),
['clean_lines','station_match','station_lines']] = ['456','86th St','4-5-6-6 Express']
elevator_list.loc[(elevator_list.station_name == 'Bedford Park Blvd/Grand Concourse Line')&(elevator_list.station_match == ''),
['clean_lines','station_match','station_lines']] = ['BD','Bedford Park Blvd','B-D']
elevator_list.loc[(elevator_list.station_name == 'Chambers St')&(elevator_list.station_match == ''),
['clean_lines','station_match','station_lines']] = ['JZ','Chambers St','J-Z']
el_station_merge = elevator_list.copy()
el_station_merge['equipments'] = el_station_merge.groupby(['station_match','station_lines'])['equipment_id'].transform(lambda x :
','.join(x.unique()))
el_station_merge.drop(['equipment_id','name_ord'],axis=1,inplace=True)
el_station_merge = el_station_merge.drop_duplicates()
crosswalk = stations.merge(el_station_merge,how='left',left_on=['name','line'],right_on=['station_match','station_lines'])
crosswalk.rename(columns={'clean_lines_x':'clean_lines','station_name':'el_station_name','subway_lines':'el_lines'},inplace=True)
crosswalk.drop(['station_match','station_lines','clean_lines_y'],axis=1,inplace=True)
crosswalk.fillna('',inplace=True)
## Matching GTFS
crosswalk.reset_index(drop=True,inplace=True)
crosswalk['gtfs_station_name'] = ''
crosswalk['gtfs_lines'] = ''
for i,row in crosswalk.iterrows():
## gtfs matching lines
gtfs_line_matches = [y if len(textdistance.lcsstr(row.clean_lines,y)) > 0 else None for y in gtfs.clean_lines]
gtfs_line_matches = [x for x in gtfs_line_matches if x is not None]
gtfs_subset = gtfs[gtfs.clean_lines.isin(gtfs_line_matches)]
###### distances
## exceptions where it fails
if((row.name_ord == '46th St') | (row.name_ord == '57th St')):
continue
if gtfs_subset.shape[0] > 0:
gtfs_dist = [textdistance.jaccard(row.name_ord,y) for y in gtfs_subset.name_ord]
gtfs_match = gtfs_subset.iloc[np.argmax(gtfs_dist),]
gtfs_score = max(gtfs_dist)
if gtfs_score > 0.88:
crosswalk.iloc[i,][['gtfs_station_name','gtfs_lines']] = gtfs_match[['stop_name','lines']]
else:
gtfs_dist = [textdistance.jaro_winkler(row.name_ord,y) for y in gtfs_subset.name_ord]
gtfs_match = gtfs_subset.iloc[np.argmax(gtfs_dist),]
gtfs_score = max(gtfs_dist)
if gtfs_score > 0.74:
crosswalk.iloc[i,][['gtfs_station_name','gtfs_lines']] = gtfs_match[['stop_name','lines']]
## Manual overrides
crosswalk.loc[(crosswalk.name_ord == 'Lexington Ave - 59th St')&(crosswalk.gtfs_station_name == ''),
['gtfs_station_name','gtfs_lines']] = ['59 St','4,5,5X,6,6X']
crosswalk.loc[(crosswalk.name_ord == 'Long Island City - Court Sq')&(crosswalk.gtfs_station_name == ''),
['gtfs_station_name','gtfs_lines']] = ['Court Sq - 23 St','G']
crosswalk.loc[(crosswalk.name_ord == '46th St')&(crosswalk.clean_lines=='EMR')&(crosswalk.gtfs_station_name == ''),
['gtfs_station_name','gtfs_lines']] = ['46 St','E,M,R']
crosswalk.loc[(crosswalk.name_ord == '46th St')&(crosswalk.clean_lines=='7')&(crosswalk.gtfs_station_name == ''),
['gtfs_station_name','gtfs_lines']] = ['46 St - Bliss St','7']
crosswalk.loc[(crosswalk.name_ord == 'Gravesend - 86th St')&(crosswalk.gtfs_station_name == ''),
['gtfs_station_name','gtfs_lines']] = ['86 St','N,W,Q']
crosswalk.loc[(crosswalk.name_ord == 'Lower East Side - 2nd Ave')&(crosswalk.gtfs_station_name == ''),
['gtfs_station_name','gtfs_lines']] = ['2 Av','F,FX']
crosswalk.loc[(crosswalk.name_ord == '57th St')&(crosswalk.clean_lines=='F')&(crosswalk.gtfs_station_name == ''),
['gtfs_station_name','gtfs_lines']] = ['57 St','F,FX,M']
crosswalk.loc[(crosswalk.name_ord == '57th St')&(crosswalk.clean_lines=='NQRW')&(crosswalk.gtfs_station_name == ''),
['gtfs_station_name','gtfs_lines']] = ['57 St - 7 Av','N,W,Q,R']
##### Turnstile
stations_w_issues = ['36th Ave','111th St','168th St','104th St','7th Ave','28th St','39th Ave','81st St','30th Ave',
'Broadway Junction','49th St', '57th St', '80th St','96th St','176th St']
crosswalk.reset_index(drop=True,inplace=True)
crosswalk['turnstile_station_name'] = ''
crosswalk['turnstile_lines'] = ''
for i,row in crosswalk.iterrows():
## turnstile matching lines
ts_line_matches = [y if len(textdistance.lcsstr(row.clean_lines,y)) > 0 else None for y in turnstile_remotes.clean_lines]
ts_line_matches = [x for x in ts_line_matches if x is not None]
ts_subset = turnstile_remotes[turnstile_remotes.clean_lines.isin(ts_line_matches)]
##### distances
if (row.name_ord in stations_w_issues):
continue
# turnstile
if ts_subset.shape[0] > 0:
ts_dist = [textdistance.jaccard(row.name_ord,y) for y in ts_subset.name_ord]
ts_match = ts_subset.iloc[np.argmax(ts_dist),]
ts_score = max(ts_dist)
if ts_score > 0.88:
crosswalk.iloc[i,][['turnstile_station_name','turnstile_lines']] = ts_match[['Station','Line Name']]
else:
ts_dist = [textdistance.jaro_winkler(row.name_ord,y) for y in ts_subset.name_ord]
ts_match = ts_subset.iloc[np.argmax(ts_dist),]
ts_score = max(ts_dist)
if ts_score > 0.81:
crosswalk.iloc[i,][['turnstile_station_name','turnstile_lines']] = ts_match[['Station','Line Name']]
missing_vals = crosswalk[crosswalk.turnstile_station_name == ''][['name','clean_lines']]
missing_vals.reset_index(drop=True,inplace=True)
## manual overrides
ts_override = [['MAIN ST','7'],['138 ST-3 AVE','6'],['42 ST-GRD CNTRL','4567S'],['96 ST','6'],['61 ST/WOODSIDE','7'],['96 ST','BC'],
['168 ST-BROADWAY','1AC'],['UNION TPK-KEW G','EF'],['WASHINGTON-36 A','NQ'],['42 ST-GRD CNTRL','4567S'],['GREENWOOD-111','A'],
['OXFORD-104 ST','A'],['7 AV-PARK SLOPE','FG'],['7 AVE','BQ'],['FLATBUSH AVE','25'],['28 ST-BROADWAY','NR'],['COURT SQ','EMG'],
['VAN ALSTON-21ST','G'],['BEEBE-39 AVE','NQ'],['96 ST','123'],['110 ST-CPN','23'],['81 ST-MUSEUM','BC'],['110 ST-CATHEDRL','1'],['176 ST','4'],
['168 ST-BROADWAY','1AC'],['111 ST','7'],['LEFFERTS BLVD','A'],['28 ST','1'],['28 ST','6'],['42 ST-GRD CNTRL','4567S'],['FOREST PARKWAY','J'],
['111 ST','J'],['MYRTLE AVE','LM'],['ROCKAWAY PKY','L'],['EAST 105 ST','L'],['BROADWAY-ENY','ACJLZ'],['ELDERTS LANE','JZ'],['MYRTLE AVE','LM'],
['VAN WYCK BLVD','EF'],['HOYT ST-ASTORIA','NQ'],['DITMARS BL-31 S','NQ'],['148 ST-LENOX','3'],['242 ST','1'],['E TREMONT AVE','25'],['DYRE AVE','5'],
['BROADWAY-ENY','ACJLZ'],['149 ST-3 AVE','25'],['GRAND-30 AVE','NQ'],['NEW UTRECHT AVE','ND'],['86 ST','N'],['22 AVE-BAY PKY','F'],
['7 AVE-53 ST','BDE'],['57 ST','F'],['49 ST-7 AVE','NQR'],['57 ST-7 AVE','NQR'],['57 ST-7 AVE','NQR'],['2 AVE','F'],['BOROUGH HALL/CT','2345R'],['BROADWAY-ENY','ACJLZ'],
['BROOKLYN BRIDGE','456JZ'],['METROPOLITAN AV','M'],['ROOSEVELT AVE','EFMR7'],['E 177 ST-PARKCH','6'],['HUDSON-80 ST','A'],['STILLWELL AVE','DFNQ'],['34 ST-HUDSON YD','7'],
['72 ST-2 AVE','Q'],['86 ST-2 AVE','Q'],['96 ST-2 AVE','Q']]
turnstile_override = pd.DataFrame(ts_override)
turnstile_override.rename(columns={0:'turnstile_station_name',1:'turnstile_lines'},inplace=True)
turnstile_override = pd.concat([missing_vals,turnstile_override],axis=1)
for i,row in crosswalk.iterrows():
if (row.turnstile_station_name == ''):
ts_match = turnstile_override[(turnstile_override.name == row.name_ord)&
(turnstile_override.clean_lines == row.clean_lines)][['turnstile_station_name','turnstile_lines']]
crosswalk.iloc[i,][['turnstile_station_name','turnstile_lines']] = ts_match.values[0]
crosswalk.drop('name_ord',axis=1,inplace=True)
crosswalk.rename(columns={'name':'station_name','line':'station_lines'},inplace=True)
crosswalk = crosswalk.merge(gtfs.drop('name_ord',axis=1),how='left',left_on=['gtfs_station_name','gtfs_lines'],right_on=['stop_name','lines'])
crosswalk.drop(['stop_name','clean_lines_y','lines'],axis=1,inplace=True)
crosswalk.rename(columns={'stop_id':'gtfs_stop_id','stop_lat':'lat','stop_lon':'lon','clean_lines_x':'clean_lines'},inplace=True)
turnstile_remotes['turnstile_units'] = turnstile_remotes.groupby(['Station','Line Name'])['Remote'].transform(lambda x : ','.join(x.unique()))
turnstile_merge = turnstile_remotes.drop(['Remote','name_ord','clean_lines'],axis=1).drop_duplicates()
crosswalk = crosswalk.merge(turnstile_merge,how='left',left_on=['turnstile_station_name','turnstile_lines'],right_on=['Station','Line Name']).drop(['Station','Line Name'],axis=1)
## adding missing units
crosswalk.loc[(crosswalk.station_name == '34th St - Hudson Yards')&(crosswalk.clean_lines == '7'),['turnstile_units']] = ['R072']
crosswalk.loc[(crosswalk.station_name == '72nd St')&(crosswalk.clean_lines == 'Q'),['turnstile_units']] = ['R570']
crosswalk.loc[(crosswalk.station_name == '86th St')&(crosswalk.clean_lines == 'Q'),['turnstile_units']] = ['R571']
crosswalk.loc[(crosswalk.station_name == '96th St')&(crosswalk.clean_lines == 'Q'),['turnstile_units']] = ['R572']
crosswalk.to_csv('../../data/crosswalk/Master_crosswalk.csv',index=False)
if __name__ == "__main__":
main()
|
465977
|
from mxnet import gluon
from mxnet import autograd
from mxnet import nd
from mxnet import image
import mxnet as mx
from tqdm import tqdm
def load_data_fashion_mnist(batch_size, resize=None):
"""download the fashion mnist dataest and then load into memory"""
def transform_mnist(data, label):
if resize:
# resize to resize x resize
data = image.imresize(data, resize, resize)
        # change data from height x width x channel to channel x height x width
return nd.transpose(data.astype('float32'), (2,0,1))/255, label.astype('float32')
mnist_train = gluon.data.vision.FashionMNIST(root='./data',
train=True, transform=transform_mnist)
mnist_test = gluon.data.vision.FashionMNIST(root='./data',
train=False, transform=transform_mnist)
train_data = gluon.data.DataLoader(
mnist_train, batch_size, shuffle=True)
test_data = gluon.data.DataLoader(
mnist_test, batch_size, shuffle=False)
return (train_data, test_data)
def load_data_mnist(batch_size, resize=None):
"""download the fashion mnist dataest and then load into memory"""
def transform_mnist(data, label):
if resize:
# resize to resize x resize
data = image.imresize(data, resize, resize)
        # change data from height x width x channel to channel x height x width
return nd.transpose(data.astype('float32'), (2,0,1))/255, label.astype('float32')
mnist_train = gluon.data.vision.MNIST(root='./data',
train=True, transform=transform_mnist)
mnist_test = gluon.data.vision.MNIST(root='./data',
train=False, transform=transform_mnist)
train_data = gluon.data.DataLoader(
mnist_train, batch_size, shuffle=True)
test_data = gluon.data.DataLoader(
mnist_test, batch_size, shuffle=False)
return (train_data, test_data)
def try_gpu():
"""If GPU is available, return mx.gpu(0); else return mx.cpu()"""
try:
ctx = mx.gpu()
_ = nd.zeros((1,), ctx=ctx)
    except mx.base.MXNetError:
ctx = mx.cpu()
return ctx
def SGD(params, lr):
for param in params:
param[:] = param - lr * param.grad
def accuracy(output, label):
# print('accuracy',output, label)
return nd.mean(nd.argmax(output,axis=1)==label).asscalar()
def _get_batch(batch, ctx):
"""return data and label on ctx"""
if isinstance(batch, mx.io.DataBatch):
data = batch.data[0]
label = batch.label[0]
else:
data, label = batch
return data.as_in_context(ctx), label.as_in_context(ctx)
def evaluate_accuracy(data_iterator, net, ctx=mx.cpu()):
acc = 0.
if isinstance(data_iterator, mx.io.MXDataIter):
data_iterator.reset()
for i, batch in enumerate(data_iterator):
data, label = _get_batch(batch, ctx)
output = net(data)
        # print(output)  # debug output; prints the full prediction tensor for every batch
acc += accuracy(output, label)
return acc / (i+1)
def train(train_data, test_data, net, loss, trainer, ctx, num_epochs, print_batches=100):
"""Train a network"""
for epoch in range(num_epochs):
train_loss = 0.
train_acc = 0.
n = 0
for i, (data, label) in tqdm(enumerate(train_data), total=len(train_data), ncols=70, leave=False, unit='b'):
# for i, batch in enumerate(train_data):
# data, label = batch
one_hot_label = nd.one_hot(label,10)
label = label.as_in_context(ctx)
one_hot_label = one_hot_label.as_in_context(ctx)
data = data.as_in_context(ctx)
with autograd.record():
output = net(data)
L = loss(output, one_hot_label)
L.backward()
trainer.step(data.shape[0])
train_loss += nd.mean(L).asscalar()
# print('nd.mean(L).asscalar()',nd.mean(L).asscalar())
train_acc += accuracy(output, label)
n = i + 1
if print_batches and n % print_batches == 0:
print('output',output)
print("Batch %d. Loss: %f, Train acc %f" % (
n, train_loss/n, train_acc/n
))
# print('train_loss',train_loss)
test_acc = evaluate_accuracy(test_data, net, ctx)
print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
epoch, train_loss/n, train_acc/n, test_acc
))
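# --- Minimal usage sketch (illustrative, not part of the original utilities).
# Assumes MXNet with Gluon and tqdm are installed and ./data is writable; the
# tiny single-layer network below exists only to exercise the helpers above.
if __name__ == '__main__':
    batch_size = 256
    train_data, test_data = load_data_fashion_mnist(batch_size)
    ctx = try_gpu()
    net = gluon.nn.Sequential()
    net.add(gluon.nn.Dense(10))
    net.initialize(ctx=ctx)
    # train() feeds one-hot labels to the loss, so sparse_label must be False
    softmax_loss = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=False)
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
    train(train_data, test_data, net, softmax_loss, trainer, ctx, num_epochs=1)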
|
466128
|
import numpy as np
import torch
import torch.nn as nn
from .backbone import backbone_fn
from collections import OrderedDict
from utils.utils import non_max_suppression
class yolov3layer(nn.Module):
'''
    Detection decoder following YOLOv3.
'''
def __init__(self, args):
super(yolov3layer, self).__init__()
self.args = args
self.backbone = backbone_fn(args)
_out_filters = self.backbone.layers_out_filters
self.num_classes = len(args.classes_names)
final_out_filter0 = 3 * (5 + self.num_classes)
self.embedding0 = self._make_embedding([512, 1024], _out_filters[-1], final_out_filter0)
# embedding1
final_out_filter1 = 3 * (5 + self.num_classes)
self.embedding1_cbl = self._make_cbl(512, 256, 1)
# self.embedding1_upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.embedding1 = self._make_embedding([256, 512], _out_filters[-2] + 256, final_out_filter1)
# embedding2
final_out_filter2 = 3 * (5 + self.num_classes)
self.embedding2_cbl = self._make_cbl(256, 128, 1)
# self.embedding2_upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.embedding2 = self._make_embedding([128, 256], _out_filters[-3] + 128, final_out_filter2)
self.anchors = np.array(args.anchors)
self.num_layers = len(self.anchors) // 3
        # initialize the loss function here.
self.loss = yolo_loss(args)
def _make_cbl(self, _in, _out, ks):
''' cbl = conv + batch_norm + leaky_relu
'''
pad = (ks - 1) // 2 if ks else 0
return nn.Sequential(OrderedDict([
("conv", nn.Conv2d(_in, _out, kernel_size=ks, stride=1, padding=pad, bias=False)),
("bn", nn.BatchNorm2d(_out)),
("relu", nn.LeakyReLU(0.1)),
]))
def _make_embedding(self, filters_list, in_filters, out_filter):
m = nn.ModuleList([
self._make_cbl(in_filters, filters_list[0], 1),
self._make_cbl(filters_list[0], filters_list[1], 3),
self._make_cbl(filters_list[1], filters_list[0], 1),
self._make_cbl(filters_list[0], filters_list[1], 3),
self._make_cbl(filters_list[1], filters_list[0], 1),
self._make_cbl(filters_list[0], filters_list[1], 3)])
m.add_module("conv_out", nn.Conv2d(filters_list[1], out_filter, kernel_size=1,
stride=1, padding=0, bias=True))
return m
def _branch(self, _embedding, _in):
for i, e in enumerate(_embedding):
_in = e(_in)
if i == 4:
out_branch = _in
return _in, out_branch
def forward(self, img, label0, label1, label2):
if self.args.backbone_lr == 0:
with torch.no_grad():
x2, x1, x0 = self.backbone(img)
else:
x2, x1, x0 = self.backbone(img)
out0, out0_branch = self._branch(self.embedding0, x0)
# yolo branch 1
x1_in = self.embedding1_cbl(out0_branch)
x1_in = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)(x1_in)
x1_in = torch.cat([x1_in, x1], 1)
out1, out1_branch = self._branch(self.embedding1, x1_in)
# yolo branch 2
x2_in = self.embedding2_cbl(out1_branch)
# x2_in = self.embedding2_upsample(x2_in)
x2_in = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)(x2_in)
x2_in = torch.cat([x2_in, x2], 1)
out2, out2_branch = self._branch(self.embedding2, x2_in)
loss = self.loss((out0, out1, out2), (label0, label1, label2))
return loss
def detect(self, img, ori_shape):
with torch.no_grad():
x2, x1, x0 = self.backbone(img)
# forward the decoder block
out0, out0_branch = self._branch(self.embedding0, x0)
# yolo branch 1
x1_in = self.embedding1_cbl(out0_branch)
x1_in = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)(x1_in)
x1_in = torch.cat([x1_in, x1], 1)
out1, out1_branch = self._branch(self.embedding1, x1_in)
# yolo branch 2
x2_in = self.embedding2_cbl(out1_branch)
x2_in = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)(x2_in)
x2_in = torch.cat([x2_in, x2], 1)
out2, out2_branch = self._branch(self.embedding2, x2_in)
dets_, images_, classes_= yolo_eval((out0, out1, out2), self.anchors, self.num_classes, ori_shape)
return dets_, images_, classes_
def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
'''Process Conv layer output'''
box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,
anchors, num_classes, input_shape)
boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
boxes = boxes.view([-1, 4])
box_scores = box_confidence * box_class_probs
box_scores = box_scores.view(-1, num_classes)
return boxes.view(feats.size(0), -1, 4), box_scores.view(feats.size(0), -1, num_classes)
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
'''Get corrected boxes'''
image_shape = image_shape.cpu()
box_yx = torch.stack((box_xy[..., 1], box_xy[..., 0]), dim=4)
box_hw = torch.stack((box_wh[..., 1], box_wh[..., 0]), dim=4)
new_shape = torch.round(image_shape * torch.min(input_shape / image_shape))
offset = (input_shape - new_shape) / 2. / input_shape
scale = input_shape / new_shape
box_yx = (box_yx - offset) * scale
box_hw *= scale
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = torch.stack([
box_mins[..., 0], # y_min
box_mins[..., 1], # x_min
box_maxes[..., 0], # y_max
box_maxes[..., 1] # x_max
], dim=4)
# Scale boxes back to original image shape.
boxes *= torch.cat([image_shape, image_shape]).view(1, 1, 1, 1, 4)
return boxes
def yolo_eval(yolo_outputs,
anchors,
num_classes,
image_shape,
score_threshold=.2,
nms_threshold=.3):
"""Evaluate YOLO model on given input and return filtered boxes."""
num_layers = len(yolo_outputs)
max_per_image = 100
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]] # default setting
input_shape = torch.Tensor([yolo_outputs[0].shape[2] * 32, yolo_outputs[0].shape[3] * 32]).type_as(yolo_outputs[0])
input_shape = input_shape.cpu()
boxes = []
box_scores = []
# output all the boxes and scores in two lists
for l in range(num_layers):
_boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
boxes.append(_boxes.cpu())
box_scores.append(_box_scores.cpu())
# concatenate data based on batch size
boxes = torch.cat(boxes, dim=1) # torch.Size([1, 10647, 4])
box_scores = torch.cat(box_scores, dim=1) # torch.Size([1, 10647, num_classes])
dets_ = []
classes_ = []
images_ = []
for i in range(boxes.size(0)):
mask = box_scores[i] >= score_threshold
img_dets = []
img_classes = []
img_images = []
for c in range(num_classes):
# tf.boolean_mask(boxes, mask[:, c])
class_boxes = boxes[i][mask[:, c]]
if len(class_boxes) == 0:
continue
class_box_scores = box_scores[i][:, c][mask[:, c]]
_, order = torch.sort(class_box_scores, 0, True)
# do nms here.
cls_dets = torch.cat((class_boxes, class_box_scores.view(-1, 1)), 1)
cls_dets = cls_dets[order]
keep = non_max_suppression(cls_dets.cpu().numpy(), nms_threshold)
keep = torch.from_numpy(np.array(keep))
cls_dets = cls_dets[keep.view(-1).long()]
img_dets.append(cls_dets)
img_classes.append(torch.ones(cls_dets.size(0)) * c)
img_images.append(torch.ones(cls_dets.size(0)) * i)
# Limit to max_per_image detections *over all classes*
if len(img_dets) > 0:
img_dets = torch.cat(img_dets, dim=0)
img_classes = torch.cat(img_classes, dim=0)
img_images = torch.cat(img_images, dim=0)
if max_per_image > 0:
if img_dets.size(0) > max_per_image:
_, order = torch.sort(img_dets[:, 4], 0, True)
keep = order[:max_per_image]
img_dets = img_dets[keep]
img_classes = img_classes[keep]
img_images = img_images[keep]
dets_.append(img_dets)
classes_.append(img_classes)
images_.append(img_images)
if not dets_:
        return torch.Tensor(dets_), torch.Tensor(images_), torch.Tensor(classes_)
dets_ = torch.cat(dets_, dim=0)
images_ = torch.cat(images_, dim=0)
classes_ = torch.cat(classes_, dim=0)
return dets_, images_, classes_
def box_iou(b1, b2):
'''Return iou tensor
Parameters
----------
b1: tensor, shape=(i1,...,iN, 4), xywh
b2: tensor, shape=(j, 4), xywh
Returns
-------
iou: tensor, shape=(i1,...,iN, j)
'''
# Expand dim to apply broadcasting.
b1 = b1.unsqueeze(3)
b1_xy = b1[..., :2]
b1_wh = b1[..., 2:4]
b1_wh_half = b1_wh / 2.
b1_mins = b1_xy - b1_wh_half
b1_maxes = b1_xy + b1_wh_half
# if b2 is an empty tensor: then iou is empty
if b2.shape[0] == 0:
iou = torch.zeros(b1.shape[0:4]).type_as(b1)
else:
b2 = b2.view(1, 1, 1, b2.size(0), b2.size(1))
# Expand dim to apply broadcasting.
b2_xy = b2[..., :2]
b2_wh = b2[..., 2:4]
b2_wh_half = b2_wh / 2.
b2_mins = b2_xy - b2_wh_half
b2_maxes = b2_xy + b2_wh_half
intersect_mins = torch.max(b1_mins, b2_mins)
intersect_maxes = torch.min(b1_maxes, b2_maxes)
intersect_wh = torch.clamp(intersect_maxes - intersect_mins, min=0)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b1_area = b1_wh[..., 0] * b1_wh[..., 1]
b2_area = b2_wh[..., 0] * b2_wh[..., 1]
iou = intersect_area / (b1_area + b2_area - intersect_area)
return iou
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
if not calc_loss:
feats = feats.cpu()
input_shape = input_shape.cpu()
num_anchors = len(anchors)
anchors_tensor = torch.from_numpy(anchors).view(1, 1, 1, num_anchors, 2).type_as(feats)
grid_shape = (feats.shape[2:4])
    grid_y = torch.arange(0, grid_shape[0]).view(-1, 1, 1, 1).expand(grid_shape[0], grid_shape[1], 1, 1)
    grid_x = torch.arange(0, grid_shape[1]).view(1, -1, 1, 1).expand(grid_shape[0], grid_shape[1], 1, 1)
grid = torch.cat([grid_x, grid_y], dim=3).unsqueeze(0).type_as(feats)
feats = feats.view(-1, num_anchors, num_classes + 5, grid_shape[0], \
grid_shape[1]).permute(0, 3, 4, 1, 2).contiguous()
    # Adjust predictions to each spatial grid point and anchor size.
box_xy = (torch.sigmoid(feats[..., :2]) + grid) / torch.tensor(grid_shape).view(1, 1, 1, 1, 2).type_as(feats) #
box_wh = torch.exp(feats[..., 2:4]) * anchors_tensor / input_shape.view(1, 1, 1, 1, 2)
box_confidence = torch.sigmoid(feats[..., 4:5])
box_class_probs = torch.sigmoid(feats[..., 5:])
    if calc_loss:
return grid, feats, box_xy, box_wh
return box_xy, box_wh, box_confidence, box_class_probs
class yolo_loss(nn.Module):
def __init__(self, args):
super(yolo_loss, self).__init__()
self.args = args
self.anchors = np.array(args.anchors)
self.num_layers = len(self.anchors) // 3
self.anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if self.num_layers == 3 else [[3, 4, 5], [1, 2, 3]]
self.num_classes = len(args.classes_names)
self.ignore_thresh = 0.5
        self.mse_loss = nn.MSELoss(reduction='none')
        self.bce_loss = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, yolo_outputs, y_true):
input_shape = torch.Tensor([yolo_outputs[0].shape[2] * 32, yolo_outputs[0].shape[3] * 32]).type_as(yolo_outputs[0])
grid_shapes = [torch.Tensor([output.shape[2], output.shape[3]]).type_as(yolo_outputs[0]) for output in yolo_outputs]
bs = yolo_outputs[0].size(0)
loss_xy = 0
loss_wh = 0
loss_conf = 0
loss_clss = 0
for l in range(self.num_layers):
object_mask = y_true[l][..., 4:5]
true_class_probs = y_true[l][..., 5:]
grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l], self.anchors[self.anchor_mask[l]],
self.num_classes, input_shape, calc_loss=True)
pred_box = torch.cat([pred_xy, pred_wh], dim=4)
# Darknet raw box to calculate loss.
raw_true_xy = y_true[l][..., :2] * grid_shapes[l].view(1, 1, 1, 1, 2) - grid
raw_true_wh = torch.log(y_true[l][..., 2:4] / torch.Tensor(self.anchors[self.anchor_mask[l]]).
type_as(pred_box).view(1, 1, 1, self.num_layers, 2) *
input_shape.view(1, 1, 1, 1, 2))
raw_true_wh.masked_fill_(object_mask.expand_as(raw_true_wh) == 0, 0)
box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]
# Find ignore mask, iterate over each of batch.
# ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
best_ious = []
for b in range(bs):
true_box = y_true[l][b, ..., 0:4][object_mask[b, ..., 0] == 1]
iou = box_iou(pred_box[b], true_box)
best_iou, _ = torch.max(iou, dim=3)
best_ious.append(best_iou)
best_ious = torch.stack(best_ious, dim=0).unsqueeze(4)
ignore_mask = (best_ious < self.ignore_thresh).float()
# binary_crossentropy is helpful to avoid exp overflow.
xy_loss = torch.sum(object_mask * box_loss_scale * self.bce_loss(raw_pred[..., 0:2], raw_true_xy)) / bs
wh_loss = torch.sum(object_mask * box_loss_scale * self.mse_loss(raw_pred[..., 2:4], raw_true_wh)) / bs
confidence_loss = (torch.sum(self.bce_loss(raw_pred[..., 4:5], object_mask) * object_mask +
(1 - object_mask) * self.bce_loss(raw_pred[..., 4:5], object_mask) * ignore_mask)) / bs
class_loss = torch.sum(object_mask * self.bce_loss(raw_pred[..., 5:], true_class_probs)) / bs
loss_xy += xy_loss
loss_wh += wh_loss
loss_conf += confidence_loss
loss_clss += class_loss
loss = loss_xy + loss_wh + loss_conf + loss_clss
# print('loss %.3f, xy %.3f, wh %.3f, conf %.3f, class_loss: %.3f'
# %(loss.item(), xy_loss.item(), wh_loss.item(), confidence_loss.item(), class_loss.item()))
return loss.unsqueeze(0), loss_xy.unsqueeze(0), loss_wh.unsqueeze(0), loss_conf.unsqueeze(0), \
loss_clss.unsqueeze(0)
|
466147
|
from django.test import TestCase
from django.db import IntegrityError
from django.core.exceptions import ValidationError
from datetime import datetime
from datetime import timedelta
from .models import TestBasicInformation as BasicInformation
from .models import TestExperience as Experience
from .models import TestEducation as Education
from .models import TestProject as Project
from .models import TestPublication as Publication
from .models import TestLanguage as Language
class BasicInformationTestCase(TestCase):
def test_multiple_basic_information__raises_IntegrityError(self):
with self.assertRaises(IntegrityError):
BasicInformation.objects.create(name="James")
BasicInformation.objects.create(name="John")
class EducationTestCase(TestCase):
def test_start_date_after_end_date__raises_ValidationError(self):
with self.assertRaises(ValidationError):
today = datetime.today()
yesterday = datetime.now() - timedelta(hours=24)
Education.objects.create(name="blah",
abbreviation="blah",
start_date=today,
end_date=yesterday,
major="blah",
gpa="4.0")
class ExperienceTestCase(TestCase):
def setUp(self):
self.experience = Experience.objects.create(company="blah",
description="blah")
def test_start_date_after_end_date__raises_ValidationError(self):
with self.assertRaises(ValidationError):
today = datetime.today()
yesterday = datetime.now() - timedelta(hours=24)
Experience.objects.create(company="blah",
role="blah",
start_date=today,
end_date=yesterday)
    def test_experience_with_languages_assigned__returns_languages(self):
python = Language.objects.create(name="Python")
django = Language.objects.create(name="Django")
python.experience.add(self.experience)
django.experience.add(self.experience)
expected = {self.experience: [python, django]}
self.assertDictEqual(expected, self.experience.get_languages())
class ProjectTestCase(TestCase):
def setUp(self):
self.project = Project.objects.create(name="blah",
description="blah")
def test_start_date_after_end_date__raises_ValidationError(self):
with self.assertRaises(ValidationError):
today = datetime.today()
yesterday = datetime.now() - timedelta(hours=24)
Project.objects.create(name="blah",
description="blah",
start_date=today,
end_date=yesterday)
def test_project_with_languages_assigned__returns_languages(self):
python = Language.objects.create(name="Python")
django = Language.objects.create(name="Django")
python.projects.add(self.project)
django.projects.add(self.project)
expected = {self.project: [python, django]}
self.assertDictEqual(expected, self.project.get_languages())
class PublicationTestCase(TestCase):
pass
class LanguageTestCase(TestCase):
pass
|
466166
|
import numpy as np
import hypers as hp
from typing import Tuple, List
__all__ = ['decompose', 'vca']
class decompose:
""" Provides instances of decomposition classes """
def __init__(self, X: 'hp.hparray'):
self.vca = vca(X)
class vca:
def __init__(self, X: 'hp.hparray'):
self.X = X
self.n_components = None
self.spcs = None
self.coords = None
def calculate(self, n_components: int = 4,
input_snr: float = 0) -> Tuple[np.ndarray, List[int]]:
self.n_components = n_components
Ae, indice, Yp = self._calcluate_vca(self.X.collapse().T, n_components, snr_input=input_snr)
index = [0] * n_components
for component in range(n_components):
index[component] = np.unravel_index(indice[component], self.X.shape[:-1])
self.spcs = Ae
self.coords = index
return Ae, index
@staticmethod
def _estimate_snr(Y: np.ndarray,
r_m: np.ndarray,
x: np.ndarray) -> np.ndarray:
[L, N] = Y.shape # L number of bands (channels), N number of pixels
[p, N] = x.shape # p number of endmembers (reduced dimension)
P_y = np.sum(Y ** 2) / float(N)
P_x = np.sum(x ** 2) / float(N) + np.sum(r_m ** 2)
snr_est = 10 * np.log10((P_x - p / L * P_y) / (P_y - P_x))
return snr_est
def _calcluate_vca(self, Y, R, verbose=True, snr_input=0.0):
# Vertex Component Analysis
#
# Ae, indice, Yp = vca(Y,R,verbose = True,snr_input = 0)
#
# ------- Input variables -------------
# Y - matrix with dimensions L(channels) x N(pixels)
# each pixel is a linear mixture of R endmembers
# signatures Y = M x s, where s = gamma x alfa
        #          gamma is an illumination perturbation factor and
# alfa are the abundance fractions of each endmember.
# R - positive integer number of endmembers in the scene
#
# ------- Output variables -----------
# Ae - estimated mixing matrix (endmembers signatures)
# indice - pixels that were chosen to be the most pure
# Yp - Data matrix Y projected.
#
# ------- Optional parameters---------
# snr_input - (float) signal to noise ratio (dB)
# v - [True | False]
# ------------------------------------
#
# Author: <NAME> (<EMAIL>)
# This code is a translation of a matlab code provided by
# <NAME> (<EMAIL>) and <NAME> (<EMAIL>)
# available at http://www.lx.it.pt/~bioucas/code.htm under a non-specified Copyright (c)
# Translation of last version at 22-February-2018 (Matlab version 2.1 (7-May-2004))
#
# more details on:
# <NAME> and <NAME>
# "Vertex Component Analysis: A Fast Algorithm to Unmix Hyperspectral Data"
        # submitted to IEEE Trans. Geosci. Remote Sensing, vol. .., no. .., pp. .-., 2004
#
#
#############################################
# Initializations
#############################################
if len(Y.shape) != 2:
raise ValueError('Input data must be of size L (number of bands i.e. channels) by N (number of pixels)')
[L, N] = Y.shape # L number of bands (channels), N number of pixels
R = int(R)
        if R <= 0 or R > L:
raise ValueError('ENDMEMBER parameter must be integer between 1 and L')
#############################################
# SNR Estimates
#############################################
if snr_input == 0:
y_m = np.mean(Y, axis=1, keepdims=True)
Y_o = Y - y_m # data with zero-mean
Ud = np.linalg.svd(np.dot(Y_o, Y_o.T) / float(N))[0][:, :R] # computes the R-projection matrix
x_p = np.dot(Ud.T, Y_o) # project the zero-mean data onto p-subspace
            SNR = self._estimate_snr(Y, y_m, x_p)
if verbose:
print("SNR estimated = {}[dB]".format(SNR))
else:
SNR = snr_input
if verbose:
print("input SNR = {}[dB]\n".format(SNR))
SNR_th = 15 + 10 * np.log10(R)
#############################################
# Choosing Projective Projection or
# projection to p-1 subspace
#############################################
if SNR < SNR_th:
if verbose:
print("... Select proj. to R-1")
d = R - 1
if snr_input == 0: # it means that the projection is already computed
Ud = Ud[:, :d]
else:
y_m = np.mean(Y, axis=1, keepdims=True)
Y_o = Y - y_m # data with zero-mean
Ud = np.linalg.svd(np.dot(Y_o, Y_o.T) / float(N))[0][:, :d] # computes the p-projection matrix
                x_p = np.dot(Ud.T, Y_o)  # project the zero-mean data onto p-subspace
Yp = np.dot(Ud, x_p[:d, :]) + y_m # again in dimension L
x = x_p[:d, :] # x_p = Ud.T * Y_o is on a R-dim subspace
c = np.amax(np.sum(x ** 2, axis=0)) ** 0.5
y = np.vstack((x, c * np.ones((1, N))))
else:
if verbose:
print("... Select the projective proj.")
d = R
Ud = np.linalg.svd(np.dot(Y, Y.T) / float(N))[0][:, :d] # computes the p-projection matrix
x_p = np.dot(Ud.T, Y)
Yp = np.dot(Ud, x_p[:d, :]) # again in dimension L (note that x_p has no null mean)
x = np.dot(Ud.T, Y)
u = np.mean(x, axis=1, keepdims=True) # equivalent to u = Ud.T * r_m
y = x / np.dot(u.T, x)
#############################################
# VCA algorithm
#############################################
indice = np.zeros(R, dtype=int)
A = np.zeros((R, R))
A[-1, 0] = 1
for i in range(R):
            w = np.random.rand(R, 1)
f = w - np.dot(A, np.dot(np.linalg.pinv(A), w))
f = f / np.linalg.norm(f)
v = np.dot(f.T, y)
indice[i] = np.argmax(np.absolute(v))
A[:, i] = y[:, indice[i]] # same as x(:,indice(i))
Ae = Yp[:, indice]
return Ae, indice, Yp
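# --- Minimal usage sketch (illustrative): runs the VCA routine on a synthetic
# L x N mixture built from made-up endmembers, calling the internal
# _calcluate_vca directly so no hypers array wrapper is needed. Requires the
# module's imports (hypers, numpy) to be available.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    L_bands, N_pixels, R_end = 50, 500, 3
    M = rng.rand(L_bands, R_end)                    # synthetic endmember signatures
    s = rng.dirichlet(np.ones(R_end), N_pixels).T   # synthetic abundance fractions
    Y = M @ s + 0.001 * rng.randn(L_bands, N_pixels)
    estimator = vca.__new__(vca)                    # bare instance, __init__ not needed here
    Ae, indice, Yp = estimator._calcluate_vca(Y, R_end, verbose=False)
    print('endmember matrix:', Ae.shape, 'pure-pixel indices:', indice)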
|
466179
|
class temporal:
'''This is an abstract class'''
class MensajeOut:
tipo='normal'
mensaje=''
codigo=''
class MensajeTs:
instruccion=''
identificador=''
tipo=''
referencia=''
dimension=''
class Tabla_run:
def __init__(self, basepadre, nombre, atributos=[]):
self.basepadre = basepadre
self.nombre = nombre
self.atributos = atributos
class constraint_name:
unique = None #CONSTRAINT UNIQUE
anulable = None #CONSTRAINT NOT NULL
    default = None #CONSTRAINT DEFAULT
    primary = None #CONSTRAINT PRIMARY
    foreign = None #CONSTRAINT FOREIGN
check = None #CONSTRAINT CHECK
class Columna_run:
nombre = ''
tipo = ''
size = None
precision = None
unique = None #CONSTRAINT UNIQUE
anulable = None #CONSTRAINT NOT NULL
    default = None #CONSTRAINT DEFAULT
    primary = None #CONSTRAINT PRIMARY
    foreign = None #CONSTRAINT FOREIGN
refence = None #REFERENCES
check = None #CONSTRAINT CHECK
constraint = None
def __init__(self):
self.constraint = constraint_name()
self.constraint.unique=None
self.constraint.anulable=None
self.constraint.default=None
self.constraint.primary=None
self.constraint.foreign=None
self.constraint.refence=None
self.constraint.check=None
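# --- Minimal usage sketch (illustrative): builds one runtime column and
# attaches it to a table description; names and types are made up.
if __name__ == '__main__':
    col = Columna_run()
    col.nombre = 'id'
    col.tipo = 'integer'
    col.constraint.primary = True
    tabla = Tabla_run('test_db', 'usuarios', [col])
    print(tabla.nombre, [c.nombre for c in tabla.atributos])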
|
466199
|
from .custom_driver import client
from .utils import log
from enum import Enum
def close_modal(browser: client) -> None:
el = browser.find("//div[@class='modalContent']")
el = el.find_element_by_xpath(".//a[@class='closeWindow clickable']")
browser.click(el)
def close_welcome_screen(browser: client) -> None:
wc = browser.find("//div[contains(@class, 'welcomeScreen')]")
log("closing welcome-screen")
el = wc.find_element_by_xpath(".//a[@class='closeWindow clickable']")
browser.click(el)
def check_resources(browser: client) -> dict:
resources_list = ["wood", "clay", "iron", "crop"]
resources = {}
for res in resources_list:
find_resources = browser.find("//div[@class='stockContainer {0}']".format(res))
find_resources = find_resources.find_element_by_xpath(
".//div[contains(@class, 'progressbar')]"
)
value = int(find_resources.get_attribute("value"))
resources[res] = value
return resources
class shortcut(Enum):
marketplace = 0
barrack = 1
stable = 2
workshop = 3
def open_shortcut(browser: client, sc: shortcut) -> None:
shortcut_link = browser.find("//div[@id='quickLinks']")
shortcut_link = shortcut_link.find_element_by_xpath(
".//div[contains(@class, 'slotWrapper')]"
)
link = shortcut_link.find_elements_by_xpath(
".//div[contains(@class, 'slotContainer')]"
)
browser.click(link[sc.value], 1)
class overview(Enum):
overview = "optimizely_maintab_Overview"
resources = "optimizely_maintab_Resources"
warehouse = "optimizely_maintab_Store"
culture_points = "optimizely_maintab_CulturePoints"
units = "optimizely_maintab_Troops"
oases = "optimizely_maintab_Oases"
def open_village_overview(browser: client, tab: overview) -> None:
btn = browser.find("//a[@id='villageOverview']")
browser.click(btn, 1)
navi_tab = browser.find(f"//a[@id='{tab.value}']")
classes = navi_tab.get_attribute("class")
if "inactive" in classes:
        browser.click(navi_tab, 2)
def old_shortcut(browser: client, shortcut: str) -> None:
shortcut_dict = {"marketplace": 0, "barrack": 1, "stable": 2, "workshop": 3}
shortcut_link = browser.find("//div[@id='quickLinks']")
shortcut_link = shortcut_link.find_element_by_xpath(
".//div[contains(@class, 'slotWrapper')]"
)
link = shortcut_link.find_elements_by_xpath(
".//div[contains(@class, 'slotContainer')]"
)
browser.click(link[shortcut_dict[shortcut.lower()]], 1)
|
466217
|
from canvas.tests.tests_helpers import CanvasTestCase, create_user, create_staff, create_group
class TestAuthorization(CanvasTestCase):
def test_user_cannot_moderate_group(self):
normal_user, group = create_user(), create_group()
self.assertFalse(group.can_moderate(normal_user))
def test_user_cannot_disable_group(self):
normal_user, group = create_user(), create_group()
self.assertFalse(group.can_disable(normal_user))
def test_user_cannot_modify_group(self):
normal_user, group = create_user(), create_group()
self.assertFalse(group.can_modify(normal_user))
def test_moderator_cannot_modify_group(self):
normal_user, group = create_user(), create_group()
group.moderators.add(normal_user)
self.assertFalse(group.can_modify(normal_user))
def test_staff_cannot_modify_group(self):
staff_user, group = create_staff(), create_group()
self.assertFalse(group.can_modify(staff_user))
def test_founder_can_modify_group(self):
normal_user, group = create_user(), create_group()
group.founder = normal_user
self.assertTrue(group.can_modify(normal_user))
def test_founder_can_moderate_group(self):
normal_user, group = create_user(), create_group()
group.founder = normal_user
self.assertTrue(group.can_moderate(normal_user))
def test_moderator_can_moderate_group(self):
normal_user, group = create_user(), create_group()
group.moderators.add(normal_user)
self.assertTrue(group.can_moderate(normal_user))
def test_founder_cannot_disable_group(self):
normal_user, group = create_user(), create_group()
group.founder = normal_user
self.assertFalse(group.can_disable(normal_user))
def test_staff_cannot_moderate_group(self):
staff_user, group = create_staff(), create_group()
self.assertFalse(group.can_moderate(staff_user))
def test_staff_can_disable_group(self):
staff_user, group = create_staff(), create_group()
self.assertTrue(group.can_disable(staff_user))
|
466218
|
import lasagne
import lasagne.layers as L
import lasagne.nonlinearities as NL
import lasagne.init as LI
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.lasagne_layers import batch_norm
from rllab.core.serializable import Serializable
from rllab.misc import ext
from rllab.policies.base import Policy
class DeterministicMLPPolicy(Policy, LasagnePowered, Serializable):
def __init__(
self,
env_spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=NL.rectify,
hidden_W_init=LI.HeUniform(),
hidden_b_init=LI.Constant(0.),
output_nonlinearity=NL.tanh,
output_W_init=LI.Uniform(-3e-3, 3e-3),
output_b_init=LI.Uniform(-3e-3, 3e-3),
bn=False):
Serializable.quick_init(self, locals())
l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim))
l_hidden = l_obs
if bn:
l_hidden = batch_norm(l_hidden)
for idx, size in enumerate(hidden_sizes):
l_hidden = L.DenseLayer(
l_hidden,
num_units=size,
W=hidden_W_init,
b=hidden_b_init,
nonlinearity=hidden_nonlinearity,
name="h%d" % idx
)
if bn:
l_hidden = batch_norm(l_hidden)
l_output = L.DenseLayer(
l_hidden,
num_units=env_spec.action_space.flat_dim,
W=output_W_init,
b=output_b_init,
nonlinearity=output_nonlinearity,
name="output"
)
# Note the deterministic=True argument. It makes sure that when getting
# actions from single observations, we do not update params in the
# batch normalization layers
action_var = L.get_output(l_output, deterministic=True)
self._output_layer = l_output
self._f_actions = ext.compile_function([l_obs.input_var], action_var)
super(DeterministicMLPPolicy, self).__init__(env_spec)
LasagnePowered.__init__(self, [l_output])
def get_action(self, observation):
flat_obs = self.observation_space.flatten(observation)
action = self._f_actions([flat_obs])[0]
return action, dict()
def get_actions(self, observations):
flat_obs = self.observation_space.flatten_n(observations)
return self._f_actions(flat_obs), dict()
def get_action_sym(self, obs_var):
return L.get_output(self._output_layer, obs_var)
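# --- Minimal usage sketch (illustrative): assumes rllab's Box2D CartpoleEnv is
# installed; mirrors how rllab's DDPG examples construct this policy.
if __name__ == '__main__':
    from rllab.envs.box2d.cartpole_env import CartpoleEnv
    from rllab.envs.normalized_env import normalize
    env = normalize(CartpoleEnv())
    policy = DeterministicMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
    obs = env.reset()
    action, _ = policy.get_action(obs)
    print('action:', action)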
|
466295
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
from courses.models import Course
from twython import Twython
from twython.exceptions import TwythonError
from telegram import Bot, ParseMode
@receiver(post_save, sender=Course)
def send_message_telegram(sender, instance, **kwargs):
bot = Bot(token=settings.TELEGRAM_TOKEN)
bot.send_message(
chat_id="@udemy_free_courses",
text=instance.message,
parse_mode=ParseMode.HTML
)
@receiver(post_save, sender=Course)
def send_message_twitter(sender, instance, **kwargs):
twitter = Twython(settings.TWITTER_API_KEY,
settings.TWITTER_API_SECRET,
settings.TWITTER_OAUTH_TOKEN,
settings.TWITTER_OAUTH_TOKEN_SECRET)
try:
twitter.update_status(status=instance.message)
except TwythonError as error:
        # only ignore 403 responses; re-raise anything else
        if error.error_code != 403:
            raise
|
466343
|
from dolfin import *
from dolfin_adjoint import *
import windse
import numpy as np
import copy
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
parameters['form_compiler']['quadrature_degree'] = 6
set_log_level(15)
### Create an Instance of the Options ###
windse.initialize("params3D.yaml")
### Generate Simple Domain ###
dom = windse.BoxDomain()
dom.Save(val=0)
### Warp Mesh. This is always fine since the deltaZ doesn't change with x ###
dom.Warp(200,0.75)
dom.Save(val=1)
### Refine in the middle. This creates hanging nodes. ###
region = [[-100,100],[-100,100],[0,150]]
dom.Refine(1,region=region)
dom.Save(val=2)
# # dom.boundary_markers = MeshFunction("double", dom.mesh, dom.mesh.topology().dim() - 1)
# for facet in facets(dom.mesh):
# ind = facet.index()
# if dom.boundary_markers[ind] > 6:
# dom.boundary_markers.set_value(ind,0)
### Save a copy of the mesh for returning to this state ###
x_temp = copy.deepcopy(dom.mesh.coordinates()[:,0])
y_temp = copy.deepcopy(dom.mesh.coordinates()[:,1])
z_temp = copy.deepcopy(dom.mesh.coordinates()[:,2])
### Define the transformation. It has very large gradients
def bed(x,y):
return -100*np.sin(np.pi*x/250.0)*np.sin(np.pi*y/250.0)
# return 150*np.sin(np.pi*x/400.0)
def transform(x,y,z,z0,z1):
return (z1-bed(x,y))*(z-z0)/(z1-z0)+bed(x,y)
### Deform the mesh by directly changing the mesh coordinates
z_new = transform(x_temp,y_temp,z_temp,0.,500.)
dom.mesh.coordinates()[:,0]=x_temp
dom.mesh.coordinates()[:,1]=y_temp
dom.mesh.coordinates()[:,2]=z_new
dom.mesh.bounding_box_tree().build(dom.mesh)
dom.Save(val=3)
### Return to the original mesh ###
dom.mesh.coordinates()[:,0]=x_temp
dom.mesh.coordinates()[:,1]=y_temp
dom.mesh.coordinates()[:,2]=z_temp
dom.mesh.bounding_box_tree().build(dom.mesh)
dom.Save(val=4)
### Create a boundary mesh ###
b_mesh = BoundaryMesh(dom.mesh,"exterior")
### Move the boundary mesh ###
x_hd = copy.deepcopy(b_mesh.coordinates()[:,0])
y_hd = copy.deepcopy(b_mesh.coordinates()[:,1])
z_hd = copy.deepcopy(b_mesh.coordinates()[:,2])
z_hd = transform(x_hd,y_hd,z_hd,0.,500.)
b_mesh.coordinates()[:,0]=x_hd
b_mesh.coordinates()[:,1]=y_hd
b_mesh.coordinates()[:,2]=z_hd
b_mesh.bounding_box_tree().build(b_mesh)
plot(b_mesh)
plt.show()
### Move mesh using a hd boundary mesh ###
ALE.move(dom.mesh,b_mesh)
dom.Save(val=5)
|
466353
|
from flask import Flask, render_template, request, redirect, url_for, session
from tinydb import TinyDB, Query
from mastodon import Mastodon
from wordcloud import WordCloud
from datetime import datetime
from numpy.random import *
from xml.sax.saxutils import unescape
import re
import json
import requests
import MeCab
app = Flask(__name__)
app.config.from_object('config')
db = TinyDB('db.json')
qwy = Query()
m = MeCab.Tagger()
target_hinshi = ['名詞', '形容詞', '形容動詞']
exclude = ['非自立', '接尾']
with open("stopwordlist.txt") as f:
swl = [s.strip() for s in f.readlines()]
def register_app(host):
data = {
'client_name': 'TootCloud',
'redirect_uris': app.config['SITE_URL'] + '/callback',
'scopes': 'read:accounts read:statuses write:media write:statuses',
'website': app.config['SITE_URL']
}
resp = requests.post("https://{host}/api/v1/apps".format(host=host), data=data, headers={'User-Agent': "TootCloud"})
resp.raise_for_status()
return resp.json()
def get_token(host, client_id, client_secret, code):
data = {
'grant_type': 'authorization_code',
'redirect_uri': app.config['SITE_URL'] + '/callback',
'client_id': client_id,
'client_secret': client_secret,
'code': code
}
resp = requests.post("https://{host}/oauth/token".format(host=host), data=data, headers={'User-Agent': "TootCloud"})
resp.raise_for_status()
return resp.json()
def checkStatus():
mstdn = Mastodon(
client_id = session['client_id'],
client_secret = session['client_secret'],
access_token = session['access_token'],
api_base_url = session['uri'])
account = mstdn.account_verify_credentials()
id = account["id"]
acct = account["acct"]
scnt = account["statuses_count"]
return(id, scnt, acct)
def reform(text):
text = re.sub(":\w+:", "", text)
text = re.sub("</?p>", "", text)
text = re.sub("<a href=\".*\".*>(.*)</a>", "", text)
text = re.sub("</?span.*>", "", text)
text = re.sub("</?div.*>", "", text)
text = re.sub("<br\s?/?>", '\n', text)
    text = unescape(text, {"'": "'", """: '"'})
return(text)
def getToots(id, lim, max, vis=["public"]):
text = ""
mstdn = Mastodon(
client_id = session['client_id'],
client_secret = session['client_secret'],
access_token = session['access_token'],
api_base_url = session['uri'])
ltl = mstdn.account_statuses(id, limit=lim, max_id=max)
for row in ltl:
if row["reblog"] == None:
if row["visibility"] in vis:
text += reform(row["content"]) + "\n"
toot_id = row["id"]
return(text, toot_id)
def create_at(time):
id = int(time) * 1000 + randint(1000)
id = id << 16
id += randint(2**16)
return(id)
def wc(ttl, vis, exl):
t = ttl
check = checkStatus()
print(check)
if check[1] < t:
t = check[1]
id = check[0]
toots = ""
max = None
while t > 0:
print(t, max)
if t > 40:
data = getToots(id, 40, max, vis)
else:
data = getToots(id, t, max, vis)
t -= 40
# print(data[0])
toots += data[0]
max = int(data[1]) - 1
kekka = ""
for chunk in m.parse(toots).splitlines()[:-1]:
(surface, feature) = chunk.split('\t')
if feature.split(',')[0] in target_hinshi:
if feature.split(',')[1] not in exl:
if feature.split(',')[0] == '名詞':
if surface not in exl:
kekka += surface + "\n"
else:
if feature.split(',')[6] not in exl:
kekka += feature.split(',')[6] + "\n"
if kekka == "":
return None
else:
wordcloud = WordCloud(background_color="white", font_path="./Kazesawa-Regular.ttf", width=1024, height=768, collocations=False, stopwords="").generate(kekka)
fn = create_at(datetime.now().strftime("%s"))
wordcloud.to_file("./static/out/"+str(fn)+".png")
return(fn)
@app.route('/')
def index():
return render_template('index.html', site_url=app.config['SITE_URL'])
@app.route('/login', methods=['GET', 'POST'])
def login():
if session.get('access_token') is not None:
return redirect(url_for('setting'))
else:
try:
instance = request.form['instance']
except:
instance = ""
if instance != "":
instance = re.sub(r'https?://', "", instance)
instance = re.sub(r'/$', "", instance)
instance = instance.encode('idna').decode('utf-8')
try:
gotjson = json.loads(requests.get("https://"+instance+"/api/v1/instance", headers={'User-Agent': "TootCloud"}).text)
if gotjson['uri'] == instance:
client_data = db.search(qwy.uri == instance)
if len(client_data) == 0:
rspns = register_app(instance)
db.insert({'uri': instance, 'id': rspns['id'], 'client_id': rspns['client_id'], 'client_secret': rspns['client_secret']})
client_data = db.search(qwy.uri == instance)
client_data = client_data[0]
session['uri'] = instance
session['client_id'] = client_data['client_id']
session['client_secret'] = client_data['client_secret']
return render_template('login2.html', status="back", site_url=app.config['SITE_URL'])
else:
return render_template('login.html', status="back", login="false", site_url=app.config['SITE_URL'])
except:
return render_template('login.html', status="back", login="false", site_url=app.config['SITE_URL'])
else:
return render_template('login.html', status="back", site_url=app.config['SITE_URL'])
@app.route('/callback')
def callback():
code = request.args.get('code')
tkn = get_token(session['uri'], session['client_id'], session['client_secret'], code)
session['access_token'] = tkn['access_token']
return redirect(url_for('setting'))
@app.route('/setting')
def setting():
if session.get('access_token') is None:
return redirect(url_for('login'))
else:
session['acct'] = checkStatus()[2]
return render_template('setting.html', status="logout", site_url=app.config['SITE_URL'])
@app.route('/result', methods=['POST'])
def result():
if session.get('access_token') is None:
return redirect(url_for('login'))
else:
if request.method == 'POST':
num = int(request.form["TootsNum"])
vis = request.form.getlist("visibility")
ex_opt = len(request.form.getlist("defaultlist"))
if ex_opt == 1:
exl = swl
else:
exl = []
ex = request.form["exlist"]
exl.extend(re.split('\W+', ex))
filename = wc(num, vis, exl)
if filename == None:
return render_template('setting.html', status="logout", site_url=app.config['SITE_URL'], error="notext")
else:
return render_template('result.html', status="logout", filename=filename, site_url=app.config['SITE_URL'])
else:
return redirect(url_for('setting'))
@app.route('/toot', methods=['POST'])
def toot():
img = request.args.get('img')
text = request.form['maintext']
vsbl = request.form['visibility']
cw = bool(request.form.getlist('sensitive'))
mstdn = Mastodon(
client_id = session['client_id'],
client_secret = session['client_secret'],
access_token = session['access_token'],
api_base_url = session['uri'])
media_path = "./static/out/" + img + ".png"
image = mstdn.media_post(media_path)
media_files = [image]
status = mstdn.status_post(status=text, media_ids=media_files, visibility=vsbl, sensitive=cw)
url = status['url']
return render_template('toot.html', toot_url=url, status="logout", site_url=app.config['SITE_URL'])
@app.route('/logout')
def logout():
session.pop('uri', None)
session.pop('client_id', None)
session.pop('client_secret', None)
session.pop('access_token', None)
return redirect(url_for('index'))
if __name__ == '__main__':
app.run()
|
466379
|
import threading
def split_processing(ports, num_splits, scan, range_low, range_high):
split_size = (range_high-range_low) // num_splits
threads = []
for i in range(num_splits):
# determine the indices of the list this thread will handle
start = i * split_size
# special case on the last chunk to account for uneven splits
end = range_high if i+1 == num_splits else (i+1) * split_size
# create the thread
threads.append(
threading.Thread(target=scan, args=(ports, start, end)))
threads[-1].start() # start the thread we just created
# wait for all threads to finish
for t in threads:
t.join()
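# --- Minimal usage sketch (hypothetical scan function: a real scanner would
# probe each port in its assigned sub-range instead of printing it).
if __name__ == '__main__':
    def dummy_scan(ports, start, end):
        print('scanning ports', ports[start:end])
    split_processing(list(range(1, 101)), num_splits=4, scan=dummy_scan,
                     range_low=0, range_high=100)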
|
466395
|
import adafruit_irremote
import board
import digitalio
import neopixel
import pulseio
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10)
red_led = digitalio.DigitalInOut(board.D13)
red_led.direction = digitalio.Direction.OUTPUT
pulsein = pulseio.PulseIn(board.REMOTEIN, maxlen=120, idle_state=True)
decoder = adafruit_irremote.GenericDecode()
# among others, this example works with the Adafruit mini IR remote:
# https://www.adafruit.com/product/389
# size must match what you are decoding! for NEC use 4
received_code = bytearray(4)
# IR Remote Mapping
'''
1: [255, 2, 247, 8]
2: [255, 2, 119, 136]
3: [255, 2, 183, 72]
4: [255, 2, 215, 40]
5: [255, 2, 87, 168]
6: [255, 2, 151, 104]
7: [255, 2, 231, 24]
8: [255, 2, 103, 152]
9: [255, 2, 167, 88]
0: [255, 2, 207, 48]
^ : [255, 2, 95, 160]
v : [255, 2, 79, 176]
> : [255, 2, 175, 80]
< : [255, 2, 239, 16]
Enter: [255, 2, 111, 144]
Setup: [255, 2, 223, 32]
Stop/Mode: [255, 2, 159, 96]
Back: [255, 2, 143, 112]
Vol - : [255, 2, 255, 0]
Vol + : [255, 2, 191, 64]
Play/Pause: [255, 2, 127, 128]
'''
RED = (255, 0, 0)
GREEN = (0, 255, 0)
WHITE = (85, 85, 85)
BLUE = (0, 0, 255)
PINK = (128, 0, 128)
YELLOW = (148, 108, 0)
PURPLE = (200, 0, 55)
TEAL = (0, 200, 100)
ORANGE = (100, 45, 0)
BLACK = (0, 0, 0)
last_command = None
while True:
red_led.value = False
try:
pulses = decoder.read_pulses(pulsein)
except MemoryError as e:
print("Memory error: ", e)
continue
red_led.value = True
print("Heard", len(pulses), "Pulses:", pulses)
command = None
try:
code = decoder.decode_bits(pulses, debug=False)
if len(code) > 3:
command = code[2]
print("Decoded:", code)
except adafruit_irremote.IRNECRepeatException: # unusual short code!
print("NEC repeat!")
command = last_command
except adafruit_irremote.IRDecodeException as e: # failed to decode
print("Failed to decode:", e)
except MemoryError as e:
print("Memory error: ", e)
if not command:
continue
last_command = command
print("----------------------------")
red_led.value = False
if command == 247: # IR button 1
pixels.fill(RED)
elif command == 119: # 2
pixels.fill(GREEN)
elif command == 183: # 3
pixels.fill(WHITE)
elif command == 215: # 4
pixels.fill(BLUE)
elif command == 87: # 5
pixels.fill(PINK)
elif command == 151: # 6
pixels.fill(YELLOW)
elif command == 231: # 7
pixels.fill(PURPLE)
elif command == 103: # 8
pixels.fill(TEAL)
elif command == 167: # 9
pixels.fill(ORANGE)
elif command == 207:
pixels.fill(BLACK) # 0/10+
|
466445
|
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import tensorflow as tf
import datetime
import scipy.io as sio
import math
import time
from matplotlib.pyplot import pause
import os
import glob
from tensorflow.keras import layers
from tensorflow.keras import models
import warnings
class CFA_process:
def __init__(self, devices, ii_saved_local, neighbors, federated=True, graph=0):
self.federated = federated # true for federation active
self.devices = devices # number of devices
self.ii_saved_local = ii_saved_local # device index
self.neighbors = neighbors # neighbors number (given the network topology)
self.graph = graph
self.training_end = False
if graph == 0: # use k-degree network
self.neighbor_vec = self.get_connectivity(ii_saved_local, neighbors, devices) # neighbor list
else:
mat_content = self.getMobileNetwork_connectivity(self.ii_saved_local, self.neighbors, self.devices, 0)
self.neighbor_vec = np.asarray(mat_content[0], dtype=int)
def get_neighbor_weights(self, epoch_count, outfile, outfile_models, epoch=0, max_lag=1):
warnings.filterwarnings("ignore")
success = False
# max_lag = 30 # default 30
stop_federation = False
# neighbor model and stats (train variables)
#outfile_models = 'results/dump_train_model{}.npy'.format(neighbor)
#outfile = 'results/dump_train_variables{}.npz'.format(neighbor)
while not os.path.isfile(outfile):
print("waiting for variables")
pause(1)
try:
dump_vars = np.load(outfile, allow_pickle=True)
neighbor_epoch_count = dump_vars['epoch_count']
self.training_end = dump_vars['training_end']
except:
pause(5)
print("retrying opening variables")
try:
dump_vars = np.load(outfile, allow_pickle=True)
neighbor_epoch_count = dump_vars['epoch_count']
self.training_end = dump_vars['training_end']
except:
print("halting federation")
stop_federation = True
pause(round(np.random.random(), 2))
# check file and updated neighbor frame count, max lag
if not stop_federation:
while not os.path.isfile(outfile_models) or neighbor_epoch_count < epoch_count - max_lag and not self.training_end:
# implementing consensus
# print("neighbor frame {} local frame {}, device {} neighbor {}".format(neighbor_frame_count, frame_count, self.ii_saved_local, neighbor[q]))
pause(1)
try:
dump_vars = np.load(outfile, allow_pickle=True)
neighbor_epoch_count = dump_vars['epoch_count']
self.training_end = dump_vars['training_end']
except:
pause(2)
print("retrying opening variables")
try:
dump_vars = np.load(outfile, allow_pickle=True)
neighbor_epoch_count = dump_vars['epoch_count']
self.training_end = dump_vars['training_end']
except:
print("problems loading variables")
# load neighbor model
try:
neighbor_model = np.load(outfile_models, allow_pickle=True)
success = True
except:
pause(5)
print("retrying opening model")
try:
neighbor_model = np.load(outfile_models, allow_pickle=True)
success = True
except:
print("failed to load model federation")
neighbor_model = []
else:
neighbor_model = []
return neighbor_model, success
def getMobileNetwork_connectivity(self, ii_saved_local, neighbors, devices, epoch):
graph_index = sio.loadmat('consensus/vGraph.mat')
dev = np.arange(1, devices + 1)
graph_mobile = graph_index['graph']
set = graph_mobile[ii_saved_local, :, epoch]
tot_neighbors = np.sum(set, dtype=np.uint8)
sets_neighbors_final = np.zeros(tot_neighbors, dtype=np.uint8)
counter = 0
for kk in range(devices):
if set[kk] == 1:
sets_neighbors_final[counter] = kk
counter = counter + 1
return sets_neighbors_final
def get_connectivity(self, ii_saved_local, neighbors, devices):
saved_neighbors = neighbors
if neighbors < 2:
neighbors = 2 # set minimum to 2 neighbors
if (ii_saved_local == 0):
sets_neighbors_final = np.arange(ii_saved_local + 1, ii_saved_local + neighbors + 1)
elif (ii_saved_local == devices - 1):
sets_neighbors_final = np.arange(ii_saved_local - neighbors, ii_saved_local)
elif (ii_saved_local >= math.ceil(neighbors / 2)) and (
ii_saved_local <= devices - math.ceil(neighbors / 2) - 1):
sets_neighbors = np.arange(ii_saved_local - math.floor(neighbors / 2),
ii_saved_local + math.floor(neighbors / 2) + 1)
index_ii = np.where(sets_neighbors == ii_saved_local)
sets_neighbors_final = np.delete(sets_neighbors, index_ii)
else:
if (ii_saved_local - math.ceil(neighbors / 2) < 0):
sets_neighbors = np.arange(0, neighbors + 1)
else:
sets_neighbors = np.arange(devices - neighbors - 1, devices)
index_ii = np.where(sets_neighbors == ii_saved_local)
sets_neighbors_final = np.delete(sets_neighbors, index_ii)
if saved_neighbors < 2:
if ii_saved_local > 0:
neighbors_final = ii_saved_local - 1
else:
neighbors_final = devices - 1
else:
neighbors_final = sets_neighbors_final
return neighbors_final
def get_tx_connectivity(self, ii_saved_local, neighbors, devices):
saved_neighbors = neighbors
if neighbors < 2:
neighbors = 2 # set minimum to 2 neighbors
if (ii_saved_local == 0):
sets_neighbors_final = np.arange(ii_saved_local + 1, ii_saved_local + neighbors + 1)
elif (ii_saved_local == devices - 1):
sets_neighbors_final = np.arange(ii_saved_local - neighbors, ii_saved_local)
elif (ii_saved_local >= math.ceil(neighbors / 2)) and (
ii_saved_local <= devices - math.ceil(neighbors / 2) - 1):
sets_neighbors = np.arange(ii_saved_local - math.floor(neighbors / 2),
ii_saved_local + math.floor(neighbors / 2) + 1)
index_ii = np.where(sets_neighbors == ii_saved_local)
sets_neighbors_final = np.delete(sets_neighbors, index_ii)
else:
if (ii_saved_local - math.ceil(neighbors / 2) < 0):
sets_neighbors = np.arange(0, neighbors + 1)
else:
sets_neighbors = np.arange(devices - neighbors - 1, devices)
index_ii = np.where(sets_neighbors == ii_saved_local)
sets_neighbors_final = np.delete(sets_neighbors, index_ii)
if saved_neighbors < 2:
if ii_saved_local == self.devices - 1:
neighbors_final = 0
else:
neighbors_final = ii_saved_local + 1
else:
neighbors_final = sets_neighbors_final
return neighbors_final
def federated_weights_computing(self, neighbor, neighbors, epoch_count, eps_t_control, epoch=0, max_lag=30):
warnings.filterwarnings("ignore")
# max_lag = 30 # default 30
stop_federation = False
old_weights = self.local_weights
neighbor_weights = []
# seqc = random.sample(range(self.devices), self.active)
if neighbors > 1:
for q in range(neighbors):
outfile_models = 'results/dump_train_model{}.npy'.format(neighbor[q])
outfile = 'results/dump_train_variables{}.npz'.format(neighbor[q])
weight_n, success = self.get_neighbor_weights(epoch_count, outfile, outfile_models, epoch=0, max_lag=1)
if success:
neighbor_weights.append(weight_n)
if self.training_end and len(neighbor_weights) > 0:
# one of the neighbors solved the optimization, apply transfer learning
break
else:
outfile_models = 'results/dump_train_model{}.npy'.format(neighbor)
outfile = 'results/dump_train_variables{}.npz'.format(neighbor)
weight_n, success = self.get_neighbor_weights(epoch_count, outfile, outfile_models, epoch=0, max_lag=1)
if success:
neighbor_weights.append(weight_n)
if len(neighbor_weights) > 0:
eps_t_control = 1 / (len(neighbor_weights) + 1) # overwrite
for q in range(len(neighbor_weights)):
if self.training_end:
print("detected training end")
                    # it is reasonable to replace the local model with the received one; stop model averaging with other neighbors
for k in range(self.layers):
self.local_weights[k] = neighbor_weights[-1][k]
break
else: # apply model averaging
for k in range(self.layers):
self.local_weights[k] = self.local_weights[k] + eps_t_control*(neighbor_weights[q][k]-self.local_weights[k])
# self.local_weights[k] = self.local_weights[k] + eps_t_control * (neighbor_weights[k] - self.local_weights[k])
del neighbor_weights
return self.local_weights.tolist()
def federated_grads_computing(self, neighbor, neighbors, epoch_count, eps_t_control, max_lag=1):
warnings.filterwarnings("ignore")
# max_lag = 30 # default 30
neighbor_grads = []
# seqc = random.sample(range(self.devices), self.active)
if neighbors > 1:
for q in range(neighbors):
# neighbor model and stats (train variables)
outfile = 'results/dump_train_variables{}.npz'.format(neighbor[q])
outfile_models_grad = 'results/dump_train_grad{}.npy'.format(neighbor[q])
weight_n, success = self.get_neighbor_weights(epoch_count, outfile,
outfile_models_grad, epoch=0, max_lag=1)
if success:
neighbor_grads.append(weight_n)
if self.training_end and len(neighbor_grads) > 0:
# one of the neighbors solved the optimization, apply transfer learning
break
else:
# neighbor model and stats (train variables)
outfile = 'results/dump_train_variables{}.npz'.format(neighbor)
outfile_models_grad = 'results/dump_train_grad{}.npy'.format(neighbor)
weight_n, success = self.get_neighbor_weights(epoch_count, outfile,
outfile_models_grad, epoch=0, max_lag=1)
if success:
neighbor_grads.append(weight_n)
if len(neighbor_grads) > 0:
# eps_t_control = 1 / (len(neighbor_grads) + 1) # overwrite
for q in range(len(neighbor_grads)):
# apply model averaging
for k in range(self.layers):
self.local_gradients[k] = self.local_gradients[k] + eps_t_control * (
neighbor_grads[q][k] - self.local_gradients[k])
del neighbor_grads
grads_out = []
for ii in range(self.layers):
grads_out.append(tf.convert_to_tensor(self.local_gradients[ii]))
return grads_out
def getTrainingStatusFromNeightbor(self):
return self.training_end
def update_local_target_model(self, model):
self.local_weights = model
self.layers = self.local_weights.size
def update_local_gradient(self, gradients):
self.local_gradients = gradients
def update_local_model(self, model):
self.local_weights = model
self.layers = self.local_weights.size
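# --- Minimal usage sketch (illustrative): only exercises the k-degree
# connectivity helper; no results/ dump files or consensus/vGraph.mat are read.
if __name__ == '__main__':
    cfa = CFA_process(devices=5, ii_saved_local=2, neighbors=2)
    print('neighbors of device 2:', cfa.neighbor_vec)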
|
466471
|
import tensorflow as tf
import config
import os
from urllib.request import urlretrieve
from zipfile import ZipFile
from dataset.dataset import Dataset
from network.eval import Learning
FLAGS = tf.app.flags.FLAGS
data_dir = config.data_dir
tmp_zip_adr = config.tmp_zip_adr
dataset_urls = config.dataset_urls
def download_dataset_if_needed():
def download_and_unzip(zipurls):
for url in zipurls:
print("Downloading {}".format(url))
fpath, _ = urlretrieve(url, tmp_zip_adr)
zf = ZipFile(fpath)
zf.extractall(data_dir)
zf.close()
os.remove(fpath)
print("Dataset downloaded into 'dataset/data' folder")
if not os.path.exists(data_dir) or FLAGS.download:
os.makedirs(data_dir)
print("Downloading dataset")
download_and_unzip(dataset_urls)
def main(argv=None):
download_dataset_if_needed()
if FLAGS.update or not os.path.exists(data_dir + 'segmented_set1'):
print("Starting processing binary dataset")
Dataset().create_dataset(data_dir + "segmented_set?/*.avi")
Learning()
if __name__ == '__main__':
tf.app.run()
|
466500
|
import logging
import os
import shutil
import subprocess
import tempfile
import uuid
import panda3d.core as pc
from direct.directtools.DirectGrid import DirectGrid
from direct.showbase import ShowBaseGlobal
from panda3d.core import ConfigVariableBool
from pubsub import pub
import p3d
from p3d.displayShading import DisplayShading
from p3d.editorCamera import EditorCamera
from p3d.frameRate import FrameRate
from p3d.mouse import MOUSE_ALT
from pandaEditor import constants
from pandaEditor.ui.mainFrame import MainFrame
from pandaEditor import actions, commands, gizmos
from pandaEditor.assetManager import AssetManager
from pandaEditor.dragdropmanager import DragDropManager
from scene import Scene
from pandaEditor.project import Project
from pandaEditor.selection import Selection
from pandaEditor.ui.document import Document
from pandaEditor.game.showbase import ShowBase as GameShowBase
from pandaEditor.nodes.manager import Manager as NodeManager
from pandaEditor.plugins.manager import Manager as PluginManager
from pandaEditor.sceneparser import SceneParser
logger = logging.getLogger(__name__)
class ShowBase(GameShowBase):
node_manager_cls = NodeManager
plug_manager_cls = PluginManager
scene_parser_cls = SceneParser
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if ConfigVariableBool('no_ui', False):
return
self.forcedAspectWins = []
# Create our managers.
self.asset_manager = AssetManager()
self.drag_drop_manager = DragDropManager()
self.action_manager = actions.Manager()
self.startWx()
self.frame = MainFrame(self, None, size=(800, 600))
self.frame.Show()
self.frame.pnlViewport.Initialize()
self.gizmo = False
self._xformTask = None
# Bind publisher events
pub.subscribe(self.OnUpdate, 'Update')
self.SetupEdRender()
self.SetupEdRender2d()
self.SetupEdMouseWatcher()
self.SetupEdCamera()
# Make additional camera for 2d nodes
cam2d = self.makeCamera2d(self.win)
cam2d.reparentTo(self.edRender2d)
# Add the editor window, camera and pixel 2d to the list of forced
# aspect windows so aspect is fixed when the window is resized.
self.forcedAspectWins.append((self.win, self.edCamera, self.edPixel2d))
self.reset()
# Create project manager
self.project = Project(self)
self.frame.SetProjectPath(self.frame.cfg.Read('projDirPath'))
# Create grid
self.SetupGrid()
# Create frame rate meter
self.frameRate = FrameRate()
# Create shading mode keys
dsp = DisplayShading()
dsp.accept('4', dsp.Wireframe)
dsp.accept('5', dsp.Shade)
dsp.accept('6', dsp.Texture)
# Set up gizmos
self.SetupGizmoManager()
# Bind mouse events
self.accept('mouse1', self.OnMouse1Down)
self.accept('shift-mouse1', self.OnMouse1Down, [True])
self.accept('control-mouse1', self.OnMouse1Down)
self.accept('mouse2', self.OnMouse2Down)
self.accept('mouse1-up', self.OnMouse1Up)
self.accept('mouse2-up', self.OnMouse2Up)
# Create selection manager
self.selection = Selection(
self,
camera=self.edCamera,
root2d=self.edRender2d,
win=self.win,
mouseWatcherNode=self.edMouseWatcherNode
)
# Bind events
self.accept('z', self.action_manager.undo)
self.accept('shift-z', self.action_manager.redo)
self.accept('f', self.frame_selection)
self.accept(
'del',
lambda fn: commands.remove(fn()),
[self.selection.get]
)
self.accept(
'backspace',
lambda fn: commands.remove(fn()),
[self.selection.get]
)
self.accept(
'control-d',
lambda fn: commands.duplicate(fn()),
[self.selection.get]
)
self.accept(
'control-g',
lambda fn: commands.group(fn()),
[self.selection.get]
)
self.accept('control-s', self.frame.OnFileSave, [None])
self.accept(
'arrow_up',
lambda fn: commands.select(fn()),
[self.selection.select_parent]
)
self.accept(
'arrow_down',
lambda fn: commands.select(fn()),
[self.selection.select_child]
)
self.accept(
'arrow_left',
lambda fn: commands.select(fn()),
[self.selection.select_prev]
)
self.accept(
'arrow_right',
lambda fn: commands.select(fn()),
[self.selection.select_next]
)
self.accept('projectFilesModified', self.OnProjectFilesModified)
# Create a "game"
# self.game = EditorBase(self)
self.load_plugins()
self.plugin_manager.on_build_ui()
# Start with a new scene
self.CreateScene()
self.doc.on_refresh()
self.windowEvent(None)
def SetupEdRender(self):
"""
Create editor root node behind render node so we can keep editor only
nodes out of the scene.
"""
self.edRender = pc.NodePath('edRender')
render.reparentTo(self.edRender)
def SetupEdRender2d(self):
"""
Creates the render2d scene graph, the primary scene graph for 2-d
objects and gui elements that are superimposed over the 3-d geometry
in the window.
"""
self.edRender2d = pc.NodePath('edRender2d')
# Set up some overrides to turn off certain properties which we
# probably won't need for 2-d objects.
self.edRender2d.setDepthTest(0)
self.edRender2d.setDepthWrite(0)
self.edRender2d.setMaterialOff(1)
self.edRender2d.setTwoSided(1)
# This special root, pixel2d, uses units in pixels that are relative
# to the window. The upperleft corner of the window is (0, 0),
# the lowerleft corner is (xsize, -ysize), in this coordinate system.
xsize, ysize = self.getSize()
self.edPixel2d = self.edRender2d.attachNewNode(pc.PGTop('edPixel2d'))
self.edPixel2d.setPos(-1, 0, 1)
if xsize > 0 and ysize > 0:
self.edPixel2d.setScale(2.0 / xsize, 1.0, 2.0 / ysize)
def SetupEdMouseWatcher(self):
# Setup mouse watcher for the editor window
buttonThrowers, pointerWatcherNodes = self.setupMouseCB(self.win)
self.edMouseWatcher = buttonThrowers[0].getParent()
self.edMouseWatcherNode = self.edMouseWatcher.node()
self.edMouseWatcherParent = self.edMouseWatcher.getParent()
def SetupEdCamera(self):
# Create editor camera
self.edCamera = EditorCamera(
'camera',
style=p3d.camera.CAM_VIEWPORT_AXES,
orbit_sensitivity=2,
dolly_sensitivity=2,
zoom_sensitivity=3,
pos=(56, 56, 42),
rootNp=self.edRender,
rootP2d=self.edPixel2d,
win=self.win,
mouseWatcherNode=self.edMouseWatcherNode
)
self.edCamera.reparentTo(self.edRender)
self.edCamera.Start()
# Modify the existing display region and create a new one for the
# editor camera.
self.dr = self.cam.node().getDisplayRegion(0)
self.dr.setClearColorActive(True)
self.dr.setClearColor(self.getBackgroundColor())
self.dr.setActive(False)
self.dr.setSort(20)
self.dr2d = self.cam2d.node().getDisplayRegion(0)
self.dr2d.setActive(False)
self.dr2d.setSort(21)
self.edDr = self.win.makeDisplayRegion(0, 1, 0, 1)
self.edDr.setCamera(self.edCamera)
self.edDr.setClearColorActive(True)
self.edDr.setClearColor((0.63, 0.63, 0.63, 0))
def windowEvent(self, *args, **kwargs):
"""
Overridden so as to fix the aspect ratio of the editor camera and
editor pixel2d.
"""
super().windowEvent(*args, **kwargs)
for win, cam, pixel2d in self.forcedAspectWins:
aspectRatio = self.getAspectRatio(win)
cam.node().getLens().setAspectRatio(aspectRatio)
# Fix pixel2d scale for new window size
# Temporary hasattr for old Panda3D versions
if not hasattr(win, 'getSbsLeftXSize'):
pixel2d.setScale(2.0 / win.getXSize(), 1.0, 2.0 / win.getYSize())
else:
pixel2d.setScale(2.0 / win.getSbsLeftXSize(), 1.0, 2.0 / win.getSbsLeftYSize())
def GetEditorRenderMasks(self):
"""
Return the show, hide and clear masks for objects that are to be
rendered only in the editor viewport.
"""
show = pc.BitMask32()
show.setRangeTo(True, 28, 4)
hide = pc.BitMask32().allOn()
hide.setRangeTo(False, 28, 4)
clear = pc.BitMask32()
return show, hide, clear
def SetupCameraMask(self):
"""
Set camera mask to draw all objects but those with the first four bits
flipped. All editor geometry will use these bits so as to not be
rendered in the game view.
"""
bits = self.cam.node().getCameraMask()
bits.setRangeTo(False, 28, 4)
self.cam.node().setCameraMask(bits)
# Set edRender mask
self.edRender.node().adjustDrawMask(*self.GetEditorRenderMasks())
def SetupRenderMask(self):
"""
Set the draw mask for the render node to be visible to all cameras.
Since we are adjusting the draw mask of the render node's parent we
need to manually set this node's mask or it will inherit those
properties.
"""
showMask = pc.BitMask32().allOn()
hideMask = pc.BitMask32()
clearMask = pc.BitMask32()
render.node().adjustDrawMask(showMask, hideMask, clearMask)
def reset(self):
"""Remove all default nodes and recreate them."""
# Remove all default nodes and set them to None so they are recreated
# properly.
for name in ('cam', 'camera', 'cam2d', 'camera2d'):
np = getattr(self, name)
np.removeNode()
setattr(self, name, None)
# Set up render and render2d again, forcing their new values into
# builtins.
self.setupRender()
# This is kinda lame imho. These default nodes are created by importing
# the showbase global module, which makes it difficult to recreate these
# nodes for our purposes.
render2d = pc.NodePath('render2d')
aspect2d = render2d.attachNewNode(pc.PGTop('aspect2d'))
ShowBaseGlobal.render2d = render2d
ShowBaseGlobal.aspect2d = aspect2d
self.setupRender2d()
__builtins__['render'] = self.render
__builtins__['render2d'] = self.render2d
__builtins__['aspect2d'] = self.aspect2d
__builtins__['pixel2d'] = self.pixel2d
self.makeCamera(self.win)
self.makeCamera2d(self.win)
__builtins__['camera'] = self.camera
for cam, dr in {self.cam:self.dr, self.cam2d:self.dr2d}.items():
defaultDr = cam.node().getDisplayRegion(0)
self.win.removeDisplayRegion(defaultDr)
dr.setCamera(cam)
# Set up masks
self.SetupCameraMask()
self.SetupRenderMask()
# Set auto shader.
render.setShaderAuto()
def ResetModelPath(self):
"""
Clears the model path, making sure to restore the current working
directory (so editor models can still be found).
"""
pc.getModelPath().clear()
pc.getModelPath().prependDirectory('.')
def DisableEditorMouse(self):
self.edMouseWatcher.detachNode()
def EnableEditorMouse(self):
self.edMouseWatcher.reparentTo(self.edMouseWatcherParent)
def LayoutGameView(self):
"""Deactivate both display regions and enable mouse."""
self.DisableEditorMouse()
self.dr.setActive(True)
self.dr.setDimensions(0, 1, 0, 1)
self.dr2d.setActive(True)
self.dr2d.setDimensions(0, 1, 0, 1)
self.edRender2d.hide()
self.edPixel2d.hide()
def LayoutEditorView(self):
"""Deactivate both display regions and enable mouse."""
self.EnableEditorMouse()
self.dr.setActive(False)
self.dr2d.setActive(False)
self.edDr.setActive(True)
self.edRender2d.show()
self.edPixel2d.show()
def LayoutBothView(self):
"""Deactivate both display regions and enable mouse."""
self.EnableEditorMouse()
self.dr.setActive(True)
self.dr.setDimensions(0.65, 1, 0.65, 1)
self.dr2d.setActive(True)
self.dr2d.setDimensions(0.65, 1, 0.65, 1)
self.edDr.setActive(True)
self.edRender2d.show()
self.edPixel2d.show()
def SetupGrid(self):
"""Create the grid and set up its appearance."""
self.grid = DirectGrid(
gridSize=20.0,
gridSpacing=1.0,
planeColor=(0.5, 0.5, 0.5, 0.0),
parent=self.edRender
)
self.grid.snapMarker.hide()
self.grid.centerLines.setColor((0, 0, 0, 0))
self.grid.centerLines.setThickness(2)
self.grid.majorLines.setColor((0.25, 0.25, 0.25, 0))
self.grid.majorLines.setThickness(1)
self.grid.minorLines.setColor((0.5, 0.5, 0.5, 0))
self.grid.updateGrid()
def SetupGizmoManager(self):
"""Create gizmo manager."""
gizmoMgrRootNp = self.edRender.attachNewNode('gizmoManager')
kwargs = {
'camera':self.edCamera,
'rootNp':gizmoMgrRootNp,
'win':self.win,
'mouseWatcherNode':self.edMouseWatcherNode
}
self.gizmoMgr = gizmos.Manager(**kwargs)
self.gizmoMgr.AddGizmo(gizmos.Translation('pos', **kwargs))
self.gizmoMgr.AddGizmo(gizmos.Rotation('rot', **kwargs))
self.gizmoMgr.AddGizmo(gizmos.Scale('scl', **kwargs))
# Bind gizmo manager events
self.accept('q', self.SetActiveGizmo, [None])
self.accept('w', self.SetActiveGizmo, ['pos'])
self.accept('e', self.SetActiveGizmo, ['rot'])
self.accept('r', self.SetActiveGizmo, ['scl'])
self.accept('space', self.ToggleGizmoLocal)
self.accept('+', self.gizmoMgr.SetSize, [2])
self.accept('-', self.gizmoMgr.SetSize, [0.5])
def SetActiveGizmo(self, name):
self.gizmoMgr.SetActiveGizmo(name)
self.frame.OnUpdateXform(None)
def SetGizmoLocal(self, val):
self.gizmoMgr.SetLocal(val)
self.frame.OnUpdateXform(None)
def ToggleGizmoLocal(self):
self.gizmoMgr.ToggleLocal()
self.frame.OnUpdateXform(None)
def OnMouse1Down(self, shift=False):
"""
Handle mouse button 1 down event. Start the drag select operation if
a gizmo is not being used and the alt key is not down, otherwise start
the transform operation.
"""
if (
not self.gizmoMgr.IsDragging() and
MOUSE_ALT not in self.edCamera.modifiers
):
self.selection.StartDragSelect(shift)
elif self.gizmoMgr.IsDragging():
self.StartTransform()
def OnMouse2Down(self):
"""
Handle mouse button 2 down event. Start the transform operation if a
gizmo is being used.
"""
if self.gizmoMgr.IsDragging():
self.StartTransform()
def OnMouse1Up(self):
"""
Handle mouse button 1 up event. Stop the drag select operation if the
marquee is running, otherwise stop the transform operation if a gizmo
is being used.
"""
if self.selection.marquee.IsRunning():
commands.select(self.selection.StopDragSelect())
elif self.gizmoMgr.IsDragging() or self.gizmo:
self.StopTransform()
def OnMouse2Up(self):
"""
Handle mouse button 2 up event. Stop the transform operation if a
gizmo is being used.
"""
if self.gizmoMgr.IsDragging() or self.gizmo:
self.StopTransform()
def StartTransform(self):
"""
Start the transform operation by adding a task that constantly sends a
selection modified message while transforming.
"""
self.gizmo = True
self._xformTask = taskMgr.add(self.doc.on_selection_modified,
'SelectionModified')
def StopTransform(self):
"""
Stop the transform operation by removing the selection modified
message task. Also create a transform action and push it onto the undo
queue.
"""
# Remove the transform task
if self._xformTask in self.task_mgr.getAllTasks():
self.task_mgr.remove(self._xformTask)
self._xformTask = None
actGizmo = self.gizmoMgr.GetActiveGizmo()
actns = []
comps = []
for i, np in enumerate(actGizmo.attachedNps):
comp = self.node_manager.wrap(np)
comps.append(comp)
actns.append(actions.Transform(comp, np.getTransform(), actGizmo.initNpXforms[i]))
actn = actions.Composite(actns)
self.action_manager.push(actn)
self.gizmo = False
# Make sure to mark the NodePath as dirty in case it is a child of a
# model root.
comp = self.node_manager.wrap(np)
comp.modified = True
# Call OnModified next frame. Not sure why but if we call it straight
# away it causes a small jitter when xforming...
self.task_mgr.doMethodLater(
0,
self.doc.on_modified,
'dragDrop',
[comps]
)
def frame_selection(self):
"""
Call frame selection on the camera if there are some node paths in the
selection.
"""
nps = self.selection.node_paths
if nps:
self.edCamera.Frame(nps)
else:
self.edCamera.Frame([self.scene.rootNp])
def OnUpdate(self, comps=None):
"""
Subscribed to the update selection message. Make sure that the
selected nodes are attached to the managed gizmos, then refresh the
active one.
"""
#nps = self.selection.GetNodePaths()
self.gizmoMgr.AttachNodePaths(self.selection.node_paths)
self.gizmoMgr.RefreshActiveGizmo()
self.selection.update()
def CreateScene(self, filePath=None, newDoc=True):
"""
Create an empty scene and point the selection, picker and marquee root nodes at its root node.
"""
# Reset undo queue if creating a new document
if newDoc:
self.action_manager.reset()
# Close the current scene if there is one
self.selection.clear()
if hasattr(self, 'scene'):
self.scene.close()
# Create a new scene
self.scene = Scene()
self.scene.rootNp.reparentTo(self.edRender)
# Set the selection and picker root node to the scene's root node
self.selection.rootNp = self.scene.rootNp
self.selection.picker.rootNp = self.scene.rootNp
self.selection.marquee.rootNp = self.scene.rootNp
# Create the document wrapper if creating a new document
if newDoc:
self.doc = Document(filePath, self.scene)
def add_component(self, type_str, *args, **kwargs):
comp_cls = self.node_manager.get_component_by_name(type_str)
comp = comp_cls.create(*args, **kwargs)
comp.parent = comp.default_parent
comp.id = str(uuid.uuid4())
comp.set_default_values()
commands.add([comp])
return comp
def add_prefab(self, file_path):
logger.info(f'Adding prefab: {file_path}')
root_comp = self.node_manager.wrap(self.render)
prefab_comp = self.scene_parser.load(file_path, root_comp)
commands.add([prefab_comp])
return prefab_comp
def OnProjectFilesModified(self, filePaths):
self.asset_manager.on_asset_modified(filePaths)
self.plugin_manager.on_project_modified(filePaths)
def write_bam_file(self, evt):
sel_comps = self.selection.comps
self.selection.clear()
for comp in sel_comps:
model_name = comp.data.get_name()
bam_path = os.path.join(self.project.models_directory, model_name) + '.bam'
comp.data.write_bam_file(pc.Filename.from_os_specific(bam_path))
self.selection.add(sel_comps)
def export_obj(self, evt):
# TODO: Save panda3d runtime location to preferences.
sel_comps = self.selection.comps
self.selection.clear()
for comp in sel_comps:
model_name = comp.data.get_name()
temp_dir_path = tempfile.mkdtemp()
bam_path = os.path.join(temp_dir_path, model_name) + '.bam'
comp.data.write_bam_file(pc.Filename.from_os_specific(bam_path))
bam_to_egg_path = os.path.join(constants.PANDA_3D_RUNTIME_PATH, 'bin', 'bam2egg.exe')
egg_path = os.path.join(temp_dir_path, model_name) + '.egg'
p = subprocess.call(
[bam_to_egg_path, bam_path, egg_path],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
#print(p)
#print(p.communicate())
egg_to_obj_path = os.path.join(constants.PANDA_3D_RUNTIME_PATH, 'bin', 'egg2obj.exe')
obj_path = os.path.join(self.project.models_directory, model_name) + '.obj'
p = subprocess.call(
[egg_to_obj_path, egg_path, obj_path],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
#print(p.communicate())
shutil.rmtree(temp_dir_path)
self.selection.add(sel_comps)
|
466530
|
import time
from curtsies.input import *
def paste():
with Input() as input_generator:
print("If more than %d chars read in same read a paste event is generated" % input_generator.paste_threshold)
for e in input_generator:
print(repr(e))
time.sleep(1)
if __name__ == '__main__':
paste()
|
466598
|
import unittest
from unittest.mock import patch
from requests.exceptions import HTTPError
from my_user import get_users, requests
class TestUsers(unittest.TestCase):
@patch.object(requests, 'get', side_effect=HTTPError)
def test_get_users(self, mock_requests):
with self.assertRaises(HTTPError):
get_users()
if __name__ == '__main__':
unittest.main()
|
466605
|
from abc import abstractmethod
from typing import Dict, Any
from anoncreds.protocol.exceptions import SchemaNotFoundError
from anoncreds.protocol.types import ID, PublicKey, RevocationPublicKey, \
Schema, Tails, Accumulator, \
AccumulatorPublicKey, TimestampType, SchemaKey
class PublicRepo:
# GET
@abstractmethod
async def getSchema(self, schemaId: ID) -> Schema:
raise NotImplementedError
@abstractmethod
async def getPublicKey(self,
schemaId: ID,
signatureType='CL') -> PublicKey:
raise NotImplementedError
@abstractmethod
async def getPublicKeyRevocation(self,
schemaId: ID,
signatureType='CL') -> RevocationPublicKey:
raise NotImplementedError
@abstractmethod
async def getPublicKeyAccumulator(self,
schemaId: ID) -> AccumulatorPublicKey:
raise NotImplementedError
@abstractmethod
async def getAccumulator(self, schemaId: ID) -> Accumulator:
raise NotImplementedError
@abstractmethod
async def getTails(self, schemaId: ID) -> Tails:
raise NotImplementedError
# SUBMIT
@abstractmethod
async def submitSchema(self,
schema: Schema) -> Schema:
raise NotImplementedError
@abstractmethod
async def submitPublicKeys(self,
schemaId: ID,
pk: PublicKey,
pkR: RevocationPublicKey = None,
signatureType='CL') -> (
PublicKey, RevocationPublicKey):
raise NotImplementedError
@abstractmethod
async def submitAccumulator(self, schemaId: ID,
accumPK: AccumulatorPublicKey,
accum: Accumulator, tails: Tails) -> \
AccumulatorPublicKey:
raise NotImplementedError
@abstractmethod
async def submitAccumUpdate(self, schemaId: ID, accum: Accumulator,
timestampMs: TimestampType):
raise NotImplementedError
class PublicRepoInMemory(PublicRepo):
def __init__(self):
self._schemasByKey = {}
self._schemasById = {}
self._pks = {}
self._pkRs = {}
self._accums = {}
self._accumPks = {}
self._tails = {}
self._schemaId = 1
self._pkId = 1
self._pkRId = 1
self._acumPkId = 1
# GET
async def getSchema(self, schemaId: ID) -> Schema:
if schemaId.schemaKey and schemaId.schemaKey in self._schemasByKey:
return self._schemasByKey[schemaId.schemaKey]
if schemaId.schemaId and schemaId.schemaId in self._schemasById:
return self._schemasById[schemaId.schemaId]
raise SchemaNotFoundError(
'No schema with ID={} and key={}'.format(
schemaId.schemaId,
schemaId.schemaKey))
async def getPublicKey(self,
schemaId: ID,
signatureType='CL') -> PublicKey:
return await self._getValueForId(self._pks, schemaId)
async def getPublicKeyRevocation(self,
schemaId: ID,
signatureType='CL') -> RevocationPublicKey:
return await self._getValueForId(self._pkRs, schemaId)
async def getPublicKeyAccumulator(self,
schemaId: ID) -> AccumulatorPublicKey:
return await self._getValueForId(self._accumPks, schemaId)
async def getAccumulator(self, schemaId: ID) -> Accumulator:
return await self._getValueForId(self._accums, schemaId)
async def getTails(self, schemaId: ID) -> Tails:
return await self._getValueForId(self._tails, schemaId)
# SUBMIT
async def submitSchema(self,
schema: Schema) -> Schema:
schema = schema._replace(seqId=self._schemaId)
self._schemaId += 1
self._schemasByKey[schema.getKey()] = schema
self._schemasById[schema.seqId] = schema
return schema
async def submitPublicKeys(self,
schemaId: ID,
pk: PublicKey,
pkR: RevocationPublicKey = None,
signatureType='CL') -> (
PublicKey, RevocationPublicKey):
pk = pk._replace(seqId=self._pkId)
self._pkId += 1
await self._cacheValueForId(self._pks, schemaId, pk)
if pkR:
pkR = pkR._replace(seqId=self._pkRId)
self._pkRId += 1
await self._cacheValueForId(self._pkRs, schemaId, pkR)
return pk, pkR
async def submitAccumulator(self, schemaId: ID,
accumPK: AccumulatorPublicKey,
accum: Accumulator,
tails: Tails) -> AccumulatorPublicKey:
accumPK = accumPK._replace(seqId=self._acumPkId)
self._acumPkId += 1
await self._cacheValueForId(self._accums, schemaId, accum)
accumPk = await self._cacheValueForId(self._accumPks, schemaId,
accumPK)
await self._cacheValueForId(self._tails, schemaId, tails)
return accumPk
async def submitAccumUpdate(self, schemaId: ID, accum: Accumulator,
timestampMs: TimestampType):
await self._cacheValueForId(self._accums, schemaId, accum)
async def _getValueForId(self, dictionary: Dict[SchemaKey, Any],
schemaId: ID) -> Any:
schema = await self.getSchema(schemaId)
schemaKey = schema.getKey()
if schemaKey not in dictionary:
raise ValueError(
'No value for schema with ID={} and key={}'.format(
schemaId.schemaId, schemaId.schemaKey))
return dictionary[schemaKey]
async def _cacheValueForId(self, dictionary: Dict[SchemaKey, Any],
schemaId: ID, value: Any):
schema = await self.getSchema(schemaId)
schemaKey = schema.getKey()
dictionary[schemaKey] = value
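# Minimal usage sketch of the in-memory repo round trip (hypothetical values;
# the keyword form of the ID constructor is an assumption about
# anoncreds.protocol.types, not taken from this file):
#     repo = PublicRepoInMemory()
#     stored = await repo.submitSchema(schema)   # seqId is assigned here
#     fetched = await repo.getSchema(ID(schemaId=stored.seqId))
#     assert fetched == stored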
|
466629
|
print "Using testing configuration."
SECRET_KEY = '<KEY>'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/tmp/wwwhisper_test_db',
}
}
|
466633
|
import sys
sys.path.append(r'../shared')
from decl_grid import *
from numpy import *
from geo import *
from statistics import *
# x = 14
# y = 3
# d_x = 10
# d_y = 10
def optimal_dx_dy(array1, array2, array3, d_x, d_y, min_max, x):
w_m = []
dx_ar = []
dy_ar = []
for dx in range(1, d_x):
for dy in range(1, d_y):
l1 = (min_max[2] - min_max[0])/dx
l2 = (min_max[3] - min_max[1])/dy
array_grid = Grid(min_max[0], min_max[1], dx, dy, l1, l2)
for i in range(x):
array_grid.add_point(array1[i], array2[i])
#cell_declustering
w_cell = array_grid.get_weights_cell()
#print "Cell_declustering"
w_cell = stand_weight(w_cell, x)
#print w_cell
w_m.append(w_mean(w_cell, array3))
dx_ar.append(dx)
dy_ar.append(dy)
#print w_m
w_min = min(w_m)
for i in range(len(w_m)):
if (w_m[i] == w_min):
i_min = i
#print i_min
#print dx_ar[i_min], dy_ar[i_min]
return dx_ar[i_min], dy_ar[i_min]
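# Illustrative call (hypothetical coordinate/value arrays, not part of this
# module): search every cell size up to 10x10 and return the (dx, dy) pair
# whose cell-declustered weighted mean of the data values is smallest.
#     dx_best, dy_best = optimal_dx_dy(x_coords, y_coords, values, 10, 10,
#                                      (x_min, y_min, x_max, y_max), len(values))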
|
466665
|
import sys
import os
launcher={ 'shared' : 'env ' + sys.argv[4],
'MPI' : 'mpirun -genv ' + sys.argv[4] + ' -genv DIST_CNC=MPI -n 4',
'SOCKETS': 'env ' + sys.argv[4] + ' DIST_CNC=SOCKETS CNC_SOCKET_HOST=$CNCROOT/share/icnc/misc/distributed/socket/start_batch.sh',
'SHMEM' : 'env ' + sys.argv[4] + ' DIST_CNC=SHMEM'
}
cmd=launcher[sys.argv[2]] + " " + sys.argv[1] + " " + " ".join(sys.argv[5:]) + " > " + sys.argv[3]
#print "run.py: " + cmd + "(" + sys.argv[4] + ")"
os.system(cmd)
|
466670
|
from django.test import TestCase
from abidria.exceptions import EntityDoesNotExistException
from experiences.entities import Experience
from experiences.models import ORMExperience, ORMSave
from experiences.repositories import ExperienceRepo
from people.models import ORMPerson
class ExperienceRepoTestCase(TestCase):
def test_get_all_experiences_with_mine_false(self):
ExperienceRepoTestCase.ScenarioMaker() \
.given_a_person_in_db() \
.given_an_experience_created_by_first_person_in_db() \
.given_another_experience_created_by_first_person_in_db() \
.given_another_person_in_db() \
.given_an_experience_created_by_second_person_in_db() \
.given_another_experience_created_by_second_person_in_db() \
.given_a_third_experience_created_by_second_person_and_saved_by_first() \
.given_a_fourth_experience_created_by_second_person_and_saved_by_second() \
.given_logged_person_id_is_first_person_id() \
.when_get_all_experiences(mine=False) \
.then_repo_should_return_just_second_two_experience_and_fourth_with_saved_mine_false()
def test_get_all_experiences_with_mine_true(self):
ExperienceRepoTestCase.ScenarioMaker() \
.given_a_person_in_db() \
.given_an_experience_created_by_first_person_in_db() \
.given_another_experience_created_by_first_person_in_db() \
.given_another_person_in_db() \
.given_an_experience_created_by_second_person_in_db() \
.given_another_experience_created_by_second_person_in_db() \
.given_logged_person_id_is_first_person_id() \
.when_get_all_experiences(mine=True) \
.then_repo_should_return_just_first_two_experience_with_mine_true()
def test_get_all_experiences_with_saved_true(self):
ExperienceRepoTestCase.ScenarioMaker() \
.given_a_person_in_db() \
.given_an_experience_created_by_first_person_in_db() \
.given_another_experience_created_by_first_person_in_db() \
.given_another_person_in_db() \
.given_an_experience_created_by_second_person_in_db() \
.given_another_experience_created_by_second_person_in_db() \
.given_a_save_to_first_second_person_experience_from_first_person() \
.given_logged_person_id_is_first_person_id() \
.when_get_all_experiences(saved=True) \
.then_repo_should_return_just_first_second_person_experience_with_saved_true()
def test_get_experience_returns_experience(self):
ExperienceRepoTestCase.ScenarioMaker() \
.given_a_person_in_db() \
.given_an_experience_in_db() \
.when_get_experience_with_its_id() \
.then_repo_should_return_experience()
def test_get_unexistent_experience_raises_error(self):
ExperienceRepoTestCase.ScenarioMaker() \
.when_get_unexistent_experience() \
.then_entity_does_not_exists_should_be_raised()
def test_create_experience_creates_and_returns_experience(self):
ExperienceRepoTestCase.ScenarioMaker() \
.given_a_person_in_db() \
.given_an_experience_to_create() \
.when_create_this_experience() \
.then_should_return_this_experience_with_mine_true() \
.then_should_save_this_experience_to_db()
def test_update_experience(self):
ExperienceRepoTestCase.ScenarioMaker() \
.given_a_person_in_db() \
.given_an_experience_in_db() \
.given_an_updated_experience() \
.when_update_first_experience() \
.then_result_should_be_same_as_updated() \
.then_updated_experience_should_be_saved_on_db()
def test_save_experience(self):
ExperienceRepoTestCase.ScenarioMaker() \
.given_a_person_in_db() \
.given_an_experience_in_db() \
.when_save_that_experience() \
.then_result_should_be_true() \
.then_save_should_be_created_for_that_experience_and_person()
def test_save_twice_doesnt_create_2_saves(self):
ExperienceRepoTestCase.ScenarioMaker() \
.given_a_person_in_db() \
.given_an_experience_in_db() \
.given_a_save_for_that_person_and_experience() \
.when_save_that_experience() \
.then_result_should_be_true() \
.then_save_for_that_experience_and_person_should_be_only_one()
def test_unsave_experience(self):
ExperienceRepoTestCase.ScenarioMaker() \
.given_a_person_in_db() \
.given_an_experience_in_db() \
.given_a_save_for_that_person_and_experience() \
.when_unsave_that_experience() \
.then_result_should_be_true() \
.then_save_should_be_deleted_from_db()
class ScenarioMaker:
def __init__(self):
self.orm_person = None
self.orm_experience_a = None
self.orm_experience_b = None
self.experience_a = None
self.experience_b = None
self.result = None
self.entity_does_not_exist_error = None
self.experience_to_create = None
def given_a_person_in_db(self):
self.orm_person = ORMPerson.objects.create(username='usr')
return self
def given_another_person_in_db(self):
self.second_orm_person = ORMPerson.objects.create(username='nme')
return self
def given_an_experience_created_by_first_person_in_db(self):
self.orm_experience_a = ORMExperience.objects.create(title='Exp a', description='some description',
author=self.orm_person)
self.experience_a = Experience(id=self.orm_experience_a.id, title='Exp a', description='some description',
author_id=self.orm_person.id, author_username=self.orm_person.username)
return self
def given_another_experience_created_by_first_person_in_db(self):
self.orm_experience_b = ORMExperience.objects.create(title='Exp b', description='some description',
author=self.orm_person)
self.experience_b = Experience(id=self.orm_experience_b.id, title='Exp b', description='some description',
author_id=self.orm_person.id, author_username=self.orm_person.username)
return self
def given_an_experience_created_by_second_person_in_db(self):
self.orm_experience_c = ORMExperience.objects.create(title='Exp c', description='description',
author=self.second_orm_person)
self.experience_c = Experience(id=self.orm_experience_c.id, title='Exp c', description='description',
author_id=self.second_orm_person.id,
author_username=self.second_orm_person.username)
return self
def given_another_experience_created_by_second_person_in_db(self):
self.orm_experience_d = ORMExperience.objects.create(title='Exp d', description='description',
author=self.second_orm_person)
self.experience_d = Experience(id=self.orm_experience_d.id, title='Exp d', description='description',
author_id=self.second_orm_person.id,
author_username=self.second_orm_person.username)
return self
def given_a_third_experience_created_by_second_person_and_saved_by_first(self):
self.orm_experience_e = ORMExperience.objects.create(title='Exp e', description='description',
author=self.second_orm_person)
self.experience_e = Experience(id=self.orm_experience_e.id, title='Exp e', description='description',
author_id=self.second_orm_person.id,
author_username=self.second_orm_person.username)
ORMSave.objects.create(person=self.orm_person, experience=self.orm_experience_e)
return self
def given_a_fourth_experience_created_by_second_person_and_saved_by_second(self):
self.orm_experience_f = ORMExperience.objects.create(title='Exp f', description='description',
author=self.second_orm_person)
self.experience_f = Experience(id=self.orm_experience_f.id, title='Exp f', description='description',
author_id=self.second_orm_person.id,
author_username=self.second_orm_person.username)
ORMSave.objects.create(person=self.second_orm_person, experience=self.orm_experience_f)
return self
def given_logged_person_id_is_first_person_id(self):
self.logged_person_id = self.orm_person.id
return self
def given_an_experience_to_create(self):
self.experience_to_create = Experience(id="", title='Exp a', description='some description',
author_id=self.orm_person.id)
return self
def given_an_experience_in_db(self):
self.orm_experience_a = ORMExperience.objects.create(title='Exp a', description='some description',
author=self.orm_person)
self.experience_a = Experience(id=self.orm_experience_a.id, title='Exp a', description='some description',
author_id=self.orm_person.id, author_username=self.orm_person.username)
return self
def given_an_updated_experience(self):
self.updated_experience = Experience(id=self.experience_a.id, title='T2', description='updated',
author_id=self.orm_person.id,
author_username=self.orm_person.username)
return self
def given_another_experience_in_db(self):
self.orm_experience_b = ORMExperience.objects.create(title='Exp b', description='other description',
author=self.orm_person)
self.experience_b = Experience(id=self.orm_experience_b.id, title='Exp b',
description='other description',
author_id=self.orm_person.id, author_username=self.orm_person.username)
return self
def given_a_save_for_that_person_and_experience(self):
ORMSave.objects.create(person=self.orm_person, experience=self.orm_experience_a)
return self
def given_a_save_to_first_second_person_experience_from_first_person(self):
ORMSave.objects.create(person=self.orm_person, experience=self.orm_experience_c)
return self
def when_get_all_experiences(self, mine=False, saved=False):
self.result = ExperienceRepo().get_all_experiences(logged_person_id=self.logged_person_id,
mine=mine, saved=saved)
return self
def when_get_experience_with_its_id(self):
self.result = ExperienceRepo().get_experience(self.orm_experience_a.id)
return self
def when_get_unexistent_experience(self):
try:
ExperienceRepo().get_experience(0)
except EntityDoesNotExistException as e:
self.entity_does_not_exist_error = e
return self
def when_create_this_experience(self):
self.result = ExperienceRepo().create_experience(self.experience_to_create)
return self
def when_update_first_experience(self):
self.result = ExperienceRepo().update_experience(self.updated_experience)
return self
def when_save_that_experience(self):
try:
self.result = ExperienceRepo().save_experience(person_id=self.orm_person.id,
experience_id=self.orm_experience_a.id)
except Exception as e:
self.error = e
return self
def when_unsave_that_experience(self):
try:
self.result = ExperienceRepo().unsave_experience(person_id=self.orm_person.id,
experience_id=self.orm_experience_a.id)
except Exception as e:
self.error = e
return self
def then_repo_should_return_just_first_two_experience_with_mine_true(self):
assert self.result == [self.experience_b.builder().is_mine(True).build(),
self.experience_a.builder().is_mine(True).build()]
return self
def then_repo_should_return_just_second_two_experience_and_fourth_with_saved_mine_false(self):
assert self.result == [self.experience_c, self.experience_d, self.experience_f]
return self
def then_repo_should_return_just_second_two_experience(self):
assert self.result == [self.experience_c, self.experience_d]
return self
def then_repo_should_return_just_first_second_person_experience_with_saved_true(self):
assert self.result == [self.experience_c.builder().is_saved(True).build()]
return self
def then_repo_should_return_experience(self):
assert self.result == self.experience_a
return self
def then_entity_does_not_exists_should_be_raised(self):
assert self.entity_does_not_exist_error is not None
return self
def then_should_return_this_experience_with_mine_true(self):
assert self.result.title == self.experience_to_create.title
assert self.result.description == self.experience_to_create.description
assert self.result.is_mine is True
return self
def then_should_save_this_experience_to_db(self):
exp = ExperienceRepo().get_experience(self.result.id)
assert exp.title == self.experience_to_create.title
assert exp.description == self.experience_to_create.description
return self
def then_result_should_be_same_as_updated(self):
assert self.updated_experience.title == self.result.title
assert self.updated_experience.description == self.result.description
assert not self.result.picture
return self
def then_updated_experience_should_be_saved_on_db(self):
orm_experience = ORMExperience.objects.get(id=self.result.id,
title=self.updated_experience.title,
description=self.updated_experience.description)
assert orm_experience is not None
return self
def then_result_should_be_true(self):
assert self.result is True
return self
def then_save_should_be_created_for_that_experience_and_person(self):
assert ORMSave.objects.filter(person=self.orm_person, experience=self.orm_experience_a).exists()
return self
def then_save_for_that_experience_and_person_should_be_only_one(self):
assert len(ORMSave.objects.filter(person=self.orm_person, experience=self.orm_experience_a)) == 1
return self
def then_save_should_be_deleted_from_db(self):
assert not ORMSave.objects.filter(person=self.orm_person, experience=self.orm_experience_a).exists()
return self
|
466687
|
import numpy as np
import cv2
def norm_rot_angle(rot):
norm_rot = rot
while norm_rot > 180:
norm_rot = norm_rot - 360
while norm_rot <= -180:
norm_rot = norm_rot + 360
return norm_rot
def rotate_2d(pt_2d, rot_rad):
x = pt_2d[0]
y = pt_2d[1]
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
xx = x * cs - y * sn
yy = x * sn + y * cs
return np.array([xx, yy], dtype=np.float32)
def trans_point2d(pt_2d, trans):
src_pt = np.array([pt_2d[0], pt_2d[1], 1.]).T
dst_pt = np.dot(trans, src_pt)
return dst_pt[0:2]
def trans_points_3d(_joints, trans, depth_scale):
joints = _joints.copy()
for n_jt in range(len(joints)):
joints[n_jt, 0:2] = trans_point2d(joints[n_jt, 0:2], trans)
joints[n_jt, 2] = joints[n_jt, 2] * depth_scale
return joints
def fliplr_joints(_joints, _joints_vis, width, matched_parts):
"""
flip coords
:param _joints: numpy array, nJoints * dim, dim == 2 [x, y] or dim == 3 [x, y, z]
:param _joints_vis: same as joints
:param width: image width
:param matched_parts: list of pairs
:return:
"""
joints = _joints.copy()
joints_vis = _joints_vis.copy()
joints[:, 0] = width - joints[:, 0] - 1
# Change left-right parts
for pair in matched_parts:
joints[pair[0], :], joints[pair[1], :] = joints[pair[1], :], joints[pair[0], :].copy()
joints_vis[pair[0], :], joints_vis[pair[1], :] = joints_vis[pair[1], :], joints_vis[pair[0], :].copy()
return joints, joints_vis
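# Illustrative sketch (hypothetical 3-joint layout, not tied to any particular
# dataset): mirror a pose horizontally in a 100-px wide image and swap the
# left/right pair at indices (0, 1).
#     joints = np.array([[10., 20., 0.5], [90., 20., 0.7], [50., 40., 0.9]])
#     joints_vis = np.ones_like(joints)
#     joints_f, vis_f = fliplr_joints(joints, joints_vis, width=100,
#                                     matched_parts=[(0, 1)])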
def gen_affine_trans_from_box_cv(c_x, c_y, src_width, src_height, dst_width, dst_height, scale, rot, inv):
"""
:param c_x, c_y, src_width, src_height: define a box
:param dst_width, dst_height: target image size
:param scale: augment image size, default 1.0
:param rot: augment box rotation, default 0.0
:param inv: False: image domain to patch domain. True: patch domain to image domain. Default False.
:return:
"""
# augment size with scale
src_w = src_width * scale
src_h = src_height * scale
src_center = np.array([c_x, c_y], dtype=np.float32)
# augment rotation
rot_rad = np.pi * rot / 180
src_downdir = rotate_2d(np.array([0, src_h * 0.5], dtype=np.float32), rot_rad)
src_rightdir = rotate_2d(np.array([src_w * 0.5, 0], dtype=np.float32), rot_rad)
dst_w = dst_width
dst_h = dst_height
dst_center = np.array([dst_w * 0.5, dst_h * 0.5], dtype=np.float32)
dst_downdir = np.array([0, dst_h * 0.5], dtype=np.float32)
dst_rightdir = np.array([dst_w * 0.5, 0], dtype=np.float32)
src = np.zeros((3, 2), dtype=np.float32)
src[0, :] = src_center
src[1, :] = src_center + src_downdir
src[2, :] = src_center + src_rightdir
dst = np.zeros((3, 2), dtype=np.float32)
dst[0, :] = dst_center
dst[1, :] = dst_center + dst_downdir
dst[2, :] = dst_center + dst_rightdir
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def gen_patch_image_from_box_cv(cvimg, c_x, c_y, bb_width, bb_height, patch_width, patch_height, do_flip, scale, rot):
"""
:param cvimg: original image
:param c_x, c_y, bb_width, bb_height: define a box
:param patch_width, patch_height: target patch image size
:param do_flip: flip augment
:param scale: scale augment
:param rot: rotation augment
:return:
"""
img = cvimg.copy()
img_height, img_width, img_channels = img.shape
if do_flip:
img = img[:, ::-1, :]
c_x = img_width - c_x - 1
trans = gen_affine_trans_from_box_cv(c_x, c_y, bb_width, bb_height, patch_width, patch_height, scale, rot, False)
img_patch = cv2.warpAffine(img, trans, (int(patch_width), int(patch_height)), flags=cv2.INTER_LINEAR)
return img_patch, trans
def trans_coords_from_patch_to_org_2d(coords_in_patch, c_x, c_y, bb_width, bb_height, rot, patch_width, patch_height):
coords_in_org = coords_in_patch.copy()
trans = gen_affine_trans_from_box_cv(c_x, c_y, bb_width, bb_height, patch_width, patch_height, 1.0, rot, True)
for p in range(coords_in_patch.shape[0]):
coords_in_org[p, 0:2] = trans_point2d(coords_in_patch[p, 0:2], trans)
return coords_in_org
def trans_coords_from_patch_to_org_3d(coords_in_patch, c_x, c_y, bb_width, bb_height, rot, patch_width, patch_height,
depth_scale):
coords_in_org = trans_coords_from_patch_to_org_2d(coords_in_patch, c_x, c_y, bb_width, bb_height, rot, patch_width,
patch_height)
coords_in_org[:, 2] = coords_in_patch[:, 2] * depth_scale
return coords_in_org
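# Illustrative round trip (hypothetical image and box values): crop a rotated
# patch around a box centre, then map patch-space 2d keypoints back to the
# original image via the inverse affine transform.
#     patch, trans = gen_patch_image_from_box_cv(img, 320, 240, 200, 200,
#                                                256, 256, False, 1.0, 30.0)
#     coords_org = trans_coords_from_patch_to_org_2d(coords_in_patch, 320, 240,
#                                                    200, 200, 30.0, 256, 256)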
|
466691
|
import numpy as np
from PuzzleLib.Backend import gpuarray, Memory
from PuzzleLib.Modules.Module import ModuleError, Module
class DepthConcat(Module):
def __init__(self, name=None):
super().__init__(name)
self.movesData = True
def updateData(self, data):
self.data = Memory.depthConcat(data)
def updateGrad(self, grad):
self.grad = Memory.depthSplit(grad, self.inData)
def checkDataShape(self, shapes):
if not isinstance(shapes, list):
raise ModuleError("Data must be list of tensors")
for shape in shapes:
if len(shape) != 4:
raise ModuleError("Data must consist of 4d tensors")
if shape[0] != shapes[0][0]:
raise ModuleError("Inconsistency in batch size")
def dataShapeFrom(self, shapes):
depth, h, w = 0, 0, 0
for shape in shapes:
depth += shape[1]
h, w = max(h, shape[2]), max(w, shape[3])
return shapes[0][0], depth, h, w
def checkGradShape(self, shape):
if len(shape) != 4:
raise ModuleError("Grad must be 4d tensor")
depth, h, w = 0, 0, 0
for data in self.inData:
sh = data.shape
depth += sh[1]
h, w = max(h, sh[2]), max(w, sh[3])
gradshape = (self.inData[0].shape[0], depth, h, w)
if shape != gradshape:
raise ModuleError("Bad grad shape (%s given, %s expected)" % (shape, gradshape))
def gradShapeFrom(self, shape):
shapes = [data.shape for data in self.inData]
return shapes
def unittest():
data1 = gpuarray.to_gpu(np.random.randn(3, 4, 2, 2).astype(np.float32))
data2 = gpuarray.to_gpu(np.random.randn(3, 2, 6, 6).astype(np.float32))
data3 = gpuarray.to_gpu(np.random.randn(3, 5, 4, 4).astype(np.float32))
data4 = gpuarray.to_gpu(np.random.randn(3, 3, 5, 5).astype(np.float32))
alldata = [data1, data2, data3, data4]
concat = DepthConcat()
concat(alldata)
depth, h, w = 0, 0, 0
for data in alldata:
depth += data.shape[1]
h, w = max(h, data.shape[2]), max(w, data.shape[3])
hostOutData = np.zeros(shape=(data1.shape[0], depth, h, w), dtype=np.float32)
hostOutData[:, :4, 2:4, 2:4] = data1.get()
hostOutData[:, 4:6, :, :] = data2.get()
hostOutData[:, 6:11, 1:5, 1:5] = data3.get()
hostOutData[:, 11:, :5, :5] = data4.get()
assert np.allclose(hostOutData, concat.data.get())
grad = gpuarray.to_gpu(np.random.randn(*hostOutData.shape).astype(np.float32))
concat.backward(grad)
hostInGrads = [np.empty(data.shape, dtype=np.float32) for data in alldata]
hostInGrads[0] = grad.get()[:, :4, 2:4, 2:4]
hostInGrads[1] = grad.get()[:, 4:6, :, :]
hostInGrads[2] = grad.get()[:, 6:11, 1:5, 1:5]
hostInGrads[3] = grad.get()[:, 11:, :5, :5]
assert all(np.allclose(hostInGrad, concat.grad[i].get()) for i, hostInGrad in enumerate(hostInGrads))
if __name__ == "__main__":
unittest()
|
466695
|
from .tokenizer import Tokenizer
from .vocabulary import Vocabulary
# dummy import to call from chariot.transformer module
from .formatter.base import BaseFormatter
from .text.base import BasePreprocessor
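# NOTE: the next import shadows the BasePreprocessor name bound above; as the
# comment notes, these are dummy imports kept only for their side effects.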
from .token.base import BasePreprocessor
from .generator.base import BaseGenerator
|
466764
|
from functools import partial
from multiprocessing import Pool, cpu_count
from pathlib import Path
from shutil import copyfile
from PIL import Image
def resize_one(path, size=(224, 224), output_dir="resized"):
output_dir = Path(output_dir)
image = Image.open(path)
image = image.resize(size, resample=Image.LANCZOS)
image.save(output_dir / path.name)
print(output_dir / path.name)
def resize_images():
basepath = Path("/mnt/projects/ukbiobank/derived/imaging/retinal_fundus/mixed_images")
output_dir = Path("/mnt/projects/ukbiobank/derived/imaging/retinal_fundus/images_resized_224")
output_dir.mkdir(exist_ok=True, parents=True)
f = partial(resize_one, output_dir=output_dir)
num_cores = cpu_count()
with Pool(num_cores) as p:
p.map(f, basepath.glob("*.png"))
def merge_in_dir():
basepath = Path("/mnt/projects/ukbiobank/derived/imaging/retinal_fundus")
leftpath = basepath / "left/672_left"
rightpath = basepath / "right/672_right"
output_dir = basepath / "images_resized_224"
left_files = leftpath.glob("*.png")
right_files = rightpath.glob("*.png")
left_dict = dict()
for left in left_files:
id = left.name.split("_")[0]
left_dict[id] = left
count = 0
for right in right_files:
id = right.name.split("_")[0]
if id in left_dict:
count += 1
copyfile(str(right.resolve()), str(Path(output_dir / right.name).resolve()))
left = left_dict[id]
copyfile(str(left.resolve()), str(Path(output_dir / left.name).resolve()))
if count % 100 == 0:
print("Processed " + str(count) + " scans so far.")
if __name__ == "__main__":
resize_images()
|
466768
|
import abc
import typing
import numpy as np
import SimpleITK as sitk
import pymia.data.conversion as conv
class Load(abc.ABC):
"""Interface for loading the data during the dataset creation in :meth:`.Traverser.traverse`
.. automethod:: __call__
"""
@abc.abstractmethod
def __call__(self, file_name: str, id_: str, category: str, subject_id: str) \
-> typing.Tuple[np.ndarray, typing.Union[conv.ImageProperties, None]]:
"""Loads the data from the file system according to the implementation.
Args:
file_name (str): Path to the corresponding data.
id_ (str): Identifier for the entry of the category, e.g., "Flair".
category (str): Name of the category, e.g., 'images'.
subject_id (str): Identifier of the current subject.
Returns:
tuple: A numpy array containing the loaded data and :class:`ImageProperties` describing the data.
:class:`.ImageProperties` is :code:`None` if the loaded data does not contain further properties.
"""
pass
class LoadDefault(Load):
"""The default loader.
It loads every data item (id/entry, category) for each subject as :code:`sitk.Image`
and the corresponding :class:`.ImageProperties`.
"""
def __call__(self, file_name: str, id_: str, category: str, subject_id: str) -> \
typing.Tuple[np.ndarray, typing.Union[conv.ImageProperties, None]]:
img = sitk.ReadImage(file_name)
return sitk.GetArrayFromImage(img), conv.ImageProperties(img)
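# Minimal sketch of a custom loader (hypothetical; assumes the data on disk is
# a plain .npy array with no spatial metadata, so no ImageProperties is
# returned):
#     class LoadNumpy(Load):
#         def __call__(self, file_name: str, id_: str, category: str,
#                      subject_id: str):
#             return np.load(file_name), None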
|
466774
|
import pytest
from srp import *
def test_formato_string():
user = Usuario(
nombre='Ramanujan',
edad=25,
direccion='Calle X, #Y Colonia Z'
)
assert 'Nombre: Ramanujan\nEdad: 25\nDireccion: Calle X, #Y Colonia Z' == user.formato_string()
def test_formato_diccionario():
user = Usuario(
nombre='Ramanujan',
edad=25,
direccion='Calle X, #Y Colonia Z'
)
assert {'nombre': 'Ramanujan', 'edad': 25, 'direccion': 'Calle X, #Y Colonia Z'} == user.formato_diccionario()
def test_formato_json():
user = Usuario(
nombre='Ramanujan',
edad=25,
direccion='Calle X, #Y Colonia Z'
)
assert '{"nombre": "Ramanujan", "edad": 25, "direccion": "Calle X, #Y Colonia Z"}' == user.formato_json()
def test_formato_html():
user = Usuario(
nombre='Ramanujan',
edad=25,
direccion='Calle X, #Y Colonia Z'
)
assert ('<table border="1"><tr><th>nombre</th><td>Ramanujan</td></tr><tr><th>edad</th><td>25</td></tr><tr><th>direccion</th><td>Calle X, #Y Colonia Z</td></tr></table>') == user.formato_html()
def test_formato_xml():
user = Usuario(
nombre='Ramanujan',
edad=25,
direccion='Calle X, #Y Colonia Z'
)
assert ('<?xmlversion="1.0"?><all><nombretype="str">Ramanujan</nombre><edadtype="int">25</edad><direcciontype="str">CalleX,#YColoniaZ</direccion></all>') == user.formato_xml()
|
466812
|
from time import sleep
from uuid import uuid4
import pyodbc
from .base import *
DB_DRIVER = 'ODBC Driver 17 for SQL Server'
DB_HOST = os.environ['MCR_MICROSOFT_COM_MSSQL_SERVER_HOST']
DB_PORT = os.environ['MCR_MICROSOFT_COM_MSSQL_SERVER_1433_TCP']
DB_USER = 'sa'
DB_PASSWORD = '<PASSWORD>'
sleep(10)
db_connection = pyodbc.connect(f"DRIVER={DB_DRIVER};SERVER={DB_HOST},{DB_PORT};DATABASE=master;UID={DB_USER};PWD={DB_PASSWORD}", autocommit=True)
cursor = db_connection.cursor()
cursor.execute(
"""
If(db_id(N'river') IS NULL)
BEGIN
CREATE DATABASE river
END;
""")
DATABASES = {
'default': {
'ENGINE': 'sql_server.pyodbc',
'NAME': 'river',
'USER': DB_USER,
'PASSWORD': DB_PASSWORD,
'HOST': DB_HOST,
'PORT': DB_PORT,
'TEST': {
'NAME': 'river' + str(uuid4()),
},
'OPTIONS': {
'driver': DB_DRIVER
},
}
}
INSTALLED_APPS += (
'river.tests',
)
if django.get_version() >= '1.9.0':
MIGRATION_MODULES = DisableMigrations()
|
466822
|
import skimage.io as io
import skimage.transform as skt
import numpy as np
from PIL import Image
from src.models.class_patcher import patcher
from src.utils.imgproc import *
class patcher(patcher):
def __init__(self, body='./body/body_i-s.png', **options):
super().__init__('I-s', body=body, pantie_position=[56, 2458], **options)
self.mask = io.imread('./mask/mask_i-s.png')
try:
self.is_4k = self.options['is_4k']
except KeyError:
self.is_4k = self.ask(question='4K(4096x4096) resolution texture?', default=False)
def convert(self, image):
pantie = np.array(image)
patch = np.copy(pantie[-180:-5, 546:, :])
patch = skt.resize(patch[::-1, ::-1, :], (200, 65), anti_aliasing=True, mode='reflect')
[pr, pc, d] = patch.shape
pantie[127 - 5:127 - 5 + pr, :pc, :] = np.uint8(patch * 255)
# Front affine transform
front = pantie[:, :300]
front = skt.rotate(front, -3.0, resize=True)
arrx = (np.linspace(0, 1, 100)**2) * 15 - 10
arry = np.zeros(100)
arry[:50] -= (np.sin(np.linspace(0, 2 * np.pi, 100) - np.pi / 4) * 20)[:50]
front = affine_transform_by_arr(front, arrx, arry)
front = np.uint8(front[:-150, :-5] * 255)
# First back affine transform
back = pantie[:, 300:]
back = skt.rotate(back, 27.3, resize=True)
arrx = np.zeros(100)
arry = np.zeros(100)
arrx[:40] += (np.linspace(1, 0, 40)**2) * 70
arrx[50:] += (np.linspace(0, 1, 50)**2) * 220
arrx -= 20
back = affine_transform_by_arr(back, arrx, arry)
back = np.uint8(back[10:-200, 28:] * 255)
[fr, fc, d] = front.shape
[br, bc, d] = back.shape
shift_x = 0
shift_y = 85
pantie = np.zeros((np.max([fr + shift_y, br]), fc + bc - shift_x, d), dtype=np.uint8)
pantie[shift_y:shift_y + fr, :fc] = front
pantie[:br, fc - shift_x:] = back
pantie = np.bitwise_and(pantie, self.mask)
pantie = np.uint8(resize(pantie, [1.63, 1.83]) * 255)
return Image.fromarray(pantie)
def patch(self, image, transparent=False):
image = self.convert(image)
if transparent:
if self.is_4k:
patched = Image.new("RGBA", (4096, 4096))
else:
patched = Image.new("RGBA", (2048, 2048))
else:
patched = self.body.copy()
if self.is_4k or patched.size[0] > 2048:
pantie_position = self.pantie_position
else:
pantie_position = (int(self.pantie_position[0] / 2), int(self.pantie_position[1] / 2))
image = image.resize((int(image.width / 2), int(image.height / 2)), resample=Image.BICUBIC)
patched = self.paste(patched, image, pantie_position)
return patched
|
466831
|
import claripy
import nose
def test_fallback_abstraction():
bz = claripy.backends.z3
a = claripy.BVV(5, 32)
b = claripy.BVS('x', 32, explicit_name=True)
c = a+b
d = 5+b
e = b+5
f = b+b
g = a+a
nose.tools.assert_false(a.symbolic)
nose.tools.assert_true(b.symbolic)
nose.tools.assert_true(c.symbolic)
nose.tools.assert_true(d.symbolic)
nose.tools.assert_true(e.symbolic)
nose.tools.assert_true(f.symbolic)
nose.tools.assert_is(type(claripy.backends.concrete.convert(a)), claripy.bv.BVV)
nose.tools.assert_is(type(claripy.backends.concrete.convert(g)), claripy.bv.BVV)
nose.tools.assert_raises(claripy.errors.BackendError, claripy.backends.concrete.convert, b)
nose.tools.assert_raises(claripy.errors.BackendError, claripy.backends.concrete.convert, c)
nose.tools.assert_raises(claripy.errors.BackendError, claripy.backends.concrete.convert, d)
nose.tools.assert_raises(claripy.errors.BackendError, claripy.backends.concrete.convert, e)
nose.tools.assert_raises(claripy.errors.BackendError, claripy.backends.concrete.convert, f)
nose.tools.assert_equal(str(bz.convert(b)), 'x')
nose.tools.assert_equal(bz.convert(b).__module__, 'z3.z3')
nose.tools.assert_equal(str(bz.convert(c)), '5 + x')
nose.tools.assert_equal(str(bz.convert(d)), '5 + x')
nose.tools.assert_equal(str(bz.convert(e)), 'x + 5')
nose.tools.assert_equal(str(bz.convert(f)), 'x + x')
if __name__ == '__main__':
test_fallback_abstraction()
|
466835
|
import mxnet as mx
def vgg16_pool3(input):
conv1_1 = mx.sym.Convolution(data=input, kernel=(3,3), pad=(100,100),
num_filter=64, name="conv1_1")
relu1_1 = mx.sym.Activation(data=conv1_1, act_type="relu",
name="relu1_1")
conv1_2 = mx.sym.Convolution(data=relu1_1, kernel=(3,3), pad=(1,1),
num_filter=64, name="conv1_2")
relu1_2 = mx.sym.Activation(data=conv1_2, act_type="relu",
name="relu1_2")
pool1 = mx.sym.Pooling(data=relu1_2, pool_type="max", kernel=(2,2),
stride=(2,2), name="pool1")
conv2_1 = mx.sym.Convolution(data=pool1, kernel=(3,3), pad=(1,1),
num_filter=128, name="conv2_1")
relu2_1 = mx.sym.Activation(data=conv2_1, act_type="relu",
name="relu2_1")
conv2_2 = mx.sym.Convolution(data=relu2_1, kernel=(3,3), pad=(1,1),
num_filter=128, name="conv2_2")
relu2_2 = mx.sym.Activation(data=conv2_2, act_type="relu",
name="relu2_2")
pool2 = mx.sym.Pooling(data=relu2_2, pool_type="max", kernel=(2,2),
stride=(2,2), name="pool2")
conv3_1 = mx.sym.Convolution(data=pool2, kernel=(3,3), pad=(1,1),
num_filter=256, name="conv3_1")
relu3_1 = mx.sym.Activation(data=conv3_1, act_type="relu",
name="relu3_1")
conv3_2 = mx.sym.Convolution(data=relu3_1, kernel=(3,3), pad=(1,1),
num_filter=256, name="conv3_2")
relu3_2 = mx.sym.Activation(data=conv3_2, act_type="relu",
name="relu3_2")
conv3_3 = mx.sym.Convolution(data=relu3_2, kernel=(3,3), pad=(1,1),
num_filter=256, name="conv3_3")
relu3_3 = mx.sym.Activation(data=conv3_3, act_type="relu",
name="relu3_3")
pool3 = mx.sym.Pooling(data=relu3_3, pool_type="max", kernel=(2,2),
stride=(2,2), name="pool3")
return pool3
def vgg16_pool4(input):
conv4_1 = mx.sym.Convolution(data=input, kernel=(3,3), pad=(1,1),
num_filter=512, name="conv4_1")
relu4_1 = mx.sym.Activation(data=conv4_1, act_type="relu",
name="relu4_1")
conv4_2 = mx.sym.Convolution(data=relu4_1, kernel=(3,3), pad=(1,1),
num_filter=512, name="conv4_2")
relu4_2 = mx.sym.Activation(data=conv4_2, act_type="relu",
name="relu4_2")
conv4_3 = mx.sym.Convolution(data=relu4_2, kernel=(3,3), pad=(1,1),
num_filter=512, name="conv4_3")
relu4_3 = mx.sym.Activation(data=conv4_3, act_type="relu",
name="relu4_3")
pool4 = mx.sym.Pooling(data=relu4_3, pool_type="max", kernel=(2,2),
stride=(2,2), name="pool4")
return pool4
def vgg16_score(input, num_classes):
conv5_1 = mx.sym.Convolution(data=input, kernel=(3,3), pad=(1,1),
num_filter=512, name="conv5_1")
relu5_1 = mx.sym.Activation(data=conv5_1, act_type="relu",
name="relu5_1")
conv5_2 = mx.sym.Convolution(data=relu5_1, kernel=(3,3), pad=(1,1),
num_filter=512, name="conv5_2")
relu5_2 = mx.sym.Activation(data=conv5_2, act_type="relu",
name="relu5_2")
conv5_3 = mx.sym.Convolution(data=relu5_2, kernel=(3,3), pad=(1,1),
num_filter=512, name="conv5_3")
relu5_3 = mx.sym.Activation(data=conv5_3, act_type="relu",
name="relu5_3")
pool5 = mx.sym.Pooling(data=relu5_3, pool_type="max", kernel=(2,2),
stride=(2,2), name="pool5")
fc6 = mx.sym.Convolution(data=pool5, kernel=(7,7), num_filter=4096,
name="fc6")
relu6 = mx.sym.Activation(data=fc6, act_type="relu", name="relu6")
drop6 = mx.sym.Dropout(data=relu6, p=0.5, name="drop6")
fc7 = mx.sym.Convolution(data=drop6, kernel=(1,1), num_filter=4096,
name="fc7")
relu7 = mx.sym.Activation(data=fc7, act_type="relu", name="relu7")
drop7 = mx.sym.Dropout(data=relu7, p=0.5, name="drop7")
weight_score = mx.sym.Variable(name="score_weight",
init=mx.init.Constant(0))
score = mx.sym.Convolution(data=drop7, kernel=(1,1), weight=weight_score,
num_filter=num_classes, name="score")
return score
def fcnxs_score(input, crop, offset, kernel, stride, num_classes):
bigscore = mx.sym.Deconvolution(data=input, kernel=kernel, stride=stride,
adj=(stride[0]-1, stride[1]-1),
num_filter=num_classes, name="bigscore")
upscore = mx.sym.Crop(*[bigscore, crop], offset=offset, name="upscore")
softmax = mx.sym.SoftmaxOutput(data=upscore, multi_output=True,
use_ignore=True, ignore_label=-1,
name="softmax", normalization="valid")
return softmax
def symbol_fcn32s(num_classes=21):
data = mx.sym.Variable(name="data")
pool3 = vgg16_pool3(data)
pool4 = vgg16_pool4(pool3)
score = vgg16_score(pool4, num_classes)
softmax = fcnxs_score(score, data, offset=(19,19), kernel=(64,64),
stride=(32,32), num_classes=num_classes)
return softmax
def symbol_fcn16s(num_classes=21):
data = mx.sym.Variable(name="data")
pool3 = vgg16_pool3(data)
pool4 = vgg16_pool4(pool3)
score = vgg16_score(pool4, num_classes)
score2 = mx.sym.Deconvolution(data=score, kernel=(4,4),
stride=(2,2), num_filter=num_classes,
adj=(1,1), name="score2")
weight_score_pool4 = mx.sym.Variable(name="score_pool4_weight",
init=mx.init.Constant(0))
score_pool4 = mx.sym.Convolution(data=pool4, kernel=(1,1),
weight=weight_score_pool4,
num_filter=num_classes,
name="score_pool4")
score_pool4c = mx.sym.Crop(*[score_pool4, score2], offset=(5,5),
name="score_pool4c")
score_fused = score2 + score_pool4c
softmax = fcnxs_score(score_fused, data, offset=(27,27), kernel=(32,32),
stride=(16,16), num_classes=num_classes)
return softmax
def symbol_fcn8s(num_classes=21):
data = mx.sym.Variable(name="data")
pool3 = vgg16_pool3(data)
pool4 = vgg16_pool4(pool3)
score = vgg16_score(pool4, num_classes)
score2 = mx.sym.Deconvolution(data=score, kernel=(4,4),
stride=(2,2), num_filter=num_classes,
adj=(1,1), name="score2")
weight_score_pool4 = mx.sym.Variable(name="score_pool4_weight",
init=mx.init.Constant(0))
score_pool4 = mx.sym.Convolution(data=pool4, kernel=(1,1),
weight=weight_score_pool4,
num_filter=num_classes,
name="score_pool4")
score_pool4c = mx.sym.Crop(*[score_pool4, score2], offset=(5,5),
name="score_pool4c")
score_fused = score2 + score_pool4c
score4 = mx.sym.Deconvolution(data=score_fused, kernel=(4,4),
stride=(2,2), num_filter=num_classes,
adj=(1,1), name="score4")
weight_score_pool3 = mx.sym.Variable(name="score_pool3_weight",
init=mx.init.Constant(0))
score_pool3 = mx.sym.Convolution(data=pool3, kernel=(1,1),
weight=weight_score_pool3,
num_filter=num_classes,
name="score_pool3")
score_pool3c = mx.sym.Crop(*[score_pool3, score4], offset=(9,9),
name="score_pool3c")
score_final = score4 + score_pool3c
softmax = fcnxs_score(score_final, data, offset=(31,31), kernel=(16,16),
stride=(8,8), num_classes=num_classes)
return softmax
|
466867
|
from .. import util
from .base import TestAdmin
class TestAdminUnits(TestAdmin):
# Uses underlying paging
def test_get_administrative_units(self):
response = self.client_list.get_administrative_units()
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/administrative_units')
def test_get_administrative_units_with_limit(self):
response = self.client_list.get_administrative_units(limit=20)
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/administrative_units')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['20'],
'offset': ['0'],
})
def test_get_administrative_units_with_limit_offset(self):
response = self.client_list.get_administrative_units(limit=20, offset=2)
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/administrative_units')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['20'],
'offset': ['2'],
})
def test_get_administrative_units_with_offset(self):
response = self.client_list.get_administrative_units(offset=9001)
response = response[0]
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/administrative_units')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_get_administrative_units_iterator(self):
expected_path = '/admin/v1/administrative_units'
expected_method = 'GET'
tests = [
{
'admin_id': 'aaaa',
'group_id': '1234',
'integration_key': '<KEY>',
},
{
'admin_id': 'aaaa',
'group_id': '1234',
},
{
'admin_id': 'aaaa',
},
{}
]
for test in tests:
response = (
self.client_list.get_administrative_units_iterator(**test)
)
response = next(response)
self.assertEqual(response['method'], expected_method)
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, expected_path)
expected_params = {
key: [value] for (key, value) in test.items()
}
expected_params.update(
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
}
)
self.assertEqual(util.params_to_dict(args), expected_params)
|
466889
|
import itertools
from typing import Tuple, Dict, Optional
import torch
from pytorch_toolbelt.inference.ensembling import Ensembler, ApplySigmoidTo, ApplySoftmaxTo
from torch import nn
from . import rgb_dct, rgb, dct, ela, rgb_ela_blur, timm, ycrcb, hpf_net, srnet, res, bit, timm_bits, unet, stacker
from ..dataset import *
from ..predict import *
MODEL_REGISTRY = {
"stacker": stacker.StackingModel,
# Unet
"nr_rgb_unet": unet.nr_rgb_unet,
# Big Transfer
"bit_m_rx152_2": bit.bit_m_rx152_2,
"bit_m_rx50_1": bit.bit_m_rx50_1,
"bit_m_rx50_3": bit.bit_m_rx50_3,
"bit_m_rx101_1": bit.bit_m_rx101_1,
# TIMM
"rgb_tresnet_m_448": timm.rgb_tresnet_m_448,
"rgb_skresnext50_32x4d": timm.rgb_skresnext50_32x4d,
"rgb_swsl_resnext101_32x8d": timm.rgb_swsl_resnext101_32x8d,
# EfficientNets (Rounded Input)
"rgb_tf_efficientnet_b1_ns": timm.rgb_tf_efficientnet_b1_ns,
"rgb_tf_efficientnet_b2_ns": timm.rgb_tf_efficientnet_b2_ns,
"rgb_tf_efficientnet_b3_ns": timm.rgb_tf_efficientnet_b3_ns,
"rgb_tf_efficientnet_b6_ns": timm.rgb_tf_efficientnet_b6_ns,
"rgb_tf_efficientnet_b7_ns": timm.rgb_tf_efficientnet_b7_ns,
# EfficientNets (Non-Rounded Input)
"nr_rgb_tf_efficientnet_b3_ns_mish": timm.nr_rgb_tf_efficientnet_b3_ns_mish,
"nr_rgb_tf_efficientnet_b3_ns_gn_mish": timm.nr_rgb_tf_efficientnet_b3_ns_gn_mish,
"nr_rgb_tf_efficientnet_b3_ns_in_mish": timm.nr_rgb_tf_efficientnet_b3_ns_in_mish,
"nr_rgb_tf_efficientnet_b6_ns": timm.nr_rgb_tf_efficientnet_b6_ns,
"nr_rgb_tf_efficientnet_b6_ns_mish": timm.nr_rgb_tf_efficientnet_b6_ns_mish,
"nr_rgb_tf_efficientnet_b6_ns_mish_gep": timm.nr_rgb_tf_efficientnet_b6_ns_mish_gep,
"nr_rgb_tf_efficientnet_b7_ns_mish": timm.nr_rgb_tf_efficientnet_b7_ns_mish,
"nr_rgb_mixnet_xl": timm.nr_rgb_mixnet_xl,
"nr_rgb_mixnet_xxl": timm.nr_rgb_mixnet_xxl,
# Bits
"nr_rgb_tf_efficientnet_b3_ns_in_mish_bits": timm_bits.nr_rgb_tf_efficientnet_b3_ns_in_mish_bits,
"nr_rgb_tf_efficientnet_b3_ns_mish_bits": timm_bits.nr_rgb_tf_efficientnet_b3_ns_mish_bits,
# RGB + QF
# "rgb_qf_tf_efficientnet_b2_ns": timm.rgb_qf_tf_efficientnet_b2_ns,
# "rgb_qf_tf_efficientnet_b6_ns": timm.rgb_qf_tf_efficientnet_b6_ns,
# "rgb_qf_swsl_resnext101_32x8d": timm.rgb_qf_swsl_resnext101_32x8d,
# "nr_rgb_tf_efficientnet_b3_ns_mish_mask": timm.nr_rgb_tf_efficientnet_b3_ns_mish_mask,
"rgb_dct_resnet34": rgb_dct.rgb_dct_resnet34,
"rgb_dct_efficientb3": rgb_dct.rgb_dct_efficientb3,
"rgb_dct_seresnext50": rgb_dct.rgb_dct_seresnext50,
#
"rgb_b0": rgb.rgb_b0,
"rgb_resnet18": rgb.rgb_resnet18,
"rgb_resnet34": rgb.rgb_resnet34,
"rgb_seresnext50": rgb.rgb_seresnext50,
"rgb_densenet121": rgb.rgb_densenet121,
"rgb_densenet201": rgb.rgb_densenet201,
"rgb_hrnet18": rgb.rgb_hrnet18,
#
# DCT
"dct_seresnext50": dct.dct_seresnext50,
"dct_efficientnet_b6": dct.dct_efficientnet_b6,
#
# ELA
"ela_tf_efficientnet_b2_ns": ela.ela_tf_efficientnet_b2_ns,
"ela_tf_efficientnet_b6_ns": ela.ela_tf_efficientnet_b6_ns,
"ela_skresnext50_32x4d": ela.ela_skresnext50_32x4d,
"ela_rich_skresnext50_32x4d": ela.ela_rich_skresnext50_32x4d,
"ela_wider_resnet38": ela.ela_wider_resnet38,
"ela_ecaresnext26tn_32x4d": ela.ela_ecaresnext26tn_32x4d,
#
# Residual
"res_tf_efficientnet_b2_ns": res.res_tf_efficientnet_b2_ns,
"rgb_res_tf_efficientnet_b2_ns": res.rgb_res_tf_efficientnet_b2_ns,
"rgb_res_sms_tf_efficientnet_b2_ns": res.rgb_res_sms_tf_efficientnet_b2_ns,
"rgb_res_sms_v2_tf_efficientnet_b2_ns": res.rgb_res_sms_v2_tf_efficientnet_b2_ns,
#
# YCrCb
"ycrcb_skresnext50_32x4d": ycrcb.ycrcb_skresnext50_32x4d,
"ela_s2d_skresnext50_32x4d": ycrcb.ela_s2d_skresnext50_32x4d,
#
# HPF
"hpf_net": hpf_net.hpf_net,
"hpf_net2": hpf_net.hpf_net_v2,
"hpf_b3_fixed_gap": hpf_net.hpf_b3_fixed_gap,
"hpf_b3_covpool": hpf_net.hpf_b3_covpool,
"hpf_b3_fixed_covpool": hpf_net.hpf_b3_fixed_covpool,
# SRNET
"srnet": srnet.srnet,
"srnet_inplace": srnet.srnet_inplace,
# OLD STUFF
"frank": rgb_ela_blur.frank,
}
__all__ = ["MODEL_REGISTRY", "get_model", "ensemble_from_checkpoints", "wrap_model_with_tta"]
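# Look up a constructor in MODEL_REGISTRY by name and instantiate the model.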
def get_model(model_name, num_classes=4, pretrained=True, **kwargs):
return MODEL_REGISTRY[model_name](num_classes=num_classes, pretrained=pretrained, **kwargs)
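# Load a checkpoint, infer the model name from the saved CLI arguments if not given explicitly,
# restore the weights and return the model in eval mode together with the raw checkpoint dict.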
def model_from_checkpoint(
model_checkpoint: str, model_name=None, report=True, need_embedding=False, strict=True
) -> Tuple[nn.Module, Dict]:
checkpoint = torch.load(model_checkpoint, map_location="cpu")
model_name = model_name or checkpoint["checkpoint_data"]["cmd_args"]["model"]
model = get_model(model_name, pretrained=False, need_embedding=need_embedding)
model.load_state_dict(checkpoint["model_state_dict"], strict=strict)
return model.eval(), checkpoint
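# Optionally wrap the model with test-time augmentation: horizontal/vertical flips or the full D4 group.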
def wrap_model_with_tta(model, tta_mode, inputs, outputs):
if tta_mode == "flip-hv":
model = HVFlipTTA(model, inputs=inputs, outputs=outputs, average=True)
elif tta_mode == "d4":
model = D4TTA(model, inputs=inputs, outputs=outputs, average=True)
else:
pass
return model
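# Build a single model or an Ensembler over several checkpoints, applying sigmoid/softmax activations
# at the requested stage: after each model, after the ensemble, or after TTA.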
def ensemble_from_checkpoints(
checkpoints,
strict=True,
outputs=None,
activation: Optional[str] = "after_model",
tta=None,
temperature=1,
need_embedding=False,
model_name=None,
):
if activation not in {None, "after_model", "after_tta", "after_ensemble"}:
raise KeyError(activation)
models, loaded_checkpoints = zip(
*[
model_from_checkpoint(ck, model_name=model_name, need_embedding=need_embedding, strict=strict)
for ck in checkpoints
]
)
required_features = itertools.chain(*[m.required_features for m in models])
    required_features = list(set(required_features))
if activation == "after_model":
models = [ApplySigmoidTo(m, output_key=OUTPUT_PRED_MODIFICATION_FLAG, temperature=temperature) for m in models]
models = [ApplySoftmaxTo(m, output_key=OUTPUT_PRED_MODIFICATION_TYPE, temperature=temperature) for m in models]
print("Applying sigmoid activation to OUTPUT_PRED_MODIFICATION_FLAG", "after model")
print("Applying softmax activation to OUTPUT_PRED_MODIFICATION_TYPE", "after model")
if len(models) > 1:
model = Ensembler(models, outputs=outputs)
if activation == "after_ensemble":
model = ApplySigmoidTo(model, output_key=OUTPUT_PRED_MODIFICATION_FLAG, temperature=temperature)
model = ApplySoftmaxTo(model, output_key=OUTPUT_PRED_MODIFICATION_TYPE, temperature=temperature)
print("Applying sigmoid activation to outputs", outputs, "after ensemble")
else:
assert len(models) == 1
model = models[0]
if tta is not None:
model = wrap_model_with_tta(model, tta, inputs=required_features, outputs=outputs)
print("Wrapping models with TTA", tta)
if activation == "after_tta":
model = ApplySigmoidTo(model, output_key=OUTPUT_PRED_MODIFICATION_FLAG, temperature=temperature)
model = ApplySoftmaxTo(model, output_key=OUTPUT_PRED_MODIFICATION_TYPE, temperature=temperature)
print("Applying sigmoid activation to outputs", outputs, "after TTA")
return model.eval(), loaded_checkpoints, required_features
|
466934
|
import datetime
from haystack import indexes
from newsroom.models import Article
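# Haystack index over published articles; the full text is rendered from a template and
# 'modified' is used as the updated field for incremental reindexing.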
class ArticleIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='cached_byline')
published = indexes.DateTimeField(model_attr='published')
def get_updated_field(self):
return 'modified'
def get_model(self):
return Article
def index_queryset(self, using=None):
return self.get_model().objects.published().order_by("-published")
|
466943
|
import PyTango
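# Register a device named test/publisher/1 of class Publish under the Publisher/test server
# in the Tango database.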
dev_info = PyTango.DbDevInfo()
dev_info.server = "Publisher/test"
dev_info._class = "Publish"
dev_info.name = "test/publisher/1"
db = PyTango.Database()
db.add_device(dev_info)
|
466945
|
from tfbldr.datasets.audio import fetch_sample_speech_tapestry
from tfbldr.datasets.audio import soundsc
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import tensorflow as tf
import os
import numpy as np
from scipy.io import wavfile
from tfbldr.datasets.audio import linear_to_mel_weight_matrix
from tfbldr.datasets.audio import stft
from tfbldr.datasets.audio import iterate_invert_spectrogram
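# Invert a (log-)spectrogram back to audio by optimising a noise waveform with L-BFGS-B so that
# transform_op_fn(noise) matches the target spectrogram under an L2-normalised MSE loss.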
def sonify(spectrogram, samples, transform_op_fn, logscaled=True):
graph = tf.Graph()
with graph.as_default():
noise = tf.Variable(tf.random_normal([samples], stddev=1e-6))
x = transform_op_fn(noise)
y = spectrogram
if logscaled:
x = tf.expm1(x)
y = tf.expm1(y)
x = tf.nn.l2_normalize(x)
y = tf.nn.l2_normalize(y)
tf.losses.mean_squared_error(x, y[-tf.shape(x)[0]:])
optimizer = tf.contrib.opt.ScipyOptimizerInterface(
loss=tf.losses.get_total_loss(),
var_list=[noise],
tol=1e-16,
method='L-BFGS-B',
options={
'maxiter': 1000,
'disp': True
})
with tf.Session(graph=graph) as session:
session.run(tf.global_variables_initializer())
optimizer.minimize(session)
waveform = session.run(noise)
return waveform
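# Load a sample speech recording and scale the 16-bit samples to [-1, 1].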
fs, d = fetch_sample_speech_tapestry()
sample_rate = fs
window_size = 512
step = 128
n_mel = 80
wav_scale = 2 ** 15
waveform = d / float(wav_scale)
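# TensorFlow log-mel spectrogram: STFT magnitudes projected onto an 80-band mel filterbank.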
def logmel(waveform):
z = tf.contrib.signal.stft(waveform, window_size, step)
magnitudes = tf.abs(z)
filterbank = tf.contrib.signal.linear_to_mel_weight_matrix(
num_mel_bins=n_mel,
num_spectrogram_bins=magnitudes.shape[-1].value,
sample_rate=sample_rate,
lower_edge_hertz=125.,
upper_edge_hertz=7800.)
melspectrogram = tf.tensordot(magnitudes, filterbank, 1)
return tf.log1p(melspectrogram)
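# NumPy reference implementation of the same log-mel transform, built on tfbldr's stft helpers.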
def logmel2(waveform):
res = np.abs(stft(waveform, windowsize=window_size, step=step, real=False, compute_onesided=True))
mels = linear_to_mel_weight_matrix(
res.shape[1],
sample_rate,
lower_edge_hertz=125.,
upper_edge_hertz=7800.,
n_filts=n_mel, dtype=np.float64)
mel_res = np.dot(res, mels)
return np.log1p(mel_res)
with tf.Session():
spectrogram = logmel(waveform).eval()
spectrogram2 = logmel2(waveform)
spectrogram = (spectrogram - spectrogram.min()) / float(spectrogram.max() - spectrogram.min())
spectrogram2 = (spectrogram2 - spectrogram2.min()) / float(spectrogram2.max() - spectrogram2.min())
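# Plot the two normalised spectrograms side by side for visual comparison.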
f, axarr = plt.subplots(1, 2)
axarr[0].imshow(spectrogram)
axarr[1].imshow(spectrogram2)
plt.savefig("tmpspec")
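# Reconstruct waveforms from both spectrograms by optimising against the TensorFlow logmel transform.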
reconstructed_waveform = sonify(spectrogram, len(waveform), logmel)
wavfile.write("tmp.wav", sample_rate, soundsc(reconstructed_waveform))
reconstructed_waveform2 = sonify(spectrogram2, len(waveform), logmel)
wavfile.write("tmp2.wav", sample_rate, soundsc(reconstructed_waveform2))
fftsize = 512
substep = 32
rw_s = np.abs(stft(reconstructed_waveform, fftsize=fftsize, step=substep, real=False,
compute_onesided=False))
rw = iterate_invert_spectrogram(rw_s, fftsize, substep, n_iter=100, verbose=True)
rw2_s = np.abs(stft(reconstructed_waveform2, fftsize=fftsize, step=substep, real=False,
compute_onesided=False))
rw2 = iterate_invert_spectrogram(rw2_s, fftsize, substep, n_iter=100, verbose=True)
d_s = np.abs(stft(waveform, fftsize=fftsize, step=substep, real=False,
compute_onesided=False))
df = iterate_invert_spectrogram(d_s, fftsize, substep, n_iter=10, verbose=True)
wavfile.write("tmpif.wav", sample_rate, soundsc(df))
wavfile.write("tmpf.wav", sample_rate, soundsc(rw))
wavfile.write("tmpf2.wav", sample_rate, soundsc(rw2))
|
466961
|
from .src import log_gen
from .src import apache_gen
def help():
return 'lunaticlog: to help you generate fake log loads.'
|
466968
|
import tensorflow as tf
import tensorflow_datasets as tfds
# See all registered datasets
builders = tfds.list_builders()
print(builders)
# Load a given dataset by name, along with the DatasetInfo
data, info = tfds.load("mnist", with_info=True)
train_data, test_data = data['train'], data['test']
print(info)
|