code (stringlengths 20 to 1.05M) | apis (sequence) | extract_api (stringlengths 75 to 5.24M) |
---|---|---|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tests/contrib/grpc/hello.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x1etests/contrib/grpc/hello.proto\x12\nhelloworld"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2\xa2\x02\n\x05Hello\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply"\x00\x12\x45\n\rSayHelloTwice\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply"\x00\x30\x01\x12L\n\x12SayHelloRepeatedly\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply"\x00(\x01\x30\x01\x12\x44\n\x0cSayHelloLast\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply"\x00(\x01\x62\x06proto3'
)
_HELLOREQUEST = DESCRIPTOR.message_types_by_name["HelloRequest"]
_HELLOREPLY = DESCRIPTOR.message_types_by_name["HelloReply"]
HelloRequest = _reflection.GeneratedProtocolMessageType(
"HelloRequest",
(_message.Message,),
{
"DESCRIPTOR": _HELLOREQUEST,
"__module__": "tests.contrib.grpc.hello_pb2"
# @@protoc_insertion_point(class_scope:helloworld.HelloRequest)
},
)
_sym_db.RegisterMessage(HelloRequest)
HelloReply = _reflection.GeneratedProtocolMessageType(
"HelloReply",
(_message.Message,),
{
"DESCRIPTOR": _HELLOREPLY,
"__module__": "tests.contrib.grpc.hello_pb2"
# @@protoc_insertion_point(class_scope:helloworld.HelloReply)
},
)
_sym_db.RegisterMessage(HelloReply)
_HELLO = DESCRIPTOR.services_by_name["Hello"]
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_HELLOREQUEST._serialized_start = 46
_HELLOREQUEST._serialized_end = 74
_HELLOREPLY._serialized_start = 76
_HELLOREPLY._serialized_end = 105
_HELLO._serialized_start = 108
_HELLO._serialized_end = 398
# @@protoc_insertion_point(module_scope)
| [
"google.protobuf.descriptor_pool.Default",
"google.protobuf.reflection.GeneratedProtocolMessageType",
"google.protobuf.symbol_database.Default"
] | [((495, 521), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (519, 521), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((1328, 1491), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""HelloRequest"""', '(_message.Message,)', "{'DESCRIPTOR': _HELLOREQUEST, '__module__': 'tests.contrib.grpc.hello_pb2'}"], {}), "('HelloRequest', (_message.Message,\n ), {'DESCRIPTOR': _HELLOREQUEST, '__module__':\n 'tests.contrib.grpc.hello_pb2'})\n", (1368, 1491), True, 'from google.protobuf import reflection as _reflection\n'), ((1644, 1798), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""HelloReply"""', '(_message.Message,)', "{'DESCRIPTOR': _HELLOREPLY, '__module__': 'tests.contrib.grpc.hello_pb2'}"], {}), "('HelloReply', (_message.Message,),\n {'DESCRIPTOR': _HELLOREPLY, '__module__': 'tests.contrib.grpc.hello_pb2'})\n", (1684, 1798), True, 'from google.protobuf import reflection as _reflection\n'), ((537, 563), 'google.protobuf.descriptor_pool.Default', '_descriptor_pool.Default', ([], {}), '()\n', (561, 563), True, 'from google.protobuf import descriptor_pool as _descriptor_pool\n')] |
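A minimal usage sketch for the generated module above. The import path mirrors the `__module__` value in the code and is assumed to be importable from the test tree; the message contents are illustrative.

# Usage sketch: the import path below is an assumption based on __module__ above.
from tests.contrib.grpc import hello_pb2

req = hello_pb2.HelloRequest(name="world")        # build a request message
payload = req.SerializeToString()                 # serialize to wire-format bytes
parsed = hello_pb2.HelloRequest.FromString(payload)  # parse it back
assert parsed.name == "world"
reply = hello_pb2.HelloReply(message="Hello, world")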
import tensorflow as tf
import sonnet as snt
from .build_utils import residual_stack, maybe_set_l2_conv_contractive_regularizer
from .AbstractResNetLayer import AbstractResNetLayer
class ResEnc(AbstractResNetLayer):
"""
res enc used in VQ
"""
#TODO remove biases before batch norm, see if it makes any difference. Remove dropouts?
def __init__(self, num_hiddens, num_residual_layers, num_residual_hiddens,
activation,
is_training,
name='ResEnc',
prob_drop=0.1,
bn_momentum=0.99,
bn_renormalization=True,
creg_scale=None,
**extra_params):
super().__init__(num_hiddens,
num_residual_layers,
num_residual_hiddens,
activation,
is_training,
name=name,
prob_drop=prob_drop,
bn_momentum=bn_momentum,
bn_renormalization=bn_renormalization,
creg_scale=creg_scale,
**extra_params)
def _build(self, x):
# h_pre = x
conv1 = snt.Conv2D(
output_channels=self._num_hiddens / 2,
kernel_shape=(4, 4),
stride=(2, 2),
# use_bias=False,
**self._extra_params,
name="enc_1")
h = conv1(x)
maybe_set_l2_conv_contractive_regularizer(conv1, h, self._activation, self._creg_scale, name="enc_1_creg")
h = self._dropout(h, training=self._is_training)
h = tf.layers.batch_normalization(h, training=self._is_training,
momentum=self._bn_momentum,
renorm=self._bn_renormalization,
renorm_momentum=self._bn_momentum,
renorm_clipping=self._renorm_clipping,
name="batch_norm_1")
h = self._activation(h)
conv2 = snt.Conv2D(
output_channels=self._num_hiddens,
kernel_shape=(4, 4),
stride=(2, 2),
# use_bias=False,
**self._extra_params,
name="enc_2")
h = conv2(h)
maybe_set_l2_conv_contractive_regularizer(conv2, h, self._activation, self._creg_scale, name="enc_2_creg")
h = self._dropout(h, training=self._is_training)
h = tf.layers.batch_normalization(h, training=self._is_training,
momentum=self._bn_momentum,
renorm=self._bn_renormalization,
renorm_momentum=self._bn_momentum,
renorm_clipping=self._renorm_clipping,
name="batch_norm_2")
h = self._activation(h)
h = residual_stack(
h,
self._num_hiddens,
self._num_residual_layers,
self._num_residual_hiddens,
activation=self._activation,
training=self._is_training,
prob_drop=self._prob_drop,
momentum=self._bn_momentum,
renorm=self._bn_renormalization,
renorm_momentum=self._bn_momentum,
renorm_clipping=self._renorm_clipping,
creg_scale = self._creg_scale,
**self._extra_params
)
return h
| [
"sonnet.Conv2D",
"tensorflow.layers.batch_normalization"
] | [((1247, 1372), 'sonnet.Conv2D', 'snt.Conv2D', ([], {'output_channels': '(self._num_hiddens / 2)', 'kernel_shape': '(4, 4)', 'stride': '(2, 2)', 'name': '"""enc_1"""'}), "(output_channels=self._num_hiddens / 2, kernel_shape=(4, 4),\n stride=(2, 2), **self._extra_params, name='enc_1')\n", (1257, 1372), True, 'import sonnet as snt\n'), ((1763, 1989), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['h'], {'training': 'self._is_training', 'momentum': 'self._bn_momentum', 'renorm': 'self._bn_renormalization', 'renorm_momentum': 'self._bn_momentum', 'renorm_clipping': 'self._renorm_clipping', 'name': '"""batch_norm_1"""'}), "(h, training=self._is_training, momentum=self.\n _bn_momentum, renorm=self._bn_renormalization, renorm_momentum=self.\n _bn_momentum, renorm_clipping=self._renorm_clipping, name='batch_norm_1')\n", (1792, 1989), True, 'import tensorflow as tf\n'), ((2250, 2372), 'sonnet.Conv2D', 'snt.Conv2D', ([], {'output_channels': 'self._num_hiddens', 'kernel_shape': '(4, 4)', 'stride': '(2, 2)', 'name': '"""enc_2"""'}), "(output_channels=self._num_hiddens, kernel_shape=(4, 4), stride=(\n 2, 2), **self._extra_params, name='enc_2')\n", (2260, 2372), True, 'import sonnet as snt\n'), ((2761, 2987), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['h'], {'training': 'self._is_training', 'momentum': 'self._bn_momentum', 'renorm': 'self._bn_renormalization', 'renorm_momentum': 'self._bn_momentum', 'renorm_clipping': 'self._renorm_clipping', 'name': '"""batch_norm_2"""'}), "(h, training=self._is_training, momentum=self.\n _bn_momentum, renorm=self._bn_renormalization, renorm_momentum=self.\n _bn_momentum, renorm_clipping=self._renorm_clipping, name='batch_norm_2')\n", (2790, 2987), True, 'import tensorflow as tf\n')] |
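A hedged instantiation sketch for the encoder above. It assumes TensorFlow 1.x graph mode with Sonnet 1.x (so calling the module routes through `_build`), and the import path is a placeholder for wherever `ResEnc` lives in the package.

# Sketch only: TF 1.x + Sonnet 1.x assumed; the import path is hypothetical.
import tensorflow as tf
from mypackage.ResEnc import ResEnc  # placeholder import path

x = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])  # NHWC image batch
encoder = ResEnc(num_hiddens=128,
               num_residual_layers=2,
               num_residual_hiddens=32,
               activation=tf.nn.relu,
               is_training=True)
z = encoder(x)  # snt.AbstractModule routes __call__ to _build()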
#this is a number guess game.
import random
secretnumber=random.randint(1,50)
#asking player to guess 6 times.
for guesstaken in range(1,7):
print(" guess number between 1 to 50 :)")
guess = int(input())
if guess < secretnumber:
print("your guess is too low. ")
elif guess>secretnumber:
print("your guess is too high")
else:
break
if guess == secretnumber:
print("Good Job! you guessed my number in" + " " + str(guesstaken) +"guesses!")
else:
print("nope. the number i was thinking of was"+" "+ str(secretnumber)) | [
"random.randint"
] | [((57, 78), 'random.randint', 'random.randint', (['(1)', '(50)'], {}), '(1, 50)\n', (71, 78), False, 'import random\n')] |
from typing import Optional
from redis import Redis
def get_redis_client(socket_path: str = "/tmp/redis.sock"):
return Redis(decode_responses=True, unix_socket_path=socket_path)
class FSMContext:
def __init__(
self, redis_client: Optional[Redis] = None, socket_path: Optional[str] = None
):
socket_path = "/tmp/redis.sock" if socket_path is None else socket_path
self.redis_client = (
redis_client if redis_client else get_redis_client(socket_path=socket_path)
)
def get(self, cache_key):
return self.redis_client.get(cache_key)
def set(self, cache_key, value):
self.redis_client.set(cache_key, value=value)
def set_by_generator(self):
pass
| [
"redis.Redis"
] | [((126, 184), 'redis.Redis', 'Redis', ([], {'decode_responses': '(True)', 'unix_socket_path': 'socket_path'}), '(decode_responses=True, unix_socket_path=socket_path)\n', (131, 184), False, 'from redis import Redis\n')] |
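A small usage sketch for the class above. It assumes a Redis server is actually listening on the default unix socket; the key and value are made up.

# Assumes a Redis server is listening on /tmp/redis.sock; key and value are illustrative.
ctx = FSMContext()                       # builds its own Redis client via get_redis_client()
ctx.set("fsm:chat:42", "awaiting_name")
state = ctx.get("fsm:chat:42")            # str, because decode_responses=True
print(state)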
import random
from torch.utils.data import Dataset
from torchvision.transforms import ToTensor
from .binmnist import get_binmnist_datasets
from .fashion import get_fashion_datasets
from .gaussian_toy import GaussianToyDataset
from .gmm import GaussianMixtureDataset
from .omniglot import get_omniglot_datasets
from .shapes import get_shapes_datasets
MINI_TRAIN_SIZE = 5000
MINI_VALID_SIZE = 500
MINI_TEST_SIZE = 500
class MiniDataset(Dataset):
def __init__(self, dset, k, seed):
super().__init__()
if seed is not None:
random.seed(seed)
self.index = list(random.choices(list(range(len(dset))), k=k))
self.dset = dset
def __getitem__(self, item):
return self.dset[self.index[item]]
def __len__(self):
return len(self.index)
def get_datasets(opt, transform=ToTensor()):
if "shapes" in opt['dataset']:
output = get_shapes_datasets(transform=transform)
elif "gaussian-toy" in opt['dataset']:
dset = GaussianToyDataset()
output = dset, dset, dset
elif "gmm" in opt['dataset']:
_train_dset = GaussianMixtureDataset(N=100000, C=opt['N'])
_valid_dset = GaussianMixtureDataset(N=100, C=opt['N'])
_test_dset = GaussianMixtureDataset(N=100, C=opt['N'])
output = _train_dset, _valid_dset, _test_dset
elif "binmnist" in opt['dataset']:
output = get_binmnist_datasets(opt['data_root'], transform=transform)
elif "omniglot" in opt['dataset']:
output = get_omniglot_datasets(opt['data_root'], transform=transform, dynamic=True)
elif "fashion" in opt['dataset']:
output = get_fashion_datasets(opt['data_root'], transform=transform, binarize=True)
else:
raise ValueError(f"Unknown data: {opt['dataset']}")
if opt.get('mini', False):
def wrapper(dset_train, dset_valid, dset_test):
return MiniDataset(dset_train, MINI_TRAIN_SIZE, opt['seed']), \
MiniDataset(dset_valid, MINI_VALID_SIZE, opt['seed']), \
MiniDataset(dset_test, MINI_TEST_SIZE, opt['seed'] + 1)
output = wrapper(*output)
if opt.get('only_train_set', False):
def use_only_training(dset_train, *args):
return dset_train, dset_train, dset_train
output = use_only_training(*output)
return output
| [
"torchvision.transforms.ToTensor",
"random.seed"
] | [((838, 848), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (846, 848), False, 'from torchvision.transforms import ToTensor\n'), ((557, 574), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (568, 574), False, 'import random\n')] |
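An illustrative call of `get_datasets` above: the option keys are the ones the function reads, while the values are made up for the sketch.

# Option keys are read by get_datasets()/MiniDataset above; the values are illustrative.
opt = {
    "dataset": "binmnist",     # also: shapes, gaussian-toy, gmm, omniglot, fashion
    "data_root": "./data",
    "mini": True,              # subsample each split into a MiniDataset
    "seed": 42,
    "only_train_set": False,
}
train_set, valid_set, test_set = get_datasets(opt)
print(len(train_set), len(valid_set), len(test_set))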
from django.urls import path
from async_demos.web.views import SlowTaskView
urlpatterns = (
path('<int:is_slow>/', SlowTaskView.as_view(), name='slow view'),
)
| [
"async_demos.web.views.SlowTaskView.as_view"
] | [((121, 143), 'async_demos.web.views.SlowTaskView.as_view', 'SlowTaskView.as_view', ([], {}), '()\n', (141, 143), False, 'from async_demos.web.views import SlowTaskView\n')] |
from __future__ import absolute_import
import pytest
import kslurm.models.validators as validators
from kslurm.exceptions import TemplateError
class TestJobTemplateValidator:
@pytest.mark.parametrize("arg", ["16core64gb24h", "Fat", "Regular"])
def test_args_that_should_work(self, arg: str):
assert validators.job_template(arg)
@pytest.mark.parametrize("arg", ["random", "nonsense", "notfound"])
def test_args_that_shouldnt_work(self, arg: str):
with pytest.raises(TemplateError):
assert not validators.job_template(arg)
| [
"pytest.mark.parametrize",
"pytest.raises",
"kslurm.models.validators.job_template"
] | [((184, 251), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""arg"""', "['16core64gb24h', 'Fat', 'Regular']"], {}), "('arg', ['16core64gb24h', 'Fat', 'Regular'])\n", (207, 251), False, 'import pytest\n'), ((354, 420), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""arg"""', "['random', 'nonsense', 'notfound']"], {}), "('arg', ['random', 'nonsense', 'notfound'])\n", (377, 420), False, 'import pytest\n'), ((319, 347), 'kslurm.models.validators.job_template', 'validators.job_template', (['arg'], {}), '(arg)\n', (342, 347), True, 'import kslurm.models.validators as validators\n'), ((488, 516), 'pytest.raises', 'pytest.raises', (['TemplateError'], {}), '(TemplateError)\n', (501, 516), False, 'import pytest\n'), ((541, 569), 'kslurm.models.validators.job_template', 'validators.job_template', (['arg'], {}), '(arg)\n', (564, 569), True, 'import kslurm.models.validators as validators\n')] |
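A sketch of calling the validator directly, with behaviour inferred only from the test above: known template names pass, unknown names raise `TemplateError`.

# Behaviour inferred from the test above; not an authoritative description of kslurm.
import kslurm.models.validators as validators
from kslurm.exceptions import TemplateError

for name in ["16core64gb24h", "notfound"]:
    try:
        print(name, "->", validators.job_template(name))
    except TemplateError as err:
        print(name, "rejected:", err)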
from __future__ import unicode_literals
from django.db import models
from polymodels.fields import PolymorphicTypeField
from polymodels.models import PolymorphicModel
try:
from django.utils.encoding import python_2_unicode_compatible
except ImportError:
def python_2_unicode_compatible(cls):
return cls
class Zoo(models.Model):
animals = models.ManyToManyField('Animal', related_name='zoos')
@python_2_unicode_compatible
class Animal(PolymorphicModel):
name = models.CharField(max_length=50)
class Meta:
ordering = ['id']
def __str__(self):
return self.name
class NotInstalledAnimal(Animal):
class Meta:
app_label = 'not_installed'
class Mammal(Animal):
pass
class Monkey(Mammal):
friends = models.ManyToManyField('self')
class Trait(PolymorphicModel):
trait_type = PolymorphicTypeField('self', on_delete=models.CASCADE, blank=True, null=True)
mammal_type = PolymorphicTypeField(Mammal, on_delete=models.CASCADE, blank=True, null=True)
snake_type = PolymorphicTypeField('Snake', on_delete=models.CASCADE)
class AcknowledgedTrait(Trait):
class Meta:
proxy = True
class Reptile(Animal):
length = models.SmallIntegerField()
class Meta:
abstract = True
ordering = ['id']
class Snake(Reptile):
color = models.CharField(max_length=100, blank=True)
class Meta:
ordering = ['id']
class BigSnake(Snake):
class Meta:
proxy = True
class HugeSnake(BigSnake):
class Meta:
proxy = True
| [
"polymodels.fields.PolymorphicTypeField",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.SmallIntegerField"
] | [((363, 416), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""Animal"""'], {'related_name': '"""zoos"""'}), "('Animal', related_name='zoos')\n", (385, 416), False, 'from django.db import models\n'), ((491, 522), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (507, 522), False, 'from django.db import models\n'), ((774, 804), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""self"""'], {}), "('self')\n", (796, 804), False, 'from django.db import models\n'), ((855, 932), 'polymodels.fields.PolymorphicTypeField', 'PolymorphicTypeField', (['"""self"""'], {'on_delete': 'models.CASCADE', 'blank': '(True)', 'null': '(True)'}), "('self', on_delete=models.CASCADE, blank=True, null=True)\n", (875, 932), False, 'from polymodels.fields import PolymorphicTypeField\n'), ((951, 1028), 'polymodels.fields.PolymorphicTypeField', 'PolymorphicTypeField', (['Mammal'], {'on_delete': 'models.CASCADE', 'blank': '(True)', 'null': '(True)'}), '(Mammal, on_delete=models.CASCADE, blank=True, null=True)\n', (971, 1028), False, 'from polymodels.fields import PolymorphicTypeField\n'), ((1046, 1101), 'polymodels.fields.PolymorphicTypeField', 'PolymorphicTypeField', (['"""Snake"""'], {'on_delete': 'models.CASCADE'}), "('Snake', on_delete=models.CASCADE)\n", (1066, 1101), False, 'from polymodels.fields import PolymorphicTypeField\n'), ((1211, 1237), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {}), '()\n', (1235, 1237), False, 'from django.db import models\n'), ((1341, 1385), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)'}), '(max_length=100, blank=True)\n', (1357, 1385), False, 'from django.db import models\n')] |
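Plain Django ORM usage of the models above (nothing polymodels-specific); it assumes a configured Django settings module and applied migrations, so it is a sketch rather than a standalone script.

# Assumes a configured Django project with these models migrated; names are illustrative.
zoo = Zoo.objects.create()
monkey = Monkey.objects.create(name="Abu")
snake = Snake.objects.create(name="Kaa", length=300, color="green")
zoo.animals.add(monkey, snake)
print([str(a) for a in zoo.animals.all()])  # Animal.__str__ returns the name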
# -*- coding: utf-8 -*-
# author:jiangyu
# modify:2016-08-18
# gov_affair_detail.py
import pymongo
from settings import MONGO_URI, MONGO_DATABASE
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def show_result():
client = pymongo.MongoClient(MONGO_URI)
db = client[MONGO_DATABASE]
results = db["GovAffairDetailItem"].find()
client.close()
return render_template('result_index.html',p_results=results)
if __name__ == '__main__':
app.run() | [
"flask.render_template",
"pymongo.MongoClient",
"flask.Flask"
] | [((195, 210), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (200, 210), False, 'from flask import Flask, render_template\n'), ((257, 287), 'pymongo.MongoClient', 'pymongo.MongoClient', (['MONGO_URI'], {}), '(MONGO_URI)\n', (276, 287), False, 'import pymongo\n'), ((385, 440), 'flask.render_template', 'render_template', (['"""result_index.html"""'], {'p_results': 'results'}), "('result_index.html', p_results=results)\n", (400, 440), False, 'from flask import Flask, render_template\n')] |
"""Merge two or more tables as data frames.
"""
import argparse
from functools import reduce
import pandas as pd
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--tables", nargs="+", help="tables to concatenate")
parser.add_argument("--separator", default="\t", help="separator between columns in the given tables")
parser.add_argument("--suffixes", nargs=2, help="what to add when two columns have the same value")
parser.add_argument("--output", help="concatenated table")
args = parser.parse_args()
# Read tables.
tables = []
for i in range(0, len(args.tables)):
tables.append(pd.read_csv(args.tables[i], sep=args.separator))
if args.suffixes is not None:
df = reduce(lambda x, y: pd.merge(x, y, on = 'strain', suffixes=(args.suffixes[0], args.suffixes[1])), tables)
else:
df = reduce(lambda x, y: pd.merge(x, y, on = 'strain'), tables)
df.to_csv(args.output, sep=args.separator, header=True, index=False) | [
"pandas.merge",
"argparse.ArgumentParser",
"pandas.read_csv"
] | [((155, 180), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (178, 180), False, 'import argparse\n'), ((663, 710), 'pandas.read_csv', 'pd.read_csv', (['args.tables[i]'], {'sep': 'args.separator'}), '(args.tables[i], sep=args.separator)\n', (674, 710), True, 'import pandas as pd\n'), ((784, 858), 'pandas.merge', 'pd.merge', (['x', 'y'], {'on': '"""strain"""', 'suffixes': '(args.suffixes[0], args.suffixes[1])'}), "(x, y, on='strain', suffixes=(args.suffixes[0], args.suffixes[1]))\n", (792, 858), True, 'import pandas as pd\n'), ((913, 940), 'pandas.merge', 'pd.merge', (['x', 'y'], {'on': '"""strain"""'}), "(x, y, on='strain')\n", (921, 940), True, 'import pandas as pd\n')] |
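The script above boils down to `functools.reduce` over `pandas.merge` on the `strain` column; a tiny in-memory demonstration of that pattern, with made-up frames instead of files:

# Minimal in-memory demonstration of the reduce + merge-on-"strain" pattern used above.
from functools import reduce
import pandas as pd

t1 = pd.DataFrame({"strain": ["A", "B"], "clade": ["19A", "20B"]})
t2 = pd.DataFrame({"strain": ["A", "B"], "region": ["EU", "NA"]})
merged = reduce(lambda x, y: pd.merge(x, y, on="strain"), [t1, t2])
print(merged)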
#//
#// -------------------------------------------------------------
#// Copyright 2010 Synopsys, Inc.
#// Copyright 2010 Mentor Graphics Corporation
#// Copyright 2010 Cadence Design Systems, Inc.
#// Copyright 2019-2020 <NAME> (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#// -------------------------------------------------------------
#//
from uvm.base.uvm_object import UVMObject
from uvm.base.uvm_pool import UVMObjectStringPool
from uvm.base.uvm_queue import UVMQueue
from uvm.macros import (uvm_error)
#//
#// CLASS: uvm_reg_file
#// Register file abstraction base class
#//
#// A register file is a collection of register files and registers
#// used to create regular repeated structures.
#//
#// Register files are usually instantiated as arrays.
#//
class UVMRegFile(UVMObject):
# local uvm_reg_block parent
# local uvm_reg_file m_rf
# local string default_hdl_path = "RTL"
# local uvm_object_string_pool #(uvm_queue #(string)) hdl_paths_pool
# //----------------------
# // Group: Initialization
# //----------------------
def __init__(self, name=""):
"""
Function: new
Create a new instance
Creates an instance of a register file abstraction class
with the specified name.
extern def new (self,string name=""):
Args:
name:
"""
super().__init__(name)
self.hdl_paths_pool = UVMObjectStringPool("hdl_paths", UVMQueue)
self.parent = None # uvm_reg_block
self.m_rf = None # uvm_reg_file
self.default_hdl_path = "RTL" # type: str
# self.l = None # type: hdl_paths_poo
def configure(self, blk_parent, regfile_parent, hdl_path=""):
"""
Function: configure
Configure a register file instance
Specify the parent block and register file of the register file
instance.
If the register file is instantiated in a block,
`regfile_parent` is specified as `None`.
If the register file is instantiated in a register file,
`blk_parent` must be the block parent of that register file and
`regfile_parent` is specified as that register file.
If the register file corresponds to a hierarchical RTL structure,
its contribution to the HDL path is specified as the `hdl_path`.
Otherwise, the register file does not correspond to a hierarchical RTL
structure (e.g. it is physically flattened) and does not contribute
to the hierarchical HDL path of any contained registers.
extern function void configure (uvm_reg_block blk_parent,
uvm_reg_file regfile_parent,
string hdl_path = "")
Args:
blk_parent:
regfile_parent:
hdl_path:
"""
if blk_parent is None:
uvm_error("UVM/RFILE/CFG/NOBLK",
("UVMRegFile::configure() called without a parent block for instance '"
+ self.get_name() + "' of register file type '", self.get_type_name()
+ "'."))
return
self.parent = blk_parent
self.m_rf = regfile_parent
self.add_hdl_path(hdl_path)
def get_full_name(self):
"""
---------------------
Group: Introspection
---------------------
Function: get_name
Get the simple name
Return the simple object name of self register file.
Function: get_full_name
Get the hierarchical name
Return the hierarchal name of self register file.
The base of the hierarchical name is the root block.
extern virtual def string get_full_name(self):
Returns:
"""
blk = None # uvm_reg_block
get_full_name = self.get_name()
# Is there a parent register file?
if (self.m_rf is not None):
return self.m_rf.get_full_name() + "." + get_full_name
# No: then prepend the full name of the parent block (if any)
if self.parent is None:
return get_full_name
get_full_name = self.parent.get_full_name() + "." + get_full_name
return get_full_name
#endfunction: get_full_name
#
# //
# // Function: get_parent
# // Get the parent block
# //
# extern virtual def uvm_reg_block get_parent (self):
def get_block(self):
"""
extern virtual def uvm_reg_block get_block (self):
Returns:
"""
return self.parent
# //
# // Function: get_regfile
# // Get the parent register file
# //
# // Returns ~None~ if self register file is instantiated in a block.
# //
# extern virtual def uvm_reg_file get_regfile (self):
#
#
# //----------------
# // Group: Backdoor
# //----------------
#
# //
# // Function: clear_hdl_path
# // Delete HDL paths
# //
# // Remove any previously specified HDL path to the register file instance
# // for the specified design abstraction.
# //
# extern def void clear_hdl_path (self,string kind = "RTL"):
#
def add_hdl_path(self, path, kind="RTL"):
"""
Function: add_hdl_path
Add an HDL path
Add the specified HDL path to the register file instance for the specified
design abstraction. This method may be called more than once for the
same design abstraction if the register file is physically duplicated
in the design abstraction
extern def void add_hdl_path (self,string path, string kind = "RTL"):
Args:
path:
kind:
"""
paths = self.hdl_paths_pool.get(kind)
paths.push_back(path)
#
# //
# // Function: has_hdl_path
# // Check if a HDL path is specified
# //
# // Returns TRUE if the register file instance has a HDL path defined for the
# // specified design abstraction. If no design abstraction is specified,
# // uses the default design abstraction specified for the nearest
# // enclosing register file or block
# //
# // If no design abstraction is specified, the default design abstraction
# // for self register file is used.
# //
# extern def bit has_hdl_path (self,string kind = ""):
#
# //
# // Function: get_hdl_path
# // Get the incremental HDL path(s)
# //
# // Returns the HDL path(s) defined for the specified design abstraction
# // in the register file instance. If no design abstraction is specified, uses
# // the default design abstraction specified for the nearest enclosing
# // register file or block.
# // Returns only the component of the HDL paths that corresponds to
# // the register file, not a full hierarchical path
# //
# // If no design abstraction is specified, the default design abstraction
# // for self register file is used.
# //
# extern def void get_hdl_path (self,ref string paths[$], input string kind = ""):
#
# //
# // Function: get_full_hdl_path
# // Get the full hierarchical HDL path(s)
# //
# // Returns the full hierarchical HDL path(s) defined for the specified
# // design abstraction in the register file instance. If no design abstraction
# // is specified, uses the default design abstraction specified for the
# // nearest enclosing register file or block.
# // There may be more than one path returned even
# // if only one path was defined for the register file instance, if any of the
# // parent components have more than one path defined for the same design
# // abstraction
# //
# // If no design abstraction is specified, the default design abstraction
# // for each ancestor register file or block is used to get each
# // incremental path.
# //
# extern function void get_full_hdl_path (ref string paths[$],
# input string kind = "",
# input string separator = ".")
#
# //
# // Function: set_default_hdl_path
# // Set the default design abstraction
# //
# // Set the default design abstraction for self register file instance.
# //
# extern def void set_default_hdl_path (self,string kind):
#
# //
# // Function: get_default_hdl_path
# // Get the default design abstraction
# //
# // Returns the default design abstraction for self register file instance.
# // If a default design abstraction has not been explicitly set for self
# // register file instance, returns the default design abstraction for the
# // nearest register file or block ancestor.
# // Returns "" if no default design abstraction has been specified.
# //
# extern def string get_default_hdl_path (self):
#
#
# extern virtual def void do_print (self,uvm_printer printer):
# extern virtual def string convert2string(self):
# extern virtual def uvm_object clone (self):
# extern virtual def void do_copy (self,uvm_object rhs):
# extern virtual function bit do_compare (uvm_object rhs,
# uvm_comparer comparer)
# extern virtual def void do_pack (self,uvm_packer packer):
# extern virtual def void do_unpack (self,uvm_packer packer):
#
#endclass: uvm_reg_file
#
#
#//------------------------------------------------------------------------------
#// IMPLEMENTATION
#//------------------------------------------------------------------------------
#
#
#
#
#
#
#// get_regfile
#
#def uvm_reg_file uvm_reg_file::get_regfile(self):
# return m_rf
#endfunction
#
#
#// clear_hdl_path
#
#def void uvm_reg_file::clear_hdl_path(self,string kind = "RTL"):
# if (kind == "ALL"):
# hdl_paths_pool = new("hdl_paths")
# return
# end
#
# if (kind == ""):
# if (m_rf is not None)
# kind = m_rf.get_default_hdl_path()
# else
# kind = parent.get_default_hdl_path()
# end
#
# if (!hdl_paths_pool.exists(kind)):
# `uvm_warning("RegModel",{"Unknown HDL Abstraction '",kind,"'"})
# return
# end
#
# hdl_paths_pool.delete(kind)
#endfunction
#
#
#
#
#// has_hdl_path
#
#def bit uvm_reg_file::has_hdl_path(self,string kind = ""):
# if (kind == ""):
# if (m_rf is not None)
# kind = m_rf.get_default_hdl_path()
# else
# kind = parent.get_default_hdl_path()
# end
#
# return hdl_paths_pool.exists(kind)
#endfunction
#
#
#// get_hdl_path
#
#def void uvm_reg_file::get_hdl_path(self,ref string paths[$], input string kind = ""):
#
# uvm_queue #(string) hdl_paths
#
# if (kind == ""):
# if (m_rf is not None)
# kind = m_rf.get_default_hdl_path()
# else
# kind = parent.get_default_hdl_path()
# end
#
# if (!has_hdl_path(kind)):
# `uvm_error("RegModel",{"Register does not have hdl path defined for abstraction '",kind,"'"})
# return
# end
#
# hdl_paths = hdl_paths_pool.get(kind)
#
# for (int i=0; i<hdl_paths.size();i++)
# paths.push_back(hdl_paths.get(i))
#
#endfunction
#
#
#// get_full_hdl_path
#
#function void uvm_reg_file::get_full_hdl_path(ref string paths[$],
# input string kind = "",
# input string separator = ".")
# if (kind == "")
# kind = get_default_hdl_path()
#
# if (!has_hdl_path(kind)):
# `uvm_error("RegModel",{"Register file does not have hdl path defined for abstraction '",kind,"'"})
# return
# end
#
# paths.delete()
#
# begin
# uvm_queue #(string) hdl_paths = hdl_paths_pool.get(kind)
# string parent_paths[$]
#
# if (m_rf is not None)
# m_rf.get_full_hdl_path(parent_paths, kind, separator)
# elif (parent is not None)
# parent.get_full_hdl_path(parent_paths, kind, separator)
#
# for (int i=0; i<hdl_paths.size();i++):
# string hdl_path = hdl_paths.get(i)
#
# if (parent_paths.size() == 0):
# if (hdl_path != "")
# paths.push_back(hdl_path)
#
# continue
# end
#
# foreach (parent_paths[j]) begin
# if (hdl_path == "")
# paths.push_back(parent_paths[j])
# else
# paths.push_back({ parent_paths[j], separator, hdl_path })
# end
# end
# end
#
#endfunction
#
#
#// get_default_hdl_path
#
#def string uvm_reg_file::get_default_hdl_path(self):
# if (default_hdl_path == ""):
# if (m_rf is not None)
# return m_rf.get_default_hdl_path()
# else
# return parent.get_default_hdl_path()
# end
# return default_hdl_path
#endfunction
#
#
#// set_default_hdl_path
#
#def void uvm_reg_file::set_default_hdl_path(self,string kind):
#
# if (kind == ""):
# if (m_rf is not None)
# kind = m_rf.get_default_hdl_path()
# elif (parent is None)
# kind = parent.get_default_hdl_path()
# else begin
# `uvm_error("RegModel",{"Register file has no parent. ",
# "Must specify a valid HDL abstraction (kind)"})
# return
# end
# end
#
# default_hdl_path = kind
#
#endfunction
#
#
#// get_parent
#
#def uvm_reg_block uvm_reg_file::get_parent(self):
# return get_block()
#endfunction
#
#
#
#
#//-------------
#// STANDARD OPS
#//-------------
#
#// convert2string
#
#def string uvm_reg_file::convert2string(self):
# `uvm_fatal("RegModel","RegModel register files cannot be converted to strings")
# return ""
#endfunction: convert2string
#
#
#// do_print
#
#def void uvm_reg_file::do_print (self,uvm_printer printer):
# super().do_print(printer)
#endfunction
#
#
#
#// clone
#
#def uvm_object uvm_reg_file::clone(self):
# `uvm_fatal("RegModel","RegModel register files cannot be cloned")
# return None
#endfunction
#
#// do_copy
#
#def void uvm_reg_file::do_copy(self,uvm_object rhs):
# `uvm_fatal("RegModel","RegModel register files cannot be copied")
#endfunction
#
#
#// do_compare
#
#function bit uvm_reg_file::do_compare (uvm_object rhs,
# uvm_comparer comparer)
# `uvm_warning("RegModel","RegModel register files cannot be compared")
# return 0
#endfunction
#
#
#// do_pack
#
#def void uvm_reg_file::do_pack (self,uvm_packer packer):
# `uvm_warning("RegModel","RegModel register files cannot be packed")
#endfunction
#
#
#// do_unpack
#
#def void uvm_reg_file::do_unpack (self,uvm_packer packer):
# `uvm_warning("RegModel","RegModel register files cannot be unpacked")
#endfunction
| [
"uvm.base.uvm_pool.UVMObjectStringPool"
] | [((2169, 2211), 'uvm.base.uvm_pool.UVMObjectStringPool', 'UVMObjectStringPool', (['"""hdl_paths"""', 'UVMQueue'], {}), "('hdl_paths', UVMQueue)\n", (2188, 2211), False, 'from uvm.base.uvm_pool import UVMObjectStringPool\n')] |
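A hedged sketch of the configuration flow described in the docstrings above; `blk` stands in for an already-built `uvm_reg_block` parent, which is not constructed here.

# Sketch based only on the methods defined above; `blk` is an assumed, already-built
# uvm_reg_block instance and is not shown.
rf = UVMRegFile("regs_a")
rf.configure(blk_parent=blk, regfile_parent=None, hdl_path="regs_a_inst")
rf.add_hdl_path("regs_a_gate", kind="GATES")  # extra design abstraction, per add_hdl_path()
print(rf.get_full_name())                     # <parent block>.regs_a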
#!/usr/bin/python
import ut
from copy import copy
from random import choice, randrange
from tkinter import *
# Python 2 compatability
try:
input = raw_input
except NameError:
pass
STEP_SIZE = 7
STEP_GRID = ut.cp((-1*STEP_SIZE,0,STEP_SIZE),(-1*STEP_SIZE,0,STEP_SIZE))
STEP_GRID.remove((0,0))
class Nest:
"""An ant's nest: ants will leave the nest and bring food sources to the nest
"""
def __init__(self, canvas):
"""Gives a random position to the object and displays it in a tkinter canvas
"""
self.posx = randrange(50, 450)
self.posy = randrange(50, 450)
self.display = circle(self.posx, self.posy, 20, canvas, "#F27E1D")
class Food:
"""Represents the source of food that ants will seek
"""
def __init__(self, canvas):
"""Gives a random position to the object and displays it in a tkinter canvas
"""
self.posx = randrange(50, 450)
self.posy = randrange(50, 450)
self.display = circle(self.posx, self.posy, 10, canvas, "#04C3D9")
# a food source with a lifespan of 100 visits
self.life = 100
def replace(self, canvas):
"""Relocates the food source to another location when its lifespan reaches 0
"""
old_posx = self.posx
old_posy = self.posy
self.posx = randrange(50, 450)
self.posy = randrange(50, 450)
canvas.move(self.display, self.posx - old_posx, self.posy - old_posy)
# Gives his life back to 100, it's like a new food source is being created
self.life = 100
class Ant:
"""the ant object that will search for a food source in an environment
"""
def __init__(self, nest, canvas):
"""Birth of an ant in its nest
"""
self.posx = nest.posx
self.posy = nest.posy
self.display = circle(self.posx, self.posy, 2, canvas, "#AF0220")
# at birth the ant is in a search mode
self.scout_mode = True
class Pheromone:
"""Pheromones are objects that help ants in their movement
"""
def __init__(self, ant, canvas):
"""The pheromones are placed in the current position of the ant
"""
self.posx = ant.posx
self.posy = ant.posy
self.life = 100 # Life expectancy of the pheromone which expires after a certain time
self.display = circle(self.posx, self.posy, 0.1, canvas, "#050994")
class Environment:
"""Create the entire environment or a number x of ants will move
"""
def __init__(self, ant_number):
self.ant_number = ant_number
self.root = Tk()
self.root.title("Ant Colony Simulator")
self.root.bind("<Escape>", lambda quit: self.root.destroy())
# Environment size
global e_w, e_h
e_w = 500
e_h = 500
self.environment = Canvas(
self.root, width=e_w, height=e_h, background="#010326")
self.environment.pack()
# Initialization of the nest
self.nest = Nest(self.environment)
# Initialization of the food
self.food = Food(self.environment)
# Birth of ants
self.ant_data = [] # List contains all ants object
for i in range(self.ant_number):
ant = Ant(self.nest, self.environment)
self.ant_data.append(ant)
# All possible combinations of movement for an ant are in this list
global move_tab
move_tab = STEP_GRID
# Initiates the movement of ants in the environment after the creation of the environment
self.environment.after(
1, f_move(self.environment, self.ant_data, self.food))
self.root.mainloop()
def circle(x, y, radius, canvas, color):
"""Create a circle from the middle coordinates
:param x: coordinated x
:param y: coordinated y
:param radius: circle radius
:param color: circle color
:param canvas: environment
:return: a circle canvas object
"""
return canvas.create_oval(x - radius, y - radius, x + radius, y + radius, fill=color, outline='')
def dont_out(ant):
"""prevent ants from leaving the environment
"""
new_move_tab = copy(move_tab)
    # Keep only the moves whose destination stays inside the canvas
    # (the caller only invokes dont_out() when the ant reaches a border).
    abs_grid = [(pos[0] + ant.posx, pos[1] + ant.posy) for pos in new_move_tab]
    new_move_tab = [(pos[0] - ant.posx, pos[1] - ant.posy) for pos in abs_grid
                    if 0 <= pos[0] <= e_w and 0 <= pos[1] <= e_h]
return new_move_tab
def collide(canvas, ant):
"""Check if the ant is on an object or not
Returns 0 if the ant is not on anything
Returns 1 if the ant is on its nest
Returns 2 if the ant is on a food source
"""
ant_coords = canvas.coords(ant.display)
if canvas.find_overlapping(ant_coords[0], ant_coords[1], ant_coords[2], ant_coords[3])[0] == 1:
return 1
elif canvas.find_overlapping(ant_coords[0], ant_coords[1], ant_coords[2], ant_coords[3])[0] == 2:
return 2
else:
return 0
def find_nest(ant, canvas):
"""Returns a new movement table for which there will be a high probability of approaching its nest
"""
ant_coords = (ant.posx, ant.posy)
HGn = canvas.find_overlapping(0, 0, ant_coords[0], ant_coords[1])[0]
HDn = canvas.find_overlapping(e_w, 0, ant_coords[0], ant_coords[1])[0]
BGn = canvas.find_overlapping(0, e_h, ant_coords[0], ant_coords[1])[0]
BDn = canvas.find_overlapping(e_w, e_h, ant_coords[0], ant_coords[1])[0]
HG = len(canvas.find_overlapping(
0, 0, ant_coords[0], ant_coords[1])) - 2 - nb_ant
HD = len(canvas.find_overlapping(
e_w, 0, ant_coords[0], ant_coords[1])) - 2 - nb_ant
BG = len(canvas.find_overlapping(
0, e_h, ant_coords[0], ant_coords[1])) - 2 - nb_ant
BD = len(canvas.find_overlapping(
e_w, e_h, ant_coords[0], ant_coords[1])) - 2 - nb_ant
new_move_tab = []
if HGn == 1:
if not HG > 1:
new_move_tab += [(-1*STEP_SIZE, 0), (0, -STEP_SIZE), (-1*STEP_SIZE, -1*STEP_SIZE)]
else:
new_move_tab += [(-1*STEP_SIZE, 0), (0, -STEP_SIZE), (-1*STEP_SIZE, -1*STEP_SIZE)] * HG
if HDn == 1:
if not HD > 1:
new_move_tab += [(STEP_SIZE, 0), (0, -1*STEP_SIZE), (STEP_SIZE, -1*STEP_SIZE)]
else:
new_move_tab += [(STEP_SIZE, 0), (0, -1*STEP_SIZE), (STEP_SIZE, -1*STEP_SIZE)] * HD
if BGn == 1:
if not BG > 1:
new_move_tab += [(-1*STEP_SIZE, 0), (0, STEP_SIZE), (-1*STEP_SIZE, STEP_SIZE)]
else:
new_move_tab += [(-1*STEP_SIZE, 0), (0, STEP_SIZE), (-1*STEP_SIZE, STEP_SIZE)] * BG
if BDn == 1:
if not BD > 1:
new_move_tab += [(STEP_SIZE, 0), (0, STEP_SIZE), (STEP_SIZE, STEP_SIZE)]
else:
new_move_tab += [(STEP_SIZE, 0), (0, STEP_SIZE), (STEP_SIZE, STEP_SIZE)] * BD
if len(new_move_tab) > 0:
return new_move_tab
return move_tab
def pheromones_affinity(ant, canvas):
"""Returns a new movement table for which there will be a high probability of approaching pheromones
"""
ant_coords = (ant.posx, ant.posy)
HG = len(canvas.find_overlapping(
0, 0, ant_coords[0], ant_coords[1])) - (2 + nb_ant)
HD = len(canvas.find_overlapping(
e_w, 0, ant_coords[0], ant_coords[1])) - (2 + nb_ant)
BG = len(canvas.find_overlapping(
0, e_h, ant_coords[0], ant_coords[1])) - (2 + nb_ant)
BD = len(canvas.find_overlapping(
e_w, e_h, ant_coords[0], ant_coords[1])) - (2 + nb_ant)
new_move_tab = []
if HG > 1:
new_move_tab += [(-1*STEP_SIZE, 0), (0, -1*STEP_SIZE), (-1*STEP_SIZE, -1*STEP_SIZE)] * HG
if HD > 1:
new_move_tab += [(STEP_SIZE, 0), (0, -1*STEP_SIZE), (STEP_SIZE, -1*STEP_SIZE)] * HD
if BG > 1:
new_move_tab += [(-1*STEP_SIZE, 0), (0, STEP_SIZE), (-1*STEP_SIZE, STEP_SIZE)] * BG
if BD > 1:
new_move_tab += [(STEP_SIZE, 0), (0, STEP_SIZE), (STEP_SIZE, STEP_SIZE)] * BD
return new_move_tab
def f_move(canvas, ant_data, food):
"""simulates the movement of an ant
"""
pheromones = [] # list that contains all pheromone objects in the environment
while 1:
for pheromone in pheromones:
# At each loop the life expectancy of pheromones decreases by 1
pheromone.life -= 1
if pheromone.life <= 0: # If the life expectancy of a pheromone reaches 0 it is removed
canvas.delete(pheromone.display)
pheromones.remove(pheromone)
for ant in ant_data:
# Movement of ants
if ant.scout_mode: # if the ant is looking for a food source
# if the ant leaves the environment, we adapt its movements for which it stays there
if ant.posx <= 0 or ant.posy <= 0 or ant.posx >= e_w - 1 or ant.posy >= e_h - 1:
coord = choice(dont_out(ant))
else:
# Movement of an ant is adjusted according to the pheromones present. If there is no pheromone,
# there will be no modification on its movement.
coord = pheromones_affinity(ant, canvas)
if not coord:
coord = move_tab
coord = choice(coord)
ant.posx += coord[0]
ant.posy += coord[1]
canvas.move(ant.display, coord[0], coord[1])
if collide(canvas, ant) == 2:
# if there is a collision between a food source and an ant, the scout mode is removed
# with each collision between an ant and a food source, its life expectancy decreases by 1
food.life -= 1
# If the food source has been consumed, a new food source is replaced
if food.life < 1:
food.replace(canvas)
ant.scout_mode = False
canvas.itemconfig(ant.display, fill='#3BC302')
# the ant puts down its first pheromones when it touches food
for i in range(30):
pheromones.append(Pheromone(ant, canvas))
else: # If the ant found the food source
# The position of the nest will influence the movements of the ant
coord = choice(find_nest(ant, canvas))
proba = choice([0]*23+[1])
if proba:
pheromones.append(Pheromone(ant, canvas))
ant.posx += coord[0]
ant.posy += coord[1]
canvas.move(ant.display, coord[0], coord[1])
# if there is a collision between a nest and an ant, the ant switches to scout mode
if collide(canvas, ant) == 1:
ant.scout_mode = True
canvas.itemconfig(ant.display, fill='#AF0220')
canvas.update()
if __name__ == "__main__":
try:
nb_ant = int(input("Enter the number of ants you want for the simulation (recommended: 10-100) : "))
Environment(nb_ant)
except KeyboardInterrupt:
print("Exiting...")
exit(0)
| [
"copy.copy",
"random.choice",
"ut.cp",
"random.randrange"
] | [((217, 286), 'ut.cp', 'ut.cp', (['(-1 * STEP_SIZE, 0, STEP_SIZE)', '(-1 * STEP_SIZE, 0, STEP_SIZE)'], {}), '((-1 * STEP_SIZE, 0, STEP_SIZE), (-1 * STEP_SIZE, 0, STEP_SIZE))\n', (222, 286), False, 'import ut\n'), ((4189, 4203), 'copy.copy', 'copy', (['move_tab'], {}), '(move_tab)\n', (4193, 4203), False, 'from copy import copy\n'), ((557, 575), 'random.randrange', 'randrange', (['(50)', '(450)'], {}), '(50, 450)\n', (566, 575), False, 'from random import choice, randrange\n'), ((596, 614), 'random.randrange', 'randrange', (['(50)', '(450)'], {}), '(50, 450)\n', (605, 614), False, 'from random import choice, randrange\n'), ((921, 939), 'random.randrange', 'randrange', (['(50)', '(450)'], {}), '(50, 450)\n', (930, 939), False, 'from random import choice, randrange\n'), ((960, 978), 'random.randrange', 'randrange', (['(50)', '(450)'], {}), '(50, 450)\n', (969, 978), False, 'from random import choice, randrange\n'), ((1340, 1358), 'random.randrange', 'randrange', (['(50)', '(450)'], {}), '(50, 450)\n', (1349, 1358), False, 'from random import choice, randrange\n'), ((1379, 1397), 'random.randrange', 'randrange', (['(50)', '(450)'], {}), '(50, 450)\n', (1388, 1397), False, 'from random import choice, randrange\n'), ((10431, 10453), 'random.choice', 'choice', (['([0] * 23 + [1])'], {}), '([0] * 23 + [1])\n', (10437, 10453), False, 'from random import choice, randrange\n'), ((9291, 9304), 'random.choice', 'choice', (['coord'], {}), '(coord)\n', (9297, 9304), False, 'from random import choice, randrange\n')] |
from gpiozero import LED
from signal import pause
red = LED(17)
red.blink()
pause()
| [
"signal.pause",
"gpiozero.LED"
] | [((57, 64), 'gpiozero.LED', 'LED', (['(17)'], {}), '(17)\n', (60, 64), False, 'from gpiozero import LED\n'), ((79, 86), 'signal.pause', 'pause', ([], {}), '()\n', (84, 86), False, 'from signal import pause\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 14:03:04 2019
@author: <NAME>
"""
import pandas as pd
import numpy as np
import tflearn
import tensorflow
import random
df = pd.read_csv('s.csv')
inp =df.to_numpy()
final_inp =[]
final_inp=inp[:,0:4]
output=inp[:,4]
final_output=[]
for i in output:
if i=="Cancer":
final_output.append([1,0,0,0])
if i=="Diabeties":
final_output.append([0,1,0,0])
if i=="Stomach":
final_output.append([0,0,1,0])
if i=="Heart":
final_output.append([0,0,0,1])
final_output = np.array(final_output)
rbc=np.array(df['RBC'])
sugar=np.array(df['Sugar'])
acid=np.array(df['Acidity'])
coles=np.array(df['Colestrol'])
output=np.array(df['Disease'])
tensorflow.reset_default_graph()
net = tflearn.input_data(shape=[None, 4])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 4, activation="softmax")
net = tflearn.regression(net)
model = tflearn.DNN(net)
try:
model.load("model.tflearn")
except:
model = tflearn.DNN(net)
model.fit(final_inp, final_output, n_epoch=250, batch_size=1, show_metric=True)
model.save("model.tflearn")
results = model.predict([[40,40,40,47]])
results_index = np.argmax(results)
food={0:[["maidha","Coliflower","msdklsfngskl"]],1:[["maidha","Coliflower","msdklsfngskl"]],2:[["maidha","Coliflower","msdklsfngskl"]],3:[["maidha","Coliflower","msdklsfngskl"]]}
if results_index==0:
print("Cancer")
print(food[0])
if results_index==1:
print("Diabeties")
print(food[1])
if results_index==2:
print("Stomach")
print(food[2])
if results_index==3:
print("Heart")
print(food[3])
| [
"tensorflow.reset_default_graph",
"pandas.read_csv",
"tflearn.DNN",
"numpy.argmax",
"numpy.array",
"tflearn.regression",
"tflearn.fully_connected",
"tflearn.input_data"
] | [((180, 200), 'pandas.read_csv', 'pd.read_csv', (['"""s.csv"""'], {}), "('s.csv')\n", (191, 200), True, 'import pandas as pd\n'), ((645, 667), 'numpy.array', 'np.array', (['final_output'], {}), '(final_output)\n', (653, 667), True, 'import numpy as np\n'), ((723, 742), 'numpy.array', 'np.array', (["df['RBC']"], {}), "(df['RBC'])\n", (731, 742), True, 'import numpy as np\n'), ((749, 770), 'numpy.array', 'np.array', (["df['Sugar']"], {}), "(df['Sugar'])\n", (757, 770), True, 'import numpy as np\n'), ((776, 799), 'numpy.array', 'np.array', (["df['Acidity']"], {}), "(df['Acidity'])\n", (784, 799), True, 'import numpy as np\n'), ((806, 831), 'numpy.array', 'np.array', (["df['Colestrol']"], {}), "(df['Colestrol'])\n", (814, 831), True, 'import numpy as np\n'), ((840, 863), 'numpy.array', 'np.array', (["df['Disease']"], {}), "(df['Disease'])\n", (848, 863), True, 'import numpy as np\n'), ((866, 898), 'tensorflow.reset_default_graph', 'tensorflow.reset_default_graph', ([], {}), '()\n', (896, 898), False, 'import tensorflow\n'), ((906, 941), 'tflearn.input_data', 'tflearn.input_data', ([], {'shape': '[None, 4]'}), '(shape=[None, 4])\n', (924, 941), False, 'import tflearn\n'), ((948, 979), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', '(8)'], {}), '(net, 8)\n', (971, 979), False, 'import tflearn\n'), ((986, 1017), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', '(8)'], {}), '(net, 8)\n', (1009, 1017), False, 'import tflearn\n'), ((1024, 1055), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', '(8)'], {}), '(net, 8)\n', (1047, 1055), False, 'import tflearn\n'), ((1062, 1115), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', '(4)'], {'activation': '"""softmax"""'}), "(net, 4, activation='softmax')\n", (1085, 1115), False, 'import tflearn\n'), ((1122, 1145), 'tflearn.regression', 'tflearn.regression', (['net'], {}), '(net)\n', (1140, 1145), False, 'import tflearn\n'), ((1155, 1171), 'tflearn.DNN', 'tflearn.DNN', (['net'], {}), '(net)\n', (1166, 1171), False, 'import tflearn\n'), ((1422, 1440), 'numpy.argmax', 'np.argmax', (['results'], {}), '(results)\n', (1431, 1440), True, 'import numpy as np\n'), ((1230, 1246), 'tflearn.DNN', 'tflearn.DNN', (['net'], {}), '(net)\n', (1241, 1246), False, 'import tflearn\n')] |
from src.parser.text_extractor.docx import extract_text_from_doc
from src.parser.text_extractor.pdf import extract_text_from_pdf
import os
def extract_text_from_document(document_path):
text_file = os.path.join(os.path.dirname(document_path) , document_path.split("/")[-1].split(".")[0] + ".txt")
if os.path.exists(text_file):
        with open(text_file, "r") as infile:  # read the cache from the same path written below
data = infile.read()
return data
text = ""
if document_path.endswith(".pdf"):
for page in extract_text_from_pdf(document_path):
text += ' ' + page
elif document_path.endswith(".docx"):
text = extract_text_from_doc(document_path)
text = text.lower()
with open(text_file, "w") as outfile:
outfile.write(text)
return text
| [
"os.path.dirname",
"os.path.exists",
"src.parser.text_extractor.pdf.extract_text_from_pdf",
"src.parser.text_extractor.docx.extract_text_from_doc"
] | [((312, 337), 'os.path.exists', 'os.path.exists', (['text_file'], {}), '(text_file)\n', (326, 337), False, 'import os\n'), ((218, 248), 'os.path.dirname', 'os.path.dirname', (['document_path'], {}), '(document_path)\n', (233, 248), False, 'import os\n'), ((538, 574), 'src.parser.text_extractor.pdf.extract_text_from_pdf', 'extract_text_from_pdf', (['document_path'], {}), '(document_path)\n', (559, 574), False, 'from src.parser.text_extractor.pdf import extract_text_from_pdf\n'), ((664, 700), 'src.parser.text_extractor.docx.extract_text_from_doc', 'extract_text_from_doc', (['document_path'], {}), '(document_path)\n', (685, 700), False, 'from src.parser.text_extractor.docx import extract_text_from_doc\n')] |
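A usage sketch for the helper above; the path is a placeholder. The first call extracts and caches the lower-cased text in a sibling `.txt` file, and later calls read that cache.

# The path below is a placeholder; the first call writes <name>.txt next to the document,
# subsequent calls return the cached text.
text = extract_text_from_document("samples/resume.pdf")
print(text[:200])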
import os.path
from setuptools import setup, find_packages
PATH = os.path.realpath(os.path.dirname(__file__))
README = open(os.path.join(PATH, 'README.md')).read()
LICENSE = open(os.path.join(PATH, 'LICENSE')).read()
def _requirements(filename: str = 'requirements.txt'):
with open(os.path.join(PATH, filename)) as f:
return list(map(lambda r: r.replace('\n', ''), f.readlines()))
setup(
name='repomaker',
use_scm_version=True,
license=LICENSE,
author='<NAME> (George)',
author_email='<EMAIL>',
maintainer='<NAME> (George)',
maintainer_email='<EMAIL>',
url='https://github.com/jfsanchez-gh/repomaker',
description='APT mobile repository creator.',
long_description=README,
platforms='any',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Topic :: Internet',
'Topic :: System :: Management',
'Topic :: System :: Networking',
],
packages=find_packages(),
setup_requires=[
'setuptools_scm',
],
install_requires=_requirements('requirements.txt'),
tests_require=_requirements('requirements-dev.txt'),
test_suite='tests',
entry_points={
'console_scripts': [
'repomaker=repomaker.main:main'
]
},
zip_safe=False,
)
| [
"setuptools.find_packages"
] | [((1048, 1063), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1061, 1063), False, 'from setuptools import setup, find_packages\n')] |
from models import Session, User
from models.types import SessionType
from datetime import datetime, timedelta
from secrets import token_urlsafe
from core.controllers.utils.Base64 import encoder
from fastapi.responses import JSONResponse
def get_some_minutes_more():
now = datetime.now()
result = timedelta(minutes=45) + now
return result
async def login_validate_bool(token, ip, id=0):
token = encoder.decode(token)
session = await Session.objects.get(token=token, ip=ip)
if(session.id > 0):
await session.update(
expiration_date=get_some_minutes_more()
)
await session.user.load()
await session.user.load_data()
return True, session.user
return False, None
async def login_validate(token, ip, id):
token = encoder.decode(token)
session = await Session.objects.get(token=token, ip=ip,)
if(session.id > 0):
await session.update(
expiration_date=get_some_minutes_more()
)
session.token = encoder.encode(session.token)
return session
return {'status': 'error'}
async def login(ip, username, password=""):
users = await User.objects.filter(user_name=username).all()
if(len(users) == 0):
return JSONResponse(content={'status': 'Revisa tu usuario.'}, status_code=403)
u = users[0]
u = await u.load_data()
try:
encoder.ph.verify(u.password, password)
if(encoder.ph.check_needs_rehash(u.password)):
await u.update(password=encoder.ph.hash(password))
token = token_urlsafe(16)
s = await Session.objects.create(
ip=ip,
user=u,
token=token,
session_type= await SessionType.objects.get(id = 1),
creation_date=datetime.now(),
expiration_date=get_some_minutes_more()
)
s.token = encoder.encode(token)
return s
except Exception as e:
print(e)
return JSONResponse(content={'status': 'Revisa la contraseña'}, status_code=403)
async def logout(ip, token):
token = encoder.decode(token)
session = await Session.objects.get(token=token, )
if(session.id > 0):
await session.update(
user=session.user.id,
expiration_date=datetime.now()
)
return {'status': 'ok'}
return {'status': 'error'}
| [
"core.controllers.utils.Base64.encoder.ph.verify",
"models.User.objects.filter",
"core.controllers.utils.Base64.encoder.decode",
"secrets.token_urlsafe",
"core.controllers.utils.Base64.encoder.encode",
"datetime.datetime.now",
"fastapi.responses.JSONResponse",
"core.controllers.utils.Base64.encoder.ph.hash",
"models.types.SessionType.objects.get",
"datetime.timedelta",
"core.controllers.utils.Base64.encoder.ph.check_needs_rehash",
"models.Session.objects.get"
] | [((279, 293), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (291, 293), False, 'from datetime import datetime, timedelta\n'), ((415, 436), 'core.controllers.utils.Base64.encoder.decode', 'encoder.decode', (['token'], {}), '(token)\n', (429, 436), False, 'from core.controllers.utils.Base64 import encoder\n'), ((798, 819), 'core.controllers.utils.Base64.encoder.decode', 'encoder.decode', (['token'], {}), '(token)\n', (812, 819), False, 'from core.controllers.utils.Base64 import encoder\n'), ((2091, 2112), 'core.controllers.utils.Base64.encoder.decode', 'encoder.decode', (['token'], {}), '(token)\n', (2105, 2112), False, 'from core.controllers.utils.Base64 import encoder\n'), ((307, 328), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(45)'}), '(minutes=45)\n', (316, 328), False, 'from datetime import datetime, timedelta\n'), ((457, 496), 'models.Session.objects.get', 'Session.objects.get', ([], {'token': 'token', 'ip': 'ip'}), '(token=token, ip=ip)\n', (476, 496), False, 'from models import Session, User\n'), ((840, 879), 'models.Session.objects.get', 'Session.objects.get', ([], {'token': 'token', 'ip': 'ip'}), '(token=token, ip=ip)\n', (859, 879), False, 'from models import Session, User\n'), ((1021, 1050), 'core.controllers.utils.Base64.encoder.encode', 'encoder.encode', (['session.token'], {}), '(session.token)\n', (1035, 1050), False, 'from core.controllers.utils.Base64 import encoder\n'), ((1255, 1326), 'fastapi.responses.JSONResponse', 'JSONResponse', ([], {'content': "{'status': 'Revisa tu usuario.'}", 'status_code': '(403)'}), "(content={'status': 'Revisa tu usuario.'}, status_code=403)\n", (1267, 1326), False, 'from fastapi.responses import JSONResponse\n'), ((1389, 1428), 'core.controllers.utils.Base64.encoder.ph.verify', 'encoder.ph.verify', (['u.password', 'password'], {}), '(u.password, password)\n', (1406, 1428), False, 'from core.controllers.utils.Base64 import encoder\n'), ((1440, 1481), 'core.controllers.utils.Base64.encoder.ph.check_needs_rehash', 'encoder.ph.check_needs_rehash', (['u.password'], {}), '(u.password)\n', (1469, 1481), False, 'from core.controllers.utils.Base64 import encoder\n'), ((1563, 1580), 'secrets.token_urlsafe', 'token_urlsafe', (['(16)'], {}), '(16)\n', (1576, 1580), False, 'from secrets import token_urlsafe\n'), ((1874, 1895), 'core.controllers.utils.Base64.encoder.encode', 'encoder.encode', (['token'], {}), '(token)\n', (1888, 1895), False, 'from core.controllers.utils.Base64 import encoder\n'), ((2133, 2165), 'models.Session.objects.get', 'Session.objects.get', ([], {'token': 'token'}), '(token=token)\n', (2152, 2165), False, 'from models import Session, User\n'), ((1972, 2045), 'fastapi.responses.JSONResponse', 'JSONResponse', ([], {'content': "{'status': 'Revisa la contraseña'}", 'status_code': '(403)'}), "(content={'status': 'Revisa la contraseña'}, status_code=403)\n", (1984, 2045), False, 'from fastapi.responses import JSONResponse\n'), ((1169, 1208), 'models.User.objects.filter', 'User.objects.filter', ([], {'user_name': 'username'}), '(user_name=username)\n', (1188, 1208), False, 'from models import Session, User\n'), ((1778, 1792), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1790, 1792), False, 'from datetime import datetime, timedelta\n'), ((2284, 2298), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2296, 2298), False, 'from datetime import datetime, timedelta\n'), ((1520, 1545), 'core.controllers.utils.Base64.encoder.ph.hash', 'encoder.ph.hash', (['password'], {}), '(password)\n', 
(1535, 1545), False, 'from core.controllers.utils.Base64 import encoder\n'), ((1719, 1748), 'models.types.SessionType.objects.get', 'SessionType.objects.get', ([], {'id': '(1)'}), '(id=1)\n', (1742, 1748), False, 'from models.types import SessionType\n')] |
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
fig = plt.figure()
ax1 = plt.subplot2grid((1, 1), (0, 0))
HPI_data = pd.read_pickle('fiddy_states3.pickle')
TXT1yr = HPI_data['TX'].resample('A').ohlc()  # annual OHLC bars; the how= keyword is no longer supported in modern pandas
print(TXT1yr.head())
HPI_data['TX'].plot(ax=ax1, label='Monthly TX HPI')
TXT1yr.plot(ax=ax1, label='Yearly TX HPI')
# plt.legend().remove()
plt.legend(loc=4)
plt.show()
| [
"pandas.read_pickle",
"matplotlib.pyplot.figure",
"matplotlib.style.use",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((81, 109), 'matplotlib.style.use', 'style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (90, 109), False, 'from matplotlib import style\n'), ((117, 129), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (127, 129), True, 'import matplotlib.pyplot as plt\n'), ((136, 168), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 1)', '(0, 0)'], {}), '((1, 1), (0, 0))\n', (152, 168), True, 'import matplotlib.pyplot as plt\n'), ((181, 219), 'pandas.read_pickle', 'pd.read_pickle', (['"""fiddy_states3.pickle"""'], {}), "('fiddy_states3.pickle')\n", (195, 219), True, 'import pandas as pd\n'), ((413, 430), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (423, 430), True, 'import matplotlib.pyplot as plt\n'), ((431, 441), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (439, 441), True, 'import matplotlib.pyplot as plt\n')] |
# ======================================================================
#
# Cosmograil: cosmograil.tools.sexcatalog
#
# sexcatalog module.
#
# Author: <NAME> <<EMAIL>>
#
# $Id: sexcatalog.py,v 1.1 2005/06/29 13:07:41 hack Exp $
#
# ======================================================================
#
# "sexcatalog": python module to read and parse SExtractor catalogs
# A simple interface to read SExtractor text catalogs
#
# ======================================================================
#
# $Log: sexcatalog.py,v $
# Revision 1.1 2005/06/29 13:07:41 hack
# Added Python interface to SExtractor to STSDAS$Python for use with 'tweakshifts'. WJH
#
# Revision 1.9 2005/02/14 19:27:31 laurentl
# Added write facilities to rdb module.
#
# Revision 1.8 2005/02/14 17:47:02 laurentl
# Added iterator interface
#
# Revision 1.7 2005/02/14 17:16:30 laurentl
# clean now removes the NNW config file too.
#
# Revision 1.2 2005/02/14 17:13:49 laurentl
# *** empty log message ***
#
# Revision 1.1 2005/02/14 11:34:10 laurentl
# quality monitor now uses SExtractor wrapper.
#
# Revision 1.5 2005/02/11 14:40:35 laurentl
# minor changes
#
# Revision 1.4 2005/02/10 20:15:14 laurentl
# Improved SExtractor wrapper.
#
# Revision 1.2 2005/02/09 23:32:50 laurentl
# Implemented SExtractor wrapper
#
# Revision 1.1 2005/01/06 12:29:25 laurentl
# Added a SExtractor wrapper module. Renamed sextractor.py sexcatalog.py.
#
# Revision 1.1 2004/12/09 03:06:23 laurentl
# Changed tree structure
#
# Revision 1.5 2004/11/26 18:26:59 laurentl
# Added a module to manage the data tree.
#
# Revision 1.4 2004/11/24 15:11:31 laurentl
# Fixed a lot of bugs in sexcatalog module.
#
# Revision 1.2 2004/11/23 22:38:23 laurentl
# Added sexcatalog module.
#
#
# ======================================================================
"""
A simple interface to manipulate SExtractor ASCII catalogs
through a file-like API (open, read, readline, etc.).
For the moment only reading ('r' mode) is supported.
by <NAME>
version: 0.1.5 - last modified: 2005-02-14
Future: implement a 'w' mode to be able to save catalogs
in SExtractor format.
Examples:
-----------------------------------------------------------------
# Through sexcatalog module
import sexcatalog
# Read a SExtractor ASCII catalog
# First method: read the whole catalog at once
catalog_f = sexcatalog.open(catalog_name)
catalog = catalog_f.readlines()
for star in catalog:
    print(star['FLUX_BEST'], star['FLAGS'])
    if (star['FLAGS'] & sexcatalog.BLENDED):
        print("This star is BLENDED")
catalog_f.close()
# Second method: read the catalog star by star
catalog_f = sexcatalog.open(catalog_name)
for star in catalog_f:
    print(star['FLUX_BEST'], star['FLAGS'])
    if (star['FLAGS'] & sexcatalog.BLENDED):
        print("This star is BLENDED")
catalog_f.close()
# -------------
# Through sextractor module
import sextractor
# Read a SExtractor ASCII catalog
# First method: read the whole catalog at once
catalog_f = sextractor.open(catalog_name)
catalog = catalog_f.readlines()
for star in catalog:
    print(star['FLUX_BEST'], star['FLAGS'])
    if (star['FLAGS'] & sextractor.BLENDED):
        print("This star is BLENDED")
catalog_f.close()
# Second method: read the catalog star by star
catalog_f = sextractor.open(catalog_name)
star = catalog_f.readline()
while star:
    print(star['FLUX_BEST'], star['FLAGS'])
    if (star['FLAGS'] & sextractor.BLENDED):
        print("This star is BLENDED")
star = catalog_f.readline()
catalog_f.close()
-----------------------------------------------------------------
"""
# ======================================================================
from __future__ import division, print_function # confidence high
import sys
PY3 = sys.version_info[0] >= 3
if PY3:
import builtins
else:
import __builtin__
# ======================================================================
__version__ = "0.1.5 (2005-02-14)"
# ======================================================================
# -- FLAGS meaning
NEIGHBOURS = 1
BLENDED = 2
SATURATED = 4
TRUNCATED = 8
CORRUPTED_APER = 16
CORRUPTED_ISO = 32
OVERFLOW_DEBLEND = 64
OVERFLOW_EXTRACT = 128
class WrongSExtractorfileException(Exception):
pass
class SExtractorfile:
"""
A class to manipulate SExtractor ASCII catalogs.
For the moment only reading ('r' mode) is supported.
"""
_SE_keys = \
{"NUMBER" : {"comment": "Running object number",
"infunc": int,
"format": "%10d",
"unit": ""},
"FLAGS" : {"comment": "Extraction flags",
"infunc": int,
"format": "%3d",
"unit": ""},
"FLUX_ISO" : {"comment": "Isophotal flux",
"infunc": float,
"format": "%12g",
"unit": "count"},
"FLUXERR_ISO" : {"comment": "RMS error for isophotal flux",
"infunc": float,
"format": "%12g",
"unit": "count"},
"MAG_ISO" : {"comment": "Isophotal magnitude",
"infunc": float,
"format": "%8.4f",
"unit": "mag"},
"MAGERR_ISO" : {"comment":
"RMS error for isophotal magnitude",
"infunc": float,
"format": "%8.4f",
"unit": "mag"},
"FLUX_ISOCOR" : {"comment": "Corrected isophotal flux",
"infunc": float,
"format": "%12g",
"unit": "count"},
"FLUXERR_ISOCOR" : {"comment":
"RMS error for corrected isophotal flux",
"infunc": float,
"format": "%12g",
"unit": "count"},
"MAG_ISOCOR" : {"comment": "Corrected isophotal magnitude",
"infunc": float,
"format": "%8.4f",
"unit": "mag"},
"MAGERR_ISOCOR" : {"comment":
"RMS error for corrected isophotal magnitude",
"infunc": float,
"format": "%8.4f",
"unit": "mag"},
"FLUX_AUTO" : {"comment":
"Flux within a Kron-like elliptical aperture",
"infunc": float,
"format": "%12g",
"unit": "count"},
"FLUXERR_AUTO" : {"comment": "RMS error for AUTO flux",
"infunc": float,
"format": "%12g",
"unit": "count"},
"MAG_AUTO" : {"comment":
"Kron-like elliptical aperture magnitude",
"infunc": float,
"format": "%8.4f",
"unit": "mag"},
"MAGERR_AUTO" : {"comment": "RMS error for AUTO magnitude",
"infunc": float,
"format": "%8.4f",
"unit": "mag"},
"FLUX_BEST" : {"comment":
"Best of FLUX_AUTO and FLUX_ISOCOR",
"infunc": float,
"format": "%12g",
"unit": "count"},
"FLUXERR_BEST" : {"comment": "RMS error for BEST flux",
"infunc": float,
"format": "%12g",
"unit": "count"},
"MAG_BEST" : {"comment": "Best of MAG_AUTO and MAG_ISOCOR",
"infunc": float,
"format": "%8.4f",
"unit": "mag"},
"MAGERR_BEST" : {"comment": "RMS error for MAG_BEST",
"infunc": float,
"format": "%8.4f",
"unit": "mag"},
"KRON_RADIUS" : {"comment":
"Kron apertures in units of A or B",
"infunc": float,
"format": "%5.2f",
"unit": ""},
"BACKGROUND" : {"comment": "Background at centroid position",
"infunc": float,
"format": "%12g",
"unit": "count"},
"THRESHOLD" : {"comment":
"Detection threshold above background",
"infunc": float,
"format": "%12g",
"unit": "count"},
"MU_THRESHOLD" : {"comment":
"Detection threshold above background",
"infunc": float,
"format": "%8.4f",
"unit": "mag * arcsec**(-2)"},
"FLUX_MAX" : {"comment": "Peak flux above background",
"infunc": float,
"format": "%12g",
"unit": "count"},
"MU_MAX" : {"comment":
"Peak surface brightness above background",
"infunc": float,
"format": "%8.4f",
"unit": "mag * arcsec**(-2)"},
"ISOAREA_WORLD" : {"comment":
"Isophotal area above Analysis threshold",
"infunc": float,
"format": "%12g",
"unit": "deg**2"},
"XMIN_IMAGE" : {"comment":
"Minimum x-coordinate among detected pixels",
"infunc": int,
"format": "%10d",
"unit": "pixel"},
"YMIN_IMAGE" : {"comment":
"Minimum y-coordinate among detected pixels",
"infunc": int,
"format": "%10d",
"unit": "pixel"},
"XMAX_IMAGE" : {"comment":
"Maximum x-coordinate among detected pixels",
"infunc": int,
"format": "%10d",
"unit": "pixel"},
"YMAX_IMAGE" : {"comment":
"Maximum y-coordinate among detected pixels",
"infunc": int,
"format": "%10d",
"unit": "pixel"},
"X_IMAGE" : {"comment": "Object position along x",
"infunc": float,
"format": "%10.3f",
"unit": "pixel"},
"Y_IMAGE" : {"comment": "Object position along y",
"infunc": float,
"format": "%10.3f",
"unit": "pixel"},
"X_WORLD" : {"comment":
"Barycenter position along world x axis",
"infunc": float,
"format": "%15e",
"unit": "deg"},
"Y_WORLD" : {"comment":
"Barycenter position along world y axis",
"infunc": float,
"format": "%15e",
"unit": "deg"},
"ALPHA_SKY" : {"comment":
"Right ascension of barycenter (native)",
"infunc": float,
"format": "%11.7f",
"unit": "deg"},
"DELTA_SKY" : {"comment":
"Declination of barycenter (native)",
"infunc": float,
"format": "%+11.7f",
"unit": "deg"},
"ALPHA_J2000" : {"comment":
"Right ascension of barycenter (J2000)",
"infunc": float,
"format": "%11.7f",
"unit": "deg"},
"DELTA_J2000" : {"comment":
"Declination of barycenter (J2000)",
"infunc": float,
"format": "%+11.7f",
"unit": "deg"},
"ALPHA_B1950" : {"comment":
"Right ascension of barycenter (B1950)",
"infunc": float,
"format": "%11.7f",
"unit": "deg"},
"DELTA_B1950" : {"comment":
"Declination of barycenter (B1950)",
"infunc": float,
"format": "%+11.7f",
"unit": "deg"},
"X2_IMAGE" : {"comment": "Variance along x",
"infunc": float,
"format": "%15e",
"unit": "pixel**2"},
"Y2_IMAGE" : {"comment": "Variance along y",
"infunc": float,
"format": "%15e",
"unit": "pixel**2"},
"XY_IMAGE" : {"comment": "Covariance between x and y",
"infunc": float,
"format": "%15e",
"unit": "pixel**2"},
"CXX_IMAGE" : {"comment": "Cxx object ellipse parameter",
"infunc": float,
"format": "%12e",
"unit": "pixel**(-2)"},
"CYY_IMAGE" : {"comment": "Cyy object ellipse parameter",
"infunc": float,
"format": "%12e",
"unit": "pixel**(-2)"},
"CXY_IMAGE" : {"comment": "Cxy object ellipse parameter",
"infunc": float,
"format": "%12e",
"unit": "pixel**(-2)"},
"A_IMAGE" : {"comment": "Profile RMS along major axis",
"infunc": float,
"format": "%9.3f",
"unit": "pixel"},
"B_IMAGE" : {"comment": "Profile RMS along minor axis",
"infunc": float,
"format": "%9.3f",
"unit": "pixel"},
"THETA_IMAGE" : {"comment": "Position angle (CCW/x)",
"infunc": float,
"format": "%5.1f",
"unit": "deg"},
"ELONGATION" : {"comment": "A_IMAGE/B_IMAGE",
"infunc": float,
"format": "%8.3f",
"unit": ""},
"ELLIPTICITY" : {"comment": "1 - B_IMAGE/A_IMAGE",
"infunc": float,
"format": "%8.3f",
"unit": ""},
"ERRX2_IMAGE" : {"comment": "Variance of position along x",
"infunc": float,
"format": "%15e",
"unit": "pixel**2"},
"ERRY2_IMAGE" : {"comment": "Variance of position along y",
"infunc": float,
"format": "%15e",
"unit": "pixel**2"},
"ERRXY_IMAGE" : {"comment":
"Covariance of position between x and y",
"infunc": float,
"format": "%15e",
"unit": "pixel**2"},
"ERRCXX_IMAGE" : {"comment": "Cxx error ellipse parameter",
"infunc": float,
"format": "%12g",
"unit": "pixel**(-2)"},
"ERRCYY_IMAGE" : {"comment": "Cyy error ellipse parameter",
"infunc": float,
"format": "%12g",
"unit": "pixel**(-2)"},
"ERRCXY_IMAGE" : {"comment": "Cxy error ellipse parameter",
"infunc": float,
"format": "%12g",
"unit": "pixel**(-2)"},
"ERRA_IMAGE" : {"comment":
"RMS position error along major axis",
"infunc": float,
"format": "%8.4f",
"unit": "pixel"},
"ERRB_IMAGE" : {"comment":
"RMS position error along minor axis",
"infunc": float,
"format": "%8.4f",
"unit": "pixel"},
"ERRTHETA_IMAGE" : {"comment":
"Error ellipse position angle (CCW/x)",
"infunc": float,
"format": "%5.1f",
"unit": "deg"},
"FWHM_IMAGE" : {"comment": "FWHM assuming a gaussian core",
"infunc": float,
"format": "%8.2f",
"unit": "pixel"},
"X2_WORLD" : {"comment": "Variance along X-WORLD (alpha)",
"infunc": float,
"format": "%15e",
"unit": "deg**2"},
"Y2_WORLD" : {"comment": "Variance along Y-WORLD (delta)",
"infunc": float,
"format": "%15e",
"unit": "deg**2"},
"XY_WORLD" : {"comment":
"Covariance between X-WORLD and Y-WORLD",
"infunc": float,
"format": "%15e",
"unit": "deg**2"},
"CXX_WORLD" : {"comment":
"Cxx object ellipse parameter (WORLD units)",
"infunc": float,
"format": "%12e",
"unit": "deg**(-2)"},
"CYY_WORLD" : {"comment":
"Cyy object ellipse parameter (WORLD units)",
"infunc": float,
"format": "%12e",
"unit": "deg**(-2)"},
"CXY_WORLD" : {"comment":
"Cxy object ellipse parameter (WORLD units)",
"infunc": float,
"format": "%12e",
"unit": "deg**(-2)"},
"A_WORLD" : {"comment":
"Profile RMS along major axis (world units)",
"infunc": float,
"format": "%12g",
"unit": "deg"},
"B_WORLD" : {"comment":
"Profile RMS along minor axis (world units)",
"infunc": float,
"format": "%12g",
"unit": "deg"},
"THETA_WORLD" : {"comment": "Position angle (CCW/world-x)",
"infunc": float,
"format": "%5.1f",
"unit": "deg"},
"THETA_SKY" : {"comment":
"Position angle (east of north) (native)",
"infunc": float,
"format": "%+6.2f",
"unit": "deg"},
"THETA_J2000" : {"comment":
"Position angle (east of north) (J2000)",
"infunc": float,
"format": "%+6.2f",
"unit": "deg"},
"THETA_B1950" : {"comment":
"Position angle (east of north) (B1950)",
"infunc": float,
"format": "%+6.2f",
"unit": "deg"},
"ERRX2_WORLD" : {"comment":
"Variance of position along X-WORLD (alpha)",
"infunc": float,
"format": "%15e",
"unit": "deg**2"},
"ERRY2_WORLD" : {"comment":
"Variance of position along Y-WORLD (delta)",
"infunc": float,
"format": "%15e",
"unit": "deg**2"},
"ERRXY_WORLD" : {"comment":
"Covariance of position X-WORLD/Y-WORLD",
"infunc": float,
"format": "%15e",
"unit": "deg**2"},
"ERRCXX_WORLD" : {"comment":
"Cxx error ellipse parameter (WORLD units)",
"infunc": float,
"format": "%12g",
"unit": "deg**(-2)"},
"ERRCYY_WORLD" : {"comment":
"Cyy error ellipse parameter (WORLD units)",
"infunc": float,
"format": "%12g",
"unit": "deg**(-2)"},
"ERRCXY_WORLD" : {"comment":
"Cxy error ellipse parameter (WORLD units)",
"infunc": float,
"format": "%12g",
"unit": "deg**(-2)"},
"ERRA_WORLD" : {"comment":
"World RMS position error along major axis",
"infunc": float,
"format": "%12g",
"unit": "pixel"},
"ERRB_WORLD" : {"comment":
"World RMS position error along minor axis",
"infunc": float,
"format": "%12g",
"unit": "pixel"},
"ERRTHETA_WORLD" : {"comment":
"Error ellipse pos. angle (CCW/world-x)",
"infunc": float,
"format": "%5.1f",
"unit": "deg"},
"ERRTHETA_SKY" : {"comment":
"Native error ellipse pos." + \
"angle (east of north)",
"infunc": float,
"format": "%5.1f",
"unit": "deg"},
"ERRTHETA_J2000" : {"comment":
"J2000 error ellipse pos." + \
"angle (east of north)",
"infunc": float,
"format": "%5.1f",
"unit": "deg"},
"ERRTHETA_B1950" : {"comment":
"B1950 error ellipse pos." + \
"angle (east of north)",
"infunc": float,
"format": "%5.1f",
"unit": "deg"},
"FWHM_WORLD" : {"comment": "FWHM assuming a gaussian core",
"infunc": float,
"format": "%12g",
"unit": "deg"},
"CLASS_STAR" : {"comment": "S/G classifier output",
"infunc": float,
"format": "%5.2f",
"unit": ""}
}
def __init__(self, name, mode='r'):
self.name = name
self.mode = mode
self.closed = True
self._file = None
self._keys = list()
self._keys_positions = {}
self._output = None
self._firstline = True
if self.mode != 'r':
raise ValueError('only read-only access is now implemented.')
if PY3:
self._file = builtins.open(self.name, self.mode)
else:
self._file = __builtin__.open(self.name, self.mode)
self.closed = False
# Reading header
self._line = self._file.readline()
if not(self._line):
raise WrongSExtractorfileException(
'not a SExtractor text catalog (empty file)')
while (self._line):
__ll = (self._line).replace('\n', '')
if __ll[0] == '#': # Still in header
columns = __ll.split()
if len(columns) < 3:
raise WrongSExtractorfileException(
'not a SExtractor text catalog (invalid header)')
name=columns[2]
if not(name in SExtractorfile._SE_keys.keys()):
raise WrongSExtractorfileException(
'not a SExtractor text catalog (unknown keyword %s)'
% name)
self._keys_positions[name]=int(columns[1])-1
self._keys.append(name)
else:
break
self._line = self._file.readline()
if not(self._keys):
raise WrongSExtractorfileException( \
'not a SExtractor text catalog (empty header)')
self._outdict = dict([(k, None) for k in self._keys])
self._firstline = True
def __del__(self):
self.close()
def __iter__(self):
return self
def _iter(self):
return self.__iter__()
def __next__(self):
rr = self.readline()
if not(rr):
raise StopIteration
return rr
def next(self):
return self.__next__()
def __bool__(self):
        return self._file is not None
def __nonzero__(self):
return self.__bool__()
def keys(self):
"Return the list of available parameters."
return self._keys
def getcolumns(self):
"Return the list of available parameters."
return list(self.keys())
def readline(self):
"""
Read and analyse the next line of the SExtractor catalog
and return a dictionary {'param1': value, 'param2': value, ...}.
"""
if not(self._firstline):
self._line = self._file.readline()
self._firstline = False
if not(self._line):
return None
__ll = (self._line).replace('\n', '')
__values = __ll.split()
self._outdict.update(dict(list(zip(self._keys, __values))))
for i in self._keys:
self._outdict[i] = (
SExtractorfile._SE_keys[i]["infunc"](self._outdict[i]))
return self._outdict.copy()
def read(self):
"""
Read the file until EOF and return a list of dictionaries.
"""
__result = []
__ll = self.readline()
while __ll:
__result.append(__ll)
__ll = self.readline()
return list(__result)
def readlines(self):
return self.read()
def close(self):
"""
Close the SExtractor file.
"""
if self._file:
if not(self._file.closed):
self._file.close()
self.closed = True
# ======================================================================
def open(name, mode='r'):
"""
Factory function.
Open a SExtractor file and return a SExtractor file object.
"""
return SExtractorfile(name, mode)
# ======================================================================
| [
"__builtin__.open",
"builtins.open"
] | [((28190, 28225), 'builtins.open', 'builtins.open', (['self.name', 'self.mode'], {}), '(self.name, self.mode)\n', (28203, 28225), False, 'import builtins\n'), ((28265, 28303), '__builtin__.open', '__builtin__.open', (['self.name', 'self.mode'], {}), '(self.name, self.mode)\n', (28281, 28303), False, 'import __builtin__\n')] |
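The catalog class above already exposes iteration and the module-level FLAGS bit masks; a short Python 3 usage sketch (the catalog filename is a placeholder, and it assumes FLAGS was among the catalog's output columns):
import sexcatalog

cat = sexcatalog.open("image.cat")
n_blended = n_saturated = 0
for star in cat:
    # FLAGS is parsed to an int, so the module-level masks can be tested directly
    if star['FLAGS'] & sexcatalog.BLENDED:
        n_blended += 1
    if star['FLAGS'] & sexcatalog.SATURATED:
        n_saturated += 1
cat.close()
print(n_blended, "blended and", n_saturated, "saturated detections")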
import falcon
import json
from falcon import Request, Response
from uuid import UUID
from mmlp.manager import ResourceManager
# TODO: Limit maximum items to 500
MAX_ITEMS = 500
SORT_ATTRIBUTES = ['id', 'name', 'description', 'license', 'origin', 'maintainer', 'storage_path', 'created']
class ResourceCollection:
def __init__(self, resource_manager: ResourceManager):
self._resource_manager: ResourceManager = resource_manager
def on_get(self, req: Request, resp: Response):
monitors = self._resource_manager.monitors()
if isinstance(monitors, Exception):
resp.body = json.dumps(dict(error=str(monitors)))
resp.status = falcon.HTTP_500
else:
resp.body = json.dumps(monitors)
            resp.status = falcon.HTTP_200  # a successful GET should return 200, not 201
def on_post(self, req: Request, resp: Response, snap_id: UUID):
result = self._resource_manager.create_monitor(req.media['id'])
if isinstance(result, Exception):
resp.body = json.dumps(dict(error=str(result)))
resp.status = falcon.HTTP_500
else:
resp.body = result.json()
resp.status = falcon.HTTP_201
def on_delete(self, _: Request, resp: Response, snap_id: UUID):
result = self._resource_manager.remove_monitor(snap_id)
if isinstance(result, Exception):
resp.body = json.dumps(dict(error=str(result)))
resp.status = falcon.HTTP_404
else:
resp.body = result.json()
resp.status = falcon.HTTP_202
# @staticmethod
# def get_payload(req):
# try:
# return req.context['doc']
# except Exception as ex:
# throw_exception("get_payload", "ERROR: {}".format(repr(ex)))
| [
"json.dumps"
] | [((735, 755), 'json.dumps', 'json.dumps', (['monitors'], {}), '(monitors)\n', (745, 755), False, 'import json\n')] |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic import TemplateView, RedirectView
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = [
url(r'', include('devices.urls')),
url(r'', include('captures.urls')),
url(r'', include('device_captures.urls')),
url(r'', include('accounts.urls')),
# admin site
url(r'^admin/', include(admin.site.urls)),
# angular frontend
url(r'^$', RedirectView.as_view(url='/web/', permanent=True)),
url(r'^web/*', TemplateView.as_view(template_name='index.html')),
# JSON Web Token authentication
url(r'^api-token-auth/', 'rest_framework_jwt.views.obtain_jwt_token'),
]
urlpatterns = format_suffix_patterns(urlpatterns)  # re-assign so the suffix patterns actually take effect
| [
"django.conf.urls.url",
"django.views.generic.TemplateView.as_view",
"django.conf.urls.include",
"django.views.generic.RedirectView.as_view",
"rest_framework.urlpatterns.format_suffix_patterns"
] | [((731, 766), 'rest_framework.urlpatterns.format_suffix_patterns', 'format_suffix_patterns', (['urlpatterns'], {}), '(urlpatterns)\n', (753, 766), False, 'from rest_framework.urlpatterns import format_suffix_patterns\n'), ((657, 725), 'django.conf.urls.url', 'url', (['"""^api-token-auth/"""', '"""rest_framework_jwt.views.obtain_jwt_token"""'], {}), "('^api-token-auth/', 'rest_framework_jwt.views.obtain_jwt_token')\n", (660, 725), False, 'from django.conf.urls import patterns, include, url\n'), ((237, 260), 'django.conf.urls.include', 'include', (['"""devices.urls"""'], {}), "('devices.urls')\n", (244, 260), False, 'from django.conf.urls import patterns, include, url\n'), ((276, 300), 'django.conf.urls.include', 'include', (['"""captures.urls"""'], {}), "('captures.urls')\n", (283, 300), False, 'from django.conf.urls import patterns, include, url\n'), ((316, 347), 'django.conf.urls.include', 'include', (['"""device_captures.urls"""'], {}), "('device_captures.urls')\n", (323, 347), False, 'from django.conf.urls import patterns, include, url\n'), ((363, 387), 'django.conf.urls.include', 'include', (['"""accounts.urls"""'], {}), "('accounts.urls')\n", (370, 387), False, 'from django.conf.urls import patterns, include, url\n'), ((428, 452), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (435, 452), False, 'from django.conf.urls import patterns, include, url\n'), ((494, 543), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""/web/"""', 'permanent': '(True)'}), "(url='/web/', permanent=True)\n", (514, 543), False, 'from django.views.generic import TemplateView, RedirectView\n'), ((565, 613), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""index.html"""'}), "(template_name='index.html')\n", (585, 613), False, 'from django.views.generic import TemplateView, RedirectView\n')] |
# Copyright (c) 2019 <NAME>
import unittest
import os
import syms as syms
class TestSyms(unittest.TestCase):
filename = 'test_syms.ps'
def setUp(self) -> None:
if os.path.exists(self.filename):
os.remove(self.filename)
def tearDown(self) -> None:
if os.path.exists(self.filename):
os.remove(self.filename)
def test__add_cv(self):
f1 = 0.5
f2 = 0.5
p =[[-10, -2], [0, 15], [1, -11], [10, 2], [0, -15],
[-1, 11], [-10, -2]]
with open(self.filename, 'w') as fp:
syms._add_cv(fp, f1, f2, p, 1, 2)
with open(self.filename) as fp:
lines = fp.readlines()
self.assertEqual(" 5.00 8.50 5.50 -4.50 10.00 2.00 rcurveto",
lines[0].rstrip())
self.assertEqual(' -5.00 -8.50 -5.50 4.50 -10.00 -2.00 rcurveto',
lines[1].rstrip())
def test__add_sg(self):
f1 = 0.5
f2 = 0.5
p = [[-15, 0], [-15, 23], [15, 23], [15, 0], [14.5, 0],
[12, 18], [-12, 18], [-14.5, 0]]
with open(self.filename, 'w') as fp:
syms._add_sg(fp, f1, f2, p, 4, 1)
with open(self.filename) as fp:
lines = fp.readlines()
self.assertEqual(' -0.25 0.00 rlineto', lines[0].rstrip())
def test__add_mv(self):
f1 = 0.5
f2 = 0.5
p =[[-10, -2], [0, 15], [1, -11], [10, 2], [0, -15], [-1, 11], [-10, -2]]
with open(self.filename, 'w') as fp:
syms._add_mv(fp, f1, f2, p, 0)
with open(self.filename) as fp:
lines = fp.readlines()
self.assertEqual(' -5.00 -1.00 rmoveto', lines[0].rstrip())
| [
"os.path.exists",
"syms._add_cv",
"syms._add_sg",
"syms._add_mv",
"os.remove"
] | [((184, 213), 'os.path.exists', 'os.path.exists', (['self.filename'], {}), '(self.filename)\n', (198, 213), False, 'import os\n'), ((296, 325), 'os.path.exists', 'os.path.exists', (['self.filename'], {}), '(self.filename)\n', (310, 325), False, 'import os\n'), ((227, 251), 'os.remove', 'os.remove', (['self.filename'], {}), '(self.filename)\n', (236, 251), False, 'import os\n'), ((339, 363), 'os.remove', 'os.remove', (['self.filename'], {}), '(self.filename)\n', (348, 363), False, 'import os\n'), ((578, 611), 'syms._add_cv', 'syms._add_cv', (['fp', 'f1', 'f2', 'p', '(1)', '(2)'], {}), '(fp, f1, f2, p, 1, 2)\n', (590, 611), True, 'import syms as syms\n'), ((1125, 1158), 'syms._add_sg', 'syms._add_sg', (['fp', 'f1', 'f2', 'p', '(4)', '(1)'], {}), '(fp, f1, f2, p, 4, 1)\n', (1137, 1158), True, 'import syms as syms\n'), ((1504, 1534), 'syms._add_mv', 'syms._add_mv', (['fp', 'f1', 'f2', 'p', '(0)'], {}), '(fp, f1, f2, p, 0)\n', (1516, 1534), True, 'import syms as syms\n')] |
from unittest import TestCase
from collections import namedtuple
import os
import shutil
import inspect
from seqcluster.prepare_data import _read_fastq_files, _create_matrix_uniq_seq
import seqcluster
from nose.plugins.attrib import attr
class TestPreparedata(TestCase):
@attr(collapse=True)
def test_preparedata(self):
out_dir = "test/test_out_prepare"
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
os.mkdir(out_dir)
arg = namedtuple('args', 'minl maxl minc out')
args = arg(15, 40, 1, out_dir)
seq_l, list_s = _read_fastq_files(open("data/examples/collapse/config"), args)
ma_out = open(os.path.join(out_dir, "seqs.ma"), 'w')
seq_out = open(os.path.join(out_dir, "seqs.fa"), 'w')
_create_matrix_uniq_seq(list_s, seq_l, ma_out, seq_out, 1)
self.assertTrue(os.path.exists(os.path.join(out_dir, "seqs.ma")))
self.assertTrue(os.path.exists(os.path.join(out_dir, "seqs.fa")))
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
@attr(umis=True)
def test_umis(self):
from seqcluster.libs.fastq import collapse, write_output
umis = collapse(os.path.abspath("data/examples/umis/sample.fastq"))
if len(umis.keys()) != 2:
raise ValueError("umis didn't detect two unique sequences")
out_dir = "test/test_automated_output"
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
os.mkdir(out_dir)
write_output(os.path.join(out_dir, "umis.fastq"), umis)
| [
"os.path.exists",
"collections.namedtuple",
"nose.plugins.attrib.attr",
"os.path.join",
"os.mkdir",
"shutil.rmtree",
"os.path.abspath",
"seqcluster.prepare_data._create_matrix_uniq_seq"
] | [((278, 297), 'nose.plugins.attrib.attr', 'attr', ([], {'collapse': '(True)'}), '(collapse=True)\n', (282, 297), False, 'from nose.plugins.attrib import attr\n'), ((1065, 1080), 'nose.plugins.attrib.attr', 'attr', ([], {'umis': '(True)'}), '(umis=True)\n', (1069, 1080), False, 'from nose.plugins.attrib import attr\n'), ((383, 406), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (397, 406), False, 'import os\n'), ((451, 468), 'os.mkdir', 'os.mkdir', (['out_dir'], {}), '(out_dir)\n', (459, 468), False, 'import os\n'), ((483, 523), 'collections.namedtuple', 'namedtuple', (['"""args"""', '"""minl maxl minc out"""'], {}), "('args', 'minl maxl minc out')\n", (493, 523), False, 'from collections import namedtuple\n'), ((781, 839), 'seqcluster.prepare_data._create_matrix_uniq_seq', '_create_matrix_uniq_seq', (['list_s', 'seq_l', 'ma_out', 'seq_out', '(1)'], {}), '(list_s, seq_l, ma_out, seq_out, 1)\n', (804, 839), False, 'from seqcluster.prepare_data import _read_fastq_files, _create_matrix_uniq_seq\n'), ((999, 1022), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (1013, 1022), False, 'import os\n'), ((1411, 1434), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (1425, 1434), False, 'import os\n'), ((1479, 1496), 'os.mkdir', 'os.mkdir', (['out_dir'], {}), '(out_dir)\n', (1487, 1496), False, 'import os\n'), ((420, 442), 'shutil.rmtree', 'shutil.rmtree', (['out_dir'], {}), '(out_dir)\n', (433, 442), False, 'import shutil\n'), ((672, 704), 'os.path.join', 'os.path.join', (['out_dir', '"""seqs.ma"""'], {}), "(out_dir, 'seqs.ma')\n", (684, 704), False, 'import os\n'), ((734, 766), 'os.path.join', 'os.path.join', (['out_dir', '"""seqs.fa"""'], {}), "(out_dir, 'seqs.fa')\n", (746, 766), False, 'import os\n'), ((1036, 1058), 'shutil.rmtree', 'shutil.rmtree', (['out_dir'], {}), '(out_dir)\n', (1049, 1058), False, 'import shutil\n'), ((1195, 1245), 'os.path.abspath', 'os.path.abspath', (['"""data/examples/umis/sample.fastq"""'], {}), "('data/examples/umis/sample.fastq')\n", (1210, 1245), False, 'import os\n'), ((1448, 1470), 'shutil.rmtree', 'shutil.rmtree', (['out_dir'], {}), '(out_dir)\n', (1461, 1470), False, 'import shutil\n'), ((1518, 1553), 'os.path.join', 'os.path.join', (['out_dir', '"""umis.fastq"""'], {}), "(out_dir, 'umis.fastq')\n", (1530, 1553), False, 'import os\n'), ((879, 911), 'os.path.join', 'os.path.join', (['out_dir', '"""seqs.ma"""'], {}), "(out_dir, 'seqs.ma')\n", (891, 911), False, 'import os\n'), ((953, 985), 'os.path.join', 'os.path.join', (['out_dir', '"""seqs.fa"""'], {}), "(out_dir, 'seqs.fa')\n", (965, 985), False, 'import os\n')] |
# Write a program that reads the year of birth of
# seven people. At the end, show how many people
# are still under age and how many are already adults.
from datetime import date
maior = 0
menor = 0
for c in range(1, 8):
    nasc = int(input('In what year was person {} born? '.format(c)))
idade = date.today().year - nasc
if idade < 18:
menor += 1
else:
maior += 1
print('In total there were {} adults'.format(maior))
print('And there were also {} minors'.format(menor))
| [
"datetime.date.today"
] | [((310, 322), 'datetime.date.today', 'date.today', ([], {}), '()\n', (320, 322), False, 'from datetime import date\n')] |
import pytest
def sum(a, b):
if not isinstance(a, (float, int)):
        raise TypeError('Type error')
return a + b
def test_mytest():
with pytest.raises(TypeError):
sum('1', 2) | [
"pytest.raises"
] | [((168, 192), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (181, 192), False, 'import pytest\n')] |
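pytest.raises can also assert on the exception text; a small hedged addition to the test above (the test name is invented, and `match` is applied as a regular expression to the message):
def test_mytest_message():
    # fails if the raised TypeError's message does not contain 'Type error'
    with pytest.raises(TypeError, match='Type error'):
        sum('1', 2)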
"""
Copyright 2017 Globo.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest2
from globomap_api import exceptions as gmap_exceptions
from globomap_api.app import create_app
from globomap_api.models.db import DB
from globomap_api.models.document import Document
class TestDocument(unittest2.TestCase):
def setUp(self):
self.app = create_app('tests.config')
self.db_inst = DB(self.app.config)
self.conn_db()
self.cleanup()
self.db_inst.database.create_database('test')
self.db_inst.conn_database('test')
self.db_inst.database.create_collection('test_collection_db')
self.db_inst.get_collection('test_collection_db')
def tearDown(self):
self.conn_db()
self.cleanup()
def conn_db(self):
db_name = self.app.config['ARANGO_DB']
self.db_inst.conn_database(db_name)
def cleanup(self):
try:
self.db_inst.database.delete_database('test')
except:
pass
def test_search_document(self):
"""Test search document by property"""
with self.app.app_context():
col_name = 'test_collection_db'
self._import_bulk(col_name)
search = [[{'field': 'value', 'operator': '==', 'value': 1}]]
docs = self.db_inst.search_in_collection(
'test_collection_db', search)
docs = (set(sorted([d['_key'] for d in docs])))
self.assertEqual(docs, {'doc04', 'doc05'})
def test_get_document(self):
"""Test get document"""
with self.app.app_context():
self._import_bulk('test_collection_db')
inst_doc = Document(self.db_inst.collection)
doc = inst_doc.get_document('doc04')
doc = {'_key': doc['_key'], 'value': doc['value'], }
self.assertDictEqual(doc, {'_key': 'doc04', 'value': 1})
def test_create_document(self):
"""Test create document"""
with self.app.app_context():
inst_doc = Document(self.db_inst.collection)
doc = inst_doc.create_document({'_key': 'doc04', 'value': 1})
doc = {'_key': doc['_key'], '_id': doc['_id'], }
self.assertDictEqual(
doc, {'_key': 'doc04', '_id': 'test_collection_db/doc04', })
def test_get_document_not_exist(self):
"""Test get document not existing"""
with self.app.app_context():
inst_doc = Document(self.db_inst.collection)
with self.assertRaises(gmap_exceptions.DocumentNotExist):
inst_doc.get_document('doc04')
def test_delete_document(self):
"""Test delete document"""
with self.app.app_context():
col_name = 'test_collection_db'
self._import_bulk(col_name)
inst_doc = Document(self.db_inst.collection)
inst_doc.delete_document('doc04')
with self.assertRaises(gmap_exceptions.DocumentNotExist):
inst_doc.get_document('doc04')
def test_delete_document_not_exist(self):
"""Test delee document not existing"""
with self.app.app_context():
inst_doc = Document(self.db_inst.collection)
with self.assertRaises(gmap_exceptions.DocumentNotExist):
inst_doc.delete_document('doc04')
def _import_bulk(self, col_name):
collection = self.db_inst.database.collection(col_name)
collection.import_bulk([
{'_key': 'doc04', 'value': 1},
{'_key': 'doc05', 'value': 1},
{'_key': 'doc06', 'value': 3},
])
| [
"globomap_api.models.db.DB",
"globomap_api.models.document.Document",
"globomap_api.app.create_app"
] | [((872, 898), 'globomap_api.app.create_app', 'create_app', (['"""tests.config"""'], {}), "('tests.config')\n", (882, 898), False, 'from globomap_api.app import create_app\n'), ((922, 941), 'globomap_api.models.db.DB', 'DB', (['self.app.config'], {}), '(self.app.config)\n', (924, 941), False, 'from globomap_api.models.db import DB\n'), ((2204, 2237), 'globomap_api.models.document.Document', 'Document', (['self.db_inst.collection'], {}), '(self.db_inst.collection)\n', (2212, 2237), False, 'from globomap_api.models.document import Document\n'), ((2555, 2588), 'globomap_api.models.document.Document', 'Document', (['self.db_inst.collection'], {}), '(self.db_inst.collection)\n', (2563, 2588), False, 'from globomap_api.models.document import Document\n'), ((2986, 3019), 'globomap_api.models.document.Document', 'Document', (['self.db_inst.collection'], {}), '(self.db_inst.collection)\n', (2994, 3019), False, 'from globomap_api.models.document import Document\n'), ((3356, 3389), 'globomap_api.models.document.Document', 'Document', (['self.db_inst.collection'], {}), '(self.db_inst.collection)\n', (3364, 3389), False, 'from globomap_api.models.document import Document\n'), ((3709, 3742), 'globomap_api.models.document.Document', 'Document', (['self.db_inst.collection'], {}), '(self.db_inst.collection)\n', (3717, 3742), False, 'from globomap_api.models.document import Document\n')] |
from flask import current_app, render_template
from . import main
@main.route('/')
def index():
return render_template('index.html')
@main.route('/use_method')
def use_method():
content = ""
with open('use_method.md', 'r') as f:
content = f.read()
return render_template('use_method.html', text=content)
| [
"flask.render_template"
] | [((109, 138), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (124, 138), False, 'from flask import current_app, render_template\n'), ((285, 333), 'flask.render_template', 'render_template', (['"""use_method.html"""'], {'text': 'content'}), "('use_method.html', text=content)\n", (300, 333), False, 'from flask import current_app, render_template\n')] |
"""Contains numerical solver routines for the schroedinger equation"""
import numpy as np
from scipy.linalg import eigh_tridiagonal
from qmpy._interpolation import _interpolate
def schroedinger(vals, select_range=None, interpol=False,
interpoltype='linear'):
"""
Solves the 1-dimensional schroedinger equation for given numerical
values of x-coordinates and the corresponding value of the potential.
It also supports interpolation of the given data points aswell as settings
for higher accuracy.
Args:
vals (dict): Needed values for computation. Necessary keys are:
- **mass** (*float*) - The mass of the system
- **xcords** (*1darray*) - The xcoordinates corresponding to the
potential values
- **potential** (*1darray*) - The values of the potential.
Optional keys are:
- **xopt** (*tuple*) - Options for the x-range of the output of
form ``(xmin, xmax, npoints)``. If vals does not have a key
            named \'xopt\' the range of the xcords-array and 1999 points
will be used for the interpolation (if interpol is set to
True).
select_range (tuple, optional): Indices of the desired eigenvalues as
tuple ``(ev_min, ev_max)``. Defaults to None meaning all
eigenvalues are calculated.
interpol (bool): Interpolate the given data points. Defaults to False.
interpoltype (str, optional): The kind of interpolation to use.
Accepted options are 'linear', 'cspline' or 'polynomial'. Defaults
to 'linear'.
Returns:
        tuple: ``(energies, wfuncs, pot)``
- **energies** (*1darray*) - The energy levels of each
wavefunction. The entries correspond to the rows in wfuncs.
- **wfuncs** (*ndarray*) - Array where each row contains the
numerical value of a computed normalized wavefunction. Each
column corresponds to one x-coordinate of the input array.
- **pot** (*2darray or None*) - The interpolated values of x- and
y-coordinates. If interpol is set to False None will be returned
instead.
"""
if interpol:
if 'xopt' in vals.keys():
xopt = vals['xopt']
else:
            xx = vals['xcords']
            xopt = (xx[0], xx[-1], 1999)
xint, yint = _interpolate(vals['xcords'], vals['potential'],
xopt, kind=interpoltype)
pot = np.vstack((xint, yint)).T
else:
xint, yint = vals['xcords'], vals['potential']
pot = None
energies, wfuncs = _basic_schroedinger(vals['mass'], xint, yint,
select_range=select_range)
return energies, wfuncs, pot
def calculate_expval(xcoords, wfuncs):
"""
Calculates the expected values :math:`<x>` for the x-coordinate by
numerically calculating the integral
.. math::
\\int_{x_{min}}^{x_{max}} | \\psi (x) |^2 x dx
Args:
xcoords (1darray): Array containing the x-coordinates
wfuncs (ndarray): Array containing the wave functions that
correspond to the x-coordinates
Returns:
1darray: The expected values of the x-coordinate
"""
delta = np.abs(xcoords[0] - xcoords[-1]) / (len(xcoords) + 1)
expval = np.empty((len(wfuncs), ))
for index, wfunc in enumerate(wfuncs):
expval[index] = np.sum((wfunc ** 2) * xcoords) * delta
return expval
def calculate_uncertainty(xcoords, wfuncs):
"""
    Calculates the uncertainty :math:`\\Delta x` defined as
.. math::
\\Delta x = \\sqrt{<x^2> - <x>^2}
for each wavefunction.
Args:
xcoords (1darray): Array containing the x-coordinates
wfuncs (ndarray): Array containing the wave functions that
correspond to the x-coordinates
Returns:
        1darray: The uncertainty of the x-coordinate.
"""
delta = np.abs(xcoords[0] - xcoords[-1]) / (len(xcoords) + 1)
expval = calculate_expval(xcoords, wfuncs)
uncertainty = np.empty((len(wfuncs), ))
index = 0
for wfunc, expv in zip(wfuncs, expval):
expvalsq = np.sum((wfunc ** 2) * (xcoords ** 2)) * delta
uncertainty[index] = np.sqrt(expvalsq - expv ** 2)
index += 1
return uncertainty
def _basic_schroedinger(mass, xcords, potential, select_range=None):
"""
Solves the 1-dimensional schroedinger equation for given numerical
values of x-coordinates and the corresponding value of the potential.
Args:
mass (float): The mass of the system in atomic units.
xcords (1darray): X-coordinates corresponding to the potential
values.
potential (1darray): Numerical values of the potential.
        select_range (tuple): Indices of the desired eigenvalues. Defaults to
None meaning all eigenvalues are calculated.
Returns:
        tuple: ``(energies, wfuncs)``
- **energies** (*1darray*) - The energy levels of each
wavefunction. The entries correspond to the rows in wfuncs.
- **wfuncs** (*ndarray*) - Array where each row contains the
numerical value of a computed normalized wavefunction. Each
column corresponds to one x-coordinate of the input array.
"""
delta = np.abs(xcords[0] - xcords[-1]) / (len(xcords) + 1)
diag = potential + 1 / (mass * delta ** 2)
offdiag = -1 / (2 * mass * delta ** 2) * np.ones((len(potential) - 1))
if select_range:
energies, wfuncs = eigh_tridiagonal(diag, offdiag, select='i',
select_range=select_range)
else:
energies, wfuncs = eigh_tridiagonal(diag, offdiag)
wfuncs = wfuncs.copy().T
for index, wfunc in enumerate(wfuncs):
norm = 1 / np.sqrt(np.sum(wfunc ** 2) * delta)
wfuncs[index, :] = wfunc * norm
return energies, wfuncs
| [
"numpy.abs",
"scipy.linalg.eigh_tridiagonal",
"numpy.sqrt",
"numpy.sum",
"numpy.vstack",
"qmpy._interpolation._interpolate"
] | [((2474, 2546), 'qmpy._interpolation._interpolate', '_interpolate', (["vals['xcords']", "vals['potential']", 'xopt'], {'kind': 'interpoltype'}), "(vals['xcords'], vals['potential'], xopt, kind=interpoltype)\n", (2486, 2546), False, 'from qmpy._interpolation import _interpolate\n'), ((3386, 3418), 'numpy.abs', 'np.abs', (['(xcoords[0] - xcoords[-1])'], {}), '(xcoords[0] - xcoords[-1])\n', (3392, 3418), True, 'import numpy as np\n'), ((4078, 4110), 'numpy.abs', 'np.abs', (['(xcoords[0] - xcoords[-1])'], {}), '(xcoords[0] - xcoords[-1])\n', (4084, 4110), True, 'import numpy as np\n'), ((4375, 4404), 'numpy.sqrt', 'np.sqrt', (['(expvalsq - expv ** 2)'], {}), '(expvalsq - expv ** 2)\n', (4382, 4404), True, 'import numpy as np\n'), ((5474, 5504), 'numpy.abs', 'np.abs', (['(xcords[0] - xcords[-1])'], {}), '(xcords[0] - xcords[-1])\n', (5480, 5504), True, 'import numpy as np\n'), ((5696, 5766), 'scipy.linalg.eigh_tridiagonal', 'eigh_tridiagonal', (['diag', 'offdiag'], {'select': '"""i"""', 'select_range': 'select_range'}), "(diag, offdiag, select='i', select_range=select_range)\n", (5712, 5766), False, 'from scipy.linalg import eigh_tridiagonal\n'), ((5848, 5879), 'scipy.linalg.eigh_tridiagonal', 'eigh_tridiagonal', (['diag', 'offdiag'], {}), '(diag, offdiag)\n', (5864, 5879), False, 'from scipy.linalg import eigh_tridiagonal\n'), ((2595, 2618), 'numpy.vstack', 'np.vstack', (['(xint, yint)'], {}), '((xint, yint))\n', (2604, 2618), True, 'import numpy as np\n'), ((3546, 3574), 'numpy.sum', 'np.sum', (['(wfunc ** 2 * xcoords)'], {}), '(wfunc ** 2 * xcoords)\n', (3552, 3574), True, 'import numpy as np\n'), ((4300, 4333), 'numpy.sum', 'np.sum', (['(wfunc ** 2 * xcoords ** 2)'], {}), '(wfunc ** 2 * xcoords ** 2)\n', (4306, 4333), True, 'import numpy as np\n'), ((5981, 5999), 'numpy.sum', 'np.sum', (['(wfunc ** 2)'], {}), '(wfunc ** 2)\n', (5987, 5999), True, 'import numpy as np\n')] |
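A short usage sketch of the three public helpers above; the grid, mass and harmonic potential are invented example inputs, not taken from the source:
import numpy as np

x = np.linspace(-5.0, 5.0, 1999)
vals = {'mass': 1.0, 'xcords': x, 'potential': 0.5 * x ** 2}

# solve for the four lowest eigenstates without interpolation
energies, wfuncs, _ = schroedinger(vals, select_range=(0, 3))
print(energies)                          # energy levels, one per wavefunction
print(calculate_expval(x, wfuncs))       # <x> for each eigenstate
print(calculate_uncertainty(x, wfuncs))  # Delta x for each eigenstate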
"""setup-services.py
Script to help bring up docker services for testing.
"""
import argparse
import logging
import shutil
import subprocess
import sys
from pathlib import Path
from typing import Dict, List
logger = logging.getLogger("setup-services")
streamHandler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("%(message)s")
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
logger.setLevel(level=logging.INFO)
HERE = Path(".").absolute()
BASE = Path(__file__).parent.absolute()
COMPOSE_FILE = BASE / "docker-compose.yaml"
TEST_DATA_PATH = HERE / "echopype" / "test_data"
def parse_args():
parser = argparse.ArgumentParser(description="Setup services for testing")
parser.add_argument("--deploy", action="store_true", help="Flag to setup docker services")
parser.add_argument(
"--no-pull",
action="store_true",
help="Optional flag to skip pulling the latest images from dockerhub",
)
parser.add_argument(
"--tear-down",
action="store_true",
help="Flag to tear down docker services",
)
parser.add_argument(
"--images",
action="store_true",
help="Optional flag to remove images also during tear down",
)
return parser.parse_args()
def run_commands(commands: List[Dict]) -> None:
for idx, command in enumerate(commands, start=1):
msg = command.get("msg")
cmd = command.get("cmd")
args = command.get("args", None)
logger.info(f"{idx}) {msg}")
if cmd is None:
continue
elif isinstance(cmd, list):
subprocess.run(cmd)
elif callable(cmd):
cmd(args)
else:
raise ValueError(f"command of {type(cmd)} is invalid.")
if __name__ == "__main__":
args = parse_args()
commands = []
if all([args.deploy, args.tear_down]):
print("Cannot have both --deploy and --tear-down. Exiting.")
sys.exit(1)
if not any([args.deploy, args.tear_down]):
print("Please provide either --deploy or --tear-down flags. For more help use --help flag.")
sys.exit(0)
if args.deploy:
commands.append({"msg": "Starting test services deployment ...", "cmd": None})
if not args.no_pull:
commands.append(
{
"msg": "Pulling latest images ...",
"cmd": ["docker-compose", "-f", COMPOSE_FILE, "pull"],
}
)
commands.append(
{
"msg": "Bringing up services ...",
"cmd": [
"docker-compose",
"-f",
COMPOSE_FILE,
"up",
"-d",
"--remove-orphans",
"--force-recreate",
],
}
)
if TEST_DATA_PATH.exists():
commands.append(
{
"msg": f"Deleting old test folder at {TEST_DATA_PATH} ...",
"cmd": shutil.rmtree,
"args": TEST_DATA_PATH,
}
)
commands.append(
{
"msg": "Copying new test folder from http service ...",
"cmd": [
"docker",
"cp",
"-L",
"docker_httpserver_1:/usr/local/apache2/htdocs/data",
TEST_DATA_PATH,
],
}
)
if args.tear_down:
command = ["docker-compose", "-f", COMPOSE_FILE, "down", "--remove-orphans", "--volumes"]
if args.images:
command = command + ["--rmi", "all"]
commands.append({"msg": "Stopping test services deployment ...", "cmd": command})
commands.append({"msg": "Done.", "cmd": ["docker", "ps", "--last", "2"]})
run_commands(commands)
| [
"logging.getLogger",
"logging.StreamHandler",
"argparse.ArgumentParser",
"pathlib.Path",
"logging.Formatter",
"subprocess.run",
"sys.exit"
] | [((219, 254), 'logging.getLogger', 'logging.getLogger', (['"""setup-services"""'], {}), "('setup-services')\n", (236, 254), False, 'import logging\n'), ((271, 304), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (292, 304), False, 'import logging\n'), ((317, 349), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s"""'], {}), "('%(message)s')\n", (334, 349), False, 'import logging\n'), ((652, 717), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Setup services for testing"""'}), "(description='Setup services for testing')\n", (675, 717), False, 'import argparse\n'), ((465, 474), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (469, 474), False, 'from pathlib import Path\n'), ((1971, 1982), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1979, 1982), False, 'import sys\n'), ((2140, 2151), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2148, 2151), False, 'import sys\n'), ((493, 507), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (497, 507), False, 'from pathlib import Path\n'), ((1628, 1647), 'subprocess.run', 'subprocess.run', (['cmd'], {}), '(cmd)\n', (1642, 1647), False, 'import subprocess\n')] |
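The entries consumed by run_commands are plain dictionaries; a hedged illustration of the three accepted "cmd" forms (the messages and the scratch path are invented):
extra_commands = [
    {"msg": "Listing running containers ...", "cmd": ["docker", "ps"]},  # argv list -> subprocess.run
    {"msg": "Removing a scratch directory ...", "cmd": shutil.rmtree, "args": Path("scratch")},  # callable(args)
    {"msg": "Nothing else to do.", "cmd": None},  # message only
]
run_commands(extra_commands)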
from sklearn.linear_model import LinearRegression
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def getData(location):
    # read the CSV and use the row index as x and the sixth column as the target series y
    data = pd.read_csv(location)
    x = np.arange(0, len(data)).reshape(-1, 1)
    y = data.iloc[:, 5].values.reshape(-1, 1)
    return x, y
def createModel():
lr = LinearRegression()
return lr
def predict(model, forecast, x, y):
    # iteratively forecast one day at a time, refitting on the extended series
    for day in range(1, forecast + 1):
model.fit(x, y)
x_pred = np.arange(len(x), len(x) + 1).reshape(-1, 1)
y_pred = model.predict(x_pred)
x = np.append(x, x_pred)
y = np.append(y, y_pred)
y_pred = np.delete(y_pred, 0)
x = x.reshape(-1, 1)
return x, y
def normalizeLRpredict(y, forecast, data_len):
diffy = y[len(y)-forecast-1] - y[len(y)-forecast]
for i in range(data_len, data_len + forecast):
y[i] = y[i] + diffy
return y
def plotPred(x, y, data_len):
y_pred = y[data_len:data_len + forecast]
x_pred = x[data_len:data_len + forecast]
x = x[0:data_len]
y = y[0:data_len]
plt.plot(x, y)
plt.plot(x_pred, y_pred)
plt.show()
if __name__ == '__main__':
dataloc = "../data/VTI.csv"
forecast = 30
x, y = getData(dataloc)
data_len = len(x)
model = createModel()
x, y = predict(model, forecast, x, y)
y = normalizeLRpredict(y, forecast, data_len)
# plotPred(x, y, data_len)
| [
"numpy.delete",
"matplotlib.pyplot.plot",
"numpy.append",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show"
] | [((315, 333), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (331, 333), False, 'from sklearn.linear_model import LinearRegression\n'), ((1093, 1107), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1101, 1107), True, 'import matplotlib.pyplot as plt\n'), ((1112, 1136), 'matplotlib.pyplot.plot', 'plt.plot', (['x_pred', 'y_pred'], {}), '(x_pred, y_pred)\n', (1120, 1136), True, 'import matplotlib.pyplot as plt\n'), ((1141, 1151), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1149, 1151), True, 'import matplotlib.pyplot as plt\n'), ((586, 606), 'numpy.append', 'np.append', (['x', 'x_pred'], {}), '(x, x_pred)\n', (595, 606), True, 'import numpy as np\n'), ((619, 639), 'numpy.append', 'np.append', (['y', 'y_pred'], {}), '(y, y_pred)\n', (628, 639), True, 'import numpy as np\n'), ((657, 677), 'numpy.delete', 'np.delete', (['y_pred', '(0)'], {}), '(y_pred, 0)\n', (666, 677), True, 'import numpy as np\n')] |
"""A checker for application health.
When configured with a Sanic application, this checker provides a means
for the application to specify whether or not it is operating in a healthy state.
By identifying broken/unhealthy states, a management system could restart the
application, potentially allowing it to recover.
This checker can be used to set up liveness probes for Kubernetes deployments:
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command
It may also be used to define container health checks in docker-compose:
https://docs.docker.com/compose/compose-file/#healthcheck
This checker exposes the ``/health`` endpoint by default.
"""
import logging
import time
from typing import Callable, Mapping, Optional
from sanic import Sanic, response
from .checker import MSG_FAIL, MSG_OK, BaseChecker
log = logging.getLogger(__name__)
class HealthCheck(BaseChecker):
"""A checker allowing a Sanic application to describe the health of the
application at runtime.
The results of registered check functions are cached by this checker by
default. To disable result caching, initialize the checker with ``no_cache=True``.
Since the health endpoint may be polled frequently (and potentially by multiple
systems), the cache allows the check function results to be valid for a window of
time, reducing the execution cost. This may be particularly helpful if a given
health check is more expensive.
Args:
app: The Sanic application instance to register the checker to. If not specified on
initialization, the user must pass it to the ``init`` method to register the checker
route with the application. If specified on initialization, ``init`` will be called
automatically.
uri: The route URI to expose for the checker.
checks: A collection of checks to register with the checker on init. A check is a
function which takes no arguments and returns (``bool``, ``str``), where the
boolean signifies whether the check passed or not, and the string is a message
associated with the success/failure.
no_cache: Disable the checker from caching check results. If this is set to ``True``, the
``success_ttl`` and ``failure_ttl`` do nothing.
success_handler: A handler function which takes the check results (a list[dict])
and returns a message string. This is called when all checks pass.
success_headers: Headers to include in the checker response on success. By default, no
additional headers are sent. This can be useful if, for example, a success
handler is specified which returns a JSON message. The Content-Type: application/json
header could be included here.
success_status: The HTTP status code to use when the checker passes its checks.
success_ttl: The TTL for a successful check result to live in the cache before it is updated.
failure_handler: A handler function which takes the check results (a list[dict])
and returns a message string. This is called when any check fails.
failure_headers: Headers to include in the checker response on failure. By default, no
additional headers are sent. This can be useful if, for example, a failure
handler is specified which returns a JSON message. The Content-Type: application/json
header could be included here.
failure_status: The HTTP status code to use when the checker fails its checks.
failure_ttl: The TTL for a failed check result to live in the cache before it is updated.
exception_handler: A function which would get called when a registered check
raises an exception. This handler must take two arguments: the check function
which raised the exception, and the tuple returned by ``sys.exc_info``. It must
return a tuple of (bool, string), where the boolean is whether or not it passed
and the string is the message to use for the check response. By default, no
exception handler is registered, so an exception will lead to a check failure.
options: Any additional options to pass to the ``Sanic.add_route`` method
on ``init``.
"""
default_uri = '/health'
def __init__(
self,
app: Optional[Sanic] = None,
uri: Optional[str] = None,
checks=None,
no_cache: bool = False,
success_handler: Optional[Callable] = None,
success_headers: Optional[Mapping] = None,
success_status: Optional[int] = 200,
success_ttl: Optional[int] = 25,
failure_handler: Optional[Callable] = None,
failure_headers: Optional[Mapping] = None,
failure_status: Optional[int] = 500,
failure_ttl: Optional[int] = 5,
exception_handler: Optional[Callable] = None,
**options,
) -> None:
self.cache = {}
self.no_cache = no_cache
self.success_ttl = success_ttl
self.failure_ttl = failure_ttl
super(HealthCheck, self).__init__(
app=app,
uri=uri,
checks=checks,
success_handler=success_handler,
success_headers=success_headers,
success_status=success_status,
failure_handler=failure_handler,
failure_headers=failure_headers,
failure_status=failure_status,
exception_handler=exception_handler,
**options,
)
async def run(self, request) -> response.HTTPResponse:
"""Run all checks and generate an HTTP response for the results."""
results = []
for check in self.checks:
# See if the check already has a cached health state. If so, use it;
# otherwise, re-run the check.
if not self.no_cache and check in self.cache and self.cache[check].get('expires') >= time.time():
results.append(self.cache[check])
else:
result = await self.exec_check(check)
if not self.no_cache:
if result.get('passed'):
ttl = self.success_ttl
else:
ttl = self.failure_ttl
result['expires'] = result['timestamp'] + ttl
self.cache[check] = result
results.append(result)
passed = all((r['passed'] for r in results))
if passed:
msg = MSG_OK
if self.success_handler:
msg = self.success_handler(results)
return response.text(
body=msg,
status=self.success_status,
headers=self.success_headers,
)
else:
msg = MSG_FAIL
if self.failure_handler:
msg = self.failure_handler(results)
return response.text(
body=msg,
status=self.failure_status,
headers=self.failure_headers,
)
| [
"logging.getLogger",
"time.time",
"sanic.response.text"
] | [((891, 918), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (908, 918), False, 'import logging\n'), ((6796, 6882), 'sanic.response.text', 'response.text', ([], {'body': 'msg', 'status': 'self.success_status', 'headers': 'self.success_headers'}), '(body=msg, status=self.success_status, headers=self.\n success_headers)\n', (6809, 6882), False, 'from sanic import Sanic, response\n'), ((7092, 7178), 'sanic.response.text', 'response.text', ([], {'body': 'msg', 'status': 'self.failure_status', 'headers': 'self.failure_headers'}), '(body=msg, status=self.failure_status, headers=self.\n failure_headers)\n', (7105, 7178), False, 'from sanic import Sanic, response\n'), ((6097, 6108), 'time.time', 'time.time', ([], {}), '()\n', (6106, 6108), False, 'import time\n')] |
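A minimal registration sketch, assuming this module and its BaseChecker are importable; the app name and check are invented, and a check returns (passed, message) as the docstring describes:
from sanic import Sanic

def database_reachable():
    # a real check would ping a dependency here; this one always passes
    return True, "database reachable"

app = Sanic("example-service")
# registers GET /health (the checker's default_uri) and caches passing results for 30 s
checker = HealthCheck(app=app, checks=[database_reachable], success_ttl=30)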
import re
# Declare a dictionary
dict = {}
# Method to check whether the word
# exists in dictionary or not
def uniqueWord(Word):
    if Word in dict:
        # If the word already exists in the dictionary,
        # simply increase its count
        dict[Word] += 1
    else:
        # If the word does not exist in the dictionary,
        # add it with an initial count of 1
        dict.update({Word: 1})
# Driver code
if __name__ == '__main__':
string = "one, two, three, one, four, five, Kamila , flower, cactis, flower"
    # re.split() is used to split the string into
    # words, separated by non-alphanumeric characters (\W)
    ListOfWords = re.split(r"[\W]+", string)
# Extract each word from ListOfWords
# and pass it to the method uniqueWord()
for words in ListOfWords:
uniqueWord(words)
# Iterate over dictionary if the value
# of the key is 1, then print the element
for elements in dict:
if dict[elements] == 1:
print(elements)
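# For the sample string above, only the words that occur exactly once are
# printed (dict preserves insertion order on Python 3.7+):
#   two, three, four, five, Kamila, cactis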
| [
"re.split"
] | [((769, 795), 're.split', 're.split', (['"""[\\\\W]+"""', 'string'], {}), "('[\\\\W]+', string)\n", (777, 795), False, 'import re\n')] |
"""
File: word_guess.py
-------------------
My project is an improved version of the Word Guessing Game.
It allows a multiplayer mode (up to 4 players)
"""
import random
LEXICON_FILE = "Lexicon.txt" # File to read word list from
INITIAL_GUESSES = 8 # Initial number of guesses player starts with
NUM_OF_PLAYERS = 3
DEFAULT_FILE = 'word-guessing-banner.jpg'
def play_game(secret_word):
secret_word_length = len(secret_word)
hidden_string = ""
new_list = []
guesses = INITIAL_GUESSES
correct_guesses = 0
score = 100
for i in range(secret_word_length):
new_list.append("-")
new_string = ""
for elem in new_list:
new_string += str(elem)
print("The word now looks like this: " + new_string)
print("You have " + str(INITIAL_GUESSES) + " guesses left")
    while guesses != 0 and "".join(new_list) != secret_word:
        # read the player's next guess (expected to be a single letter)
        user_guess = input("Type a single letter here, then press enter: ")
new_string = ""
found = False
for i in range(secret_word_length):
if secret_word[i] == user_guess.upper():
new_list[i] = user_guess.upper()
found = True
if found:
print("That guess is correct.")
correct_guesses += 1
for elem in new_list:
new_string += str(elem)
if secret_word.find(user_guess.upper()) == -1:
guesses -= 1
print("There are no "+ user_guess.upper() + "'s in the word")
score = score - (score * 0.2) #with each incorrect guess, we remove 20% of the score
if new_string == secret_word:
print("Congratulations, the word is: " + new_string)
            break
else:
if guesses == 0:
print("Sorry, you lost. The secret word was: " + secret_word)
else:
print("The word now looks like this: " + new_string)
print("You have " + str(guesses) + " guesses left")
return score
def open_lexicon_file(filename):
cs_words = []
with open(filename) as f:
for line in f:
cs_words.append(line.strip())
return cs_words
def get_word():
"""
This function returns a secret word that the player is trying
to guess in the game. This function initially has a very small
list of words that it can select from to make it easier for you
to write and debug the main game playing program. In Part II of
writing this program, you will re-implement this function to
select a word from a much larger list by reading a list of words
from the file specified by the constant LEXICON_FILE.
"""
cs_words = open_lexicon_file(LEXICON_FILE)
random_choice = random.choice(cs_words)
return random_choice
def return_highest_scoring_player(score_list):
""" a) create a list of the dict's keys and values;
b) return the key with the max value"""
v = list(score_list.values())
max_val = max(v)
min_val = min(v)
k = list(score_list.keys())
if max_val != min_val:
return k[v.index(max(v))] + " wins!"
else:
return "It's a tie!"
def output_game_intro():
print("WELCOME TO MY WORD GUESSING GAME!")
print("-----------------------------------")
print("-------- -------")
print("------------ -----------")
print("-------------- -------------")
print("-------- -------")
print("-----------------------------------")
print("Rules are simple: the computer will generate a random word for you to guess. Enter one character at a time, until you guess the final word. The player who guesses more words, or in less steps wins the round.")
print("Each player has a maximum of " + str(INITIAL_GUESSES) + " guesses.")
print("Ready?? Let's get started!!")
print("-----------------------------------")
def main():
"""
To play the game, we first select the secret word for the
player to guess and then play the game using that secret word.
"""
output_game_intro()
score_list = {}
for i in range(NUM_OF_PLAYERS):
print("This is Player " + str(i+1) + "'s turn.")
secret_word = get_word()
current_player = "Player " + str(i+1)
score_list[current_player] = play_game(secret_word)
print(str(current_player) + " scored: " + str(score_list[current_player]))
print("-----------------------------------")
print("At the end of this round, here are the results: ")
print(return_highest_scoring_player(score_list))
# This provided line is required at the end of a Python file
# to call the main() function.
if __name__ == "__main__":
main()
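# Illustrative example (assumed scores, not produced by a real game run):
#   return_highest_scoring_player({"Player 1": 80.0, "Player 2": 64.0, "Player 3": 64.0})
#   -> "Player 1 wins!"
# Note that a tie is only reported when every player has the same score.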
| [
"random.choice"
] | [((2820, 2843), 'random.choice', 'random.choice', (['cs_words'], {}), '(cs_words)\n', (2833, 2843), False, 'import random\n')] |
from maya import cmds, mel
# load hik
mel.eval("HIKCharacterControlsTool")
# ----------------------------------------------------------------------------
LEG_HIERARCHY = ["hip", "knee", "ankle"]
ARM_HIERARCHY = ["shoulder", "elbow", "wrist"]
BODY_HIERARCHY = ["hip", "spine", "neck", "head"]
# ----------------------------------------------------------------------------
HIK_MAPPER = {
"Reference": "reference",
"Hips": "hip",
"Spine": "spine",
"Neck": "neck",
"Head": "head",
"LeftUpLeg": "l_hip",
"LeftLeg": "l_knee",
"LeftFoot": "l_ankle",
"RightUpLeg": "r_hip",
"RightLeg": "r_knee",
"RightFoot": "r_ankle",
"LeftArm": "l_shoulder",
"LeftForeArm": "l_elbow",
"LeftHand": "l_wrist",
"RightArm": "r_shoulder",
"RightForeArm": "r_elbow",
"RightHand": "r_wrist"
}
HIK_ID_MAPPER = {
cmds.hikGetNodeIdFromName(node): node
for node in HIK_MAPPER.keys()
}
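# HIK_ID_MAPPER reverses the lookup direction: it maps the numeric HumanIK
# node id returned by cmds.hikGetNodeIdFromName back to the HIK slot name
# (e.g. "Hips"), which can then be resolved to a rig joint via HIK_MAPPER.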
| [
"maya.mel.eval",
"maya.cmds.hikGetNodeIdFromName"
] | [((39, 75), 'maya.mel.eval', 'mel.eval', (['"""HIKCharacterControlsTool"""'], {}), "('HIKCharacterControlsTool')\n", (47, 75), False, 'from maya import cmds, mel\n'), ((863, 894), 'maya.cmds.hikGetNodeIdFromName', 'cmds.hikGetNodeIdFromName', (['node'], {}), '(node)\n', (888, 894), False, 'from maya import cmds, mel\n')] |
import os
import torch
import numpy as np
from sklearn.model_selection import train_test_split  # may also be re-exported by the star imports below
from CNN import *
from Training import *
from Testing import *
from Data_maker_loader import *
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
import time
server = "/rdsgpfs/general/user/kpp15/home/Hansen"
# from where to load tensors for data
wherepath = server + "/data/raster/tensors"
# Where to get and save tif map
sourcepath = server + "/data/raster/MadreTiff"
# Where to get model checkpoint
modelpath = server + "/deforestation_forecasting/models"
checkpoint = modelpath + "/CNN.CNNmodel/CNN.CNNmodel_3.7.19_23.47_315834[9].pbs.pt"
modelname = checkpoint.split("/", -1)[-1]
# Where to save Test_Roc
picspath = server + "/deforestation_forecasting/models/pics"
file = server + "/deforestation_forecasting/models/grid_summary/CNN.CNNmodel.txt"
if __name__ == "__main__":
start = time.time()
# Set all parameters
# Set training time period
start_year = 17
end_year = 17
    # set CNN model parameters
size = 45
DSM = True
input_dim = 11
hidden_dim = [128, 64, 64, 32]
kernel_size = [(5, 5), (5, 5), (3, 3), (3, 3)]
stride = [(2, 2), (1, 1), (1, 1), (1, 1)]
padding = [0, 0, 0, 0]
dropout = 0.2
levels = [13]
# set ratios of 0:1 labels in Train and Validation data sets
train_times = 4
test_times = 4
# set criteria for Early stopping
AUC = False
BCE_Wloss = False
FNcond = True
    # set parameters for the cost of the confusion matrix
w = 10 # weights on the False Negative Rate
perc = (100 * train_times) / (
train_times + 1
    )  # the percentile for threshold selection. Advisable to be 100*times/(times+1)
# Weight parameter for the weighted BCE loss
pos_weight = 3
# Adam optimiser parameters:
lr = 0.0001
weight_decay = 0
# Early Stopping parameters
n_splits = 5
n_epochs = 10
patience = 3
    # train_model parameters for debugging and time limits
training_time = 60
stop_batch = None
print_batch = 200
batch_size = 32
model = CNNmodel(
input_dim=input_dim,
hidden_dim=hidden_dim,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dropout=dropout,
levels=levels,
)
criterion = torch.nn.BCEWithLogitsLoss(
reduction="mean", pos_weight=torch.tensor(pos_weight)
)
optimiser = torch.optim.Adam(
params=model.parameters(), lr=0.0001, weight_decay=weight_decay
)
checkpoint = torch.load(checkpoint)
model.load_state_dict(checkpoint["model_state_dict"])
optimiser.load_state_dict(checkpoint["optimiser_state_dict"])
Data = with_DSM(
size=int(size / 2),
start_year=start_year,
end_year=end_year,
wherepath=wherepath,
DSM=DSM,
)
if not (
os.path.isfile(wherepath + "/" + "Train_idx%d.npy" % (end_year))
& os.path.isfile(wherepath + "/" + "Test_idx%d.npy" % (end_year))
):
print("Creating indexes split")
train_idx, test_idx = train_test_split(
np.arange(len(Data.labels)),
test_size=0.2,
random_state=42,
shuffle=True,
stratify=Data.labels,
)
np.save(wherepath + "Train_idx%d.npy" % (end_year), train_idx)
np.save(wherepath + "Test_idx%d.npy" % (end_year), test_idx)
else:
train_idx = np.load(wherepath + "/" + "Train_idx%d.npy" % (end_year))
test_idx = np.load(wherepath + "/" + "Test_idx%d.npy" % (end_year))
train_sampler = ImbalancedDatasetUnderSampler(
labels=Data.labels, indices=train_idx, times=train_times
)
test_sampler = ImbalancedDatasetUnderSampler(
labels=Data.labels, indices=test_idx, times=test_times
)
job_id = modelname + f".transfer_learning_20{end_year+1:d}"
# Print Summary of the training parameters
# =========================================================================
print(
"Model:",
modelname,
"\nPeriod 20%d-20%d -> 20%d" % (start_year, end_year, end_year + 1),
)
print("New model:", job_id)
print(
"\t% deforested pixels in train:",
train_sampler.count[1] / sum(train_sampler.count),
)
print(
"\t% deforested pixels in val:", test_sampler.count[1] / sum(test_sampler.count)
)
print("\nHyperparameters: ")
print("\tImage size: %d" % (size))
print("\tHidden dim: ", hidden_dim)
print(
"\tTrain and Val ratios of 0:1 labels: 1:%d ; 1:%d " % (train_times, test_times)
)
print(
"\tADAM optimizer parameters: lr=%.7f, weight decay=%.2f, batch size=%d"
% (lr, weight_decay, batch_size)
)
print("\tBCEWithLogitsLoss pos_weights = %.2f" % (pos_weight))
print("\tn_epochs = %d with patience of %d epochs" % (n_epochs, patience))
print("\tCross Validation with n_splits = %d " % (n_splits))
    print(
        "\tWhether to use BCEWithLogitsLoss as an early stopping criterion:",
        ((not AUC) & (not FNcond)),
    )
    print("\tWhether to use AUC as an early stopping criterion:", AUC)
    print("\tWhether to use cost = FP+w*FN / TP+FP+w*FN+TN as an early stopping criterion")
    print(
        "\twith w = %d and threshold = the %d percentile of the output" % (w, perc),
        FNcond,
    )
print("\nModel: \n", model)
print("\nCriterion: \n", criterion)
print("\nOptimiser: \n", optimiser)
(
model,
train_loss,
valid_loss,
AUCs_train,
AUCs_val,
costs_train,
costs_val,
name,
) = train_model(
Data=Data,
model=model,
sampler=train_sampler,
criterion=criterion,
optimiser=optimiser,
patience=patience,
n_epochs=n_epochs,
n_splits=n_splits,
batch_size=batch_size,
stop_batch=stop_batch,
print_batch=print_batch,
training_time=training_time,
w=w,
perc=perc,
FNcond=FNcond,
AUC=AUC,
job=job_id,
path=modelpath,
)
visualize(
train=train_loss,
valid=valid_loss,
name="BCEloss",
modelname=name,
best="min",
path=picspath,
)
visualize(
train=AUCs_train,
valid=AUCs_val,
name="AUC",
modelname=name,
best="max",
path=picspath,
)
visualize(
train=costs_train,
valid=costs_val,
name="Cost",
modelname=name,
best="min",
path=picspath,
)
test_loss, test_AUC, test_cost = test_model(
model=model,
Data=Data,
criterion=criterion,
w=w,
perc=perc,
test_sampler=test_sampler,
batch_size=batch_size,
stop_batch=stop_batch,
name=name,
path=picspath,
)
write_report(
name=name,
job_id=job_id,
train_loss=train_loss,
valid_loss=valid_loss,
test_loss=test_loss,
AUCs_train=AUCs_train,
AUCs_val=AUCs_val,
test_AUC=test_AUC,
costs_train=costs_train,
costs_val=costs_val,
test_cost=test_cost,
file=file,
FNcond=FNcond,
AUC=AUC,
)
print("\n\nEND!Total time (in h):", (time.time() - start) / 3600)
| [
"torch.load",
"torch.tensor",
"torch.cuda.is_available",
"numpy.load",
"time.time",
"numpy.save",
"torch.device"
] | [((139, 164), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (162, 164), False, 'import torch\n'), ((174, 219), 'torch.device', 'torch.device', (["('cuda:0' if use_cuda else 'cpu')"], {}), "('cuda:0' if use_cuda else 'cpu')\n", (186, 219), False, 'import torch\n'), ((869, 880), 'time.time', 'time.time', ([], {}), '()\n', (878, 880), False, 'import time\n'), ((2546, 2568), 'torch.load', 'torch.load', (['checkpoint'], {}), '(checkpoint)\n', (2556, 2568), False, 'import torch\n'), ((3284, 3344), 'numpy.save', 'np.save', (["(wherepath + 'Train_idx%d.npy' % end_year)", 'train_idx'], {}), "(wherepath + 'Train_idx%d.npy' % end_year, train_idx)\n", (3291, 3344), True, 'import numpy as np\n'), ((3355, 3413), 'numpy.save', 'np.save', (["(wherepath + 'Test_idx%d.npy' % end_year)", 'test_idx'], {}), "(wherepath + 'Test_idx%d.npy' % end_year, test_idx)\n", (3362, 3413), True, 'import numpy as np\n'), ((3446, 3501), 'numpy.load', 'np.load', (["(wherepath + '/' + 'Train_idx%d.npy' % end_year)"], {}), "(wherepath + '/' + 'Train_idx%d.npy' % end_year)\n", (3453, 3501), True, 'import numpy as np\n'), ((3523, 3577), 'numpy.load', 'np.load', (["(wherepath + '/' + 'Test_idx%d.npy' % end_year)"], {}), "(wherepath + '/' + 'Test_idx%d.npy' % end_year)\n", (3530, 3577), True, 'import numpy as np\n'), ((2385, 2409), 'torch.tensor', 'torch.tensor', (['pos_weight'], {}), '(pos_weight)\n', (2397, 2409), False, 'import torch\n'), ((7323, 7334), 'time.time', 'time.time', ([], {}), '()\n', (7332, 7334), False, 'import time\n')] |
from functools import reduce
with open('input.txt', "r+") as file:
contents = file.read()
def restructure(first, second):
# Separator line
if first.endswith(' ') and second == '\n':
return first.rstrip(' ') + '|'
# Simple line
if second == '\n':
return first + ' '
return first + second
restructured = reduce(restructure, contents, '').rstrip()
split = restructured.split('|')
def addProps(details, char):
if char == ' ':
return details
details.update({char: True})
return details
def buildAnswers(entry):
return reduce(addProps, entry, {})
listOfAnswers = list(map(buildAnswers, split))
answersCount = list(map(lambda x: len(x.values()), listOfAnswers))
sumOfCounts = reduce(lambda f, s: f + s, answersCount)
print(sumOfCounts)
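# Worked example: for an input containing "ab\nbc\n\ncd\n", the reduce step
# builds "ab bc|cd" (lines of a group joined by spaces, groups separated by '|');
# buildAnswers() then collects {'a', 'b', 'c'} and {'c', 'd'} as dict keys,
# so sumOfCounts = 3 + 2 = 5 -- the number of distinct answer characters per
# group, summed over all groups.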
| [
"functools.reduce"
] | [((746, 786), 'functools.reduce', 'reduce', (['(lambda f, s: f + s)', 'answersCount'], {}), '(lambda f, s: f + s, answersCount)\n', (752, 786), False, 'from functools import reduce\n'), ((588, 615), 'functools.reduce', 'reduce', (['addProps', 'entry', '{}'], {}), '(addProps, entry, {})\n', (594, 615), False, 'from functools import reduce\n'), ((349, 382), 'functools.reduce', 'reduce', (['restructure', 'contents', '""""""'], {}), "(restructure, contents, '')\n", (355, 382), False, 'from functools import reduce\n')] |
from flask import Flask
from werkzeug.local import Local
from threading import Thread
app = Flask(__name__)
local = Local()
local.request = '123'
class myThread(Thread):
def run(self):
local.request = 'abc'
        print('Child thread:', local.request)
@app.route('/')
def hello_world():
mythead = myThread()
mythead.start()
mythead.join()
    # print('Main thread:', local.request)
# print(local.request)
return 'Hello World!'
if __name__ == '__main__':
app.run(debug=True)
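# Expected behaviour of the werkzeug Local used above: the child thread sets
# and prints its own value ('abc'), while the main thread's local.request
# remains '123', because Local keeps one value per thread/greenlet identity.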
| [
"werkzeug.local.Local",
"flask.Flask"
] | [((93, 108), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (98, 108), False, 'from flask import Flask\n'), ((118, 125), 'werkzeug.local.Local', 'Local', ([], {}), '()\n', (123, 125), False, 'from werkzeug.local import Local\n')] |
from types import SimpleNamespace
from jina.executors import BaseExecutor
def test_exec_from_python():
be = BaseExecutor(metas={'name': 'hello', 'random_name': 'random_value'})
assert be.metas.name == 'hello'
assert be.metas.random_name == 'random_value'
def test_runtime_args():
b = BaseExecutor.load_config(
'BaseExecutor', metas={'name': 'b123'}, runtime_args={'hello': 'world'}
)
assert b.runtime_args.hello == 'world'
assert b.metas.name == 'b123'
def test_default_args_from_load_config():
b = BaseExecutor.load_config('!BaseExecutor {}')
assert isinstance(b.runtime_args, SimpleNamespace)
assert isinstance(b.metas, SimpleNamespace)
# name is always auto-assigned
assert b.metas.name
def test_runtime_args_from_load_config():
y = '''
!BaseExecutor
metas:
name: my-mwu-encoder
workspace: ./
'''
b = BaseExecutor.load_config(y)
assert b.metas.workspace == './'
assert b.metas.name == 'my-mwu-encoder'
def test_default_args_from_python():
b = BaseExecutor()
assert isinstance(b.runtime_args, SimpleNamespace)
assert isinstance(b.metas, SimpleNamespace)
# name is always auto-assigned
assert b.metas.name
| [
"jina.executors.BaseExecutor",
"jina.executors.BaseExecutor.load_config"
] | [((115, 183), 'jina.executors.BaseExecutor', 'BaseExecutor', ([], {'metas': "{'name': 'hello', 'random_name': 'random_value'}"}), "(metas={'name': 'hello', 'random_name': 'random_value'})\n", (127, 183), False, 'from jina.executors import BaseExecutor\n'), ((305, 406), 'jina.executors.BaseExecutor.load_config', 'BaseExecutor.load_config', (['"""BaseExecutor"""'], {'metas': "{'name': 'b123'}", 'runtime_args': "{'hello': 'world'}"}), "('BaseExecutor', metas={'name': 'b123'},\n runtime_args={'hello': 'world'})\n", (329, 406), False, 'from jina.executors import BaseExecutor\n'), ((547, 591), 'jina.executors.BaseExecutor.load_config', 'BaseExecutor.load_config', (['"""!BaseExecutor {}"""'], {}), "('!BaseExecutor {}')\n", (571, 591), False, 'from jina.executors import BaseExecutor\n'), ((888, 915), 'jina.executors.BaseExecutor.load_config', 'BaseExecutor.load_config', (['y'], {}), '(y)\n', (912, 915), False, 'from jina.executors import BaseExecutor\n'), ((1045, 1059), 'jina.executors.BaseExecutor', 'BaseExecutor', ([], {}), '()\n', (1057, 1059), False, 'from jina.executors import BaseExecutor\n')] |
import os
from pprint import pprint
from docx import Document
startpath = 'session15/Tageshoroskope 2020'
doclist = []
yearlist =[]
for i in os.walk(startpath):
for d in i[2]:
if d.endswith(".docx"):
n = i[0].replace('./', '') + '/' + d
doclist.append(n)
print(n)
for wordfile in doclist:
horolist = {"Widder": "", "Stier": "", "Zwilling": "", "Krebs": "", "Löwe": "", "Jungfrau": "", "Waage": "", "Skorpion": "", "Schütze": "", "Steinbock": "", "Wassermann": "", "Fische": ""}
doc = Document(wordfile)
start = False
text = ""
for paragraph in doc.paragraphs:
if paragraph.text.startswith("Tageshoroskop für"):
datum = paragraph.text.replace("Tageshoroskop für", '').strip()
weiter = False
for tierkreiszeichen in horolist.keys():
if paragraph.text.startswith(tierkreiszeichen) and start:
start = False
horolist[actualZodiac] = datum, text.strip()
text = ""
if paragraph.text.startswith(tierkreiszeichen) and not start:
actualZodiac = tierkreiszeichen
start = True
weiter = True
break
if weiter:
continue
if start:
text += paragraph.text + " "
    text = text.replace("  ", " ")  # assign the result; previously this replace was a no-op
horolist[actualZodiac]=datum, text.strip()
yearlist.append(horolist)
for tierkreiszeichen in horolist.keys():
f = open(tierkreiszeichen + ".csv", "w")
for day in yearlist:
# print(day[tierkreiszeichen])
# if len(day[tierkreiszeichen]) > 1:
# f.write(f"{{{day[tierkreiszeichen][0]}|{day[tierkreiszeichen][1]}}}\n")
# else:
# f.write(f"{{{day[tierkreiszeichen][0]}| }}\n")
# if len(day[tierkreiszeichen]) <= 1:
# print("-------\n")
# print(day[tierkreiszeichen])
# print("\n" + tierkreiszeichen + "\n-------\n")
f.write(f"{day[tierkreiszeichen]}\n")
f.close()
| [
"os.walk",
"docx.Document"
] | [((143, 161), 'os.walk', 'os.walk', (['startpath'], {}), '(startpath)\n', (150, 161), False, 'import os\n'), ((545, 563), 'docx.Document', 'Document', (['wordfile'], {}), '(wordfile)\n', (553, 563), False, 'from docx import Document\n')] |
import scipy.io as sio
import numpy as np
import pandas as pd
import sys
import os
import shutil
class ADEIndex:
def __init__(self, refreshCSVs=False):
self.image_index = None
self.object_name_list = None
self.object_image_matrix = None
self._CSVsExist = False
self._csv_folderpath = os.path.join(sys.path[0], 'csvIndexes')
self.num_images_total = None
if os.path.exists(self._csv_folderpath)\
and os.path.exists(os.path.join(self._csv_folderpath, 'image_index.csv'))\
and os.path.exists(os.path.join(self._csv_folderpath, 'object_name_list.csv'))\
and os.path.exists(os.path.join(self._csv_folderpath,'object_image_matrix.csv')):
print("Now loading data from CSV files")
self.image_index = pd.read_csv(os.path.join(self._csv_folderpath, 'image_index.csv'))
self.object_name_list = pd.read_csv(os.path.join(self._csv_folderpath, 'object_name_list.csv'))
self.object_image_matrix = pd.read_csv(os.path.join(self._csv_folderpath, 'object_image_matrix.csv'))
self._CSVsExist = True
self.num_images_total = self.image_index.shape[0]
else:
_mat_filename = os.path.join(sys.path[0], 'ADE20K_2016_07_26', 'index_ade20k.mat')
try:
_mat_contents = sio.loadmat(_mat_filename)
except FileNotFoundError:
print("index_ade20k.mat was not found, likely due to a problem during package setup.")
print('You can resolve this error by manually placing index_ade20k.mat'
+ ' (available from https://groups.csail.mit.edu/vision/datasets/ADE20K/)'
+ ' into ./ADE20K_2016_07_26/')
return
# exit()
print("No CSVs found - will save CSVs after loading MATLAB data")
_matindex = _mat_contents['index'][0,0]
# When read with scipy, the MATLAB index does NOT have a consistent row
# or column structure.
# The columns are transposed occasionally because otherwise they don't fit
# together - they're imported from MATLAB with a bunch of inconsistent
# dimensions.
self.num_images_total = _matindex[_matindex.dtype.names[1]].size
# putting image attributes in a DataFrame
_filename_col_nested = pd.DataFrame(_matindex['filename'].T, columns=['filename'])
_filename_col = pd.DataFrame(columns=['filename'])
for index, row in _filename_col_nested.iterrows():
_filename_col.loc[index] = _filename_col_nested['filename'][index][0]
_folder_col_nested = pd.DataFrame(_matindex['folder'].T, columns=['folder'])
_folder_col = pd.DataFrame(columns=['folder'])
for index, row in _folder_col_nested.iterrows():
_folder_col.loc[index] = _folder_col_nested['folder'][index][0]
# I don't know what this column is for (it's not documented on the dataset site)
_typeset_col = pd.DataFrame(_matindex['typeset'], columns=['typeset'])
# scene type of each image
_scene_col = pd.DataFrame(_matindex['scene'].T, columns=['scene'])
# putting the columns together
_int_indexed_image_index = pd.concat([_filename_col, _folder_col, _typeset_col, _scene_col], axis=1)
self.image_index = _int_indexed_image_index.set_index('filename')
# Need filename col to be the index AND a query-able column
# (because conversion to csv makes the index just an int)
# self.image_index = pd.concat([self.image_index, filename_col], axis=1)
# print(image_index.index)
# print(image_index)
# print(image_index['ADE_train_00011093.jpg'])
# image_index.to_csv("csvIndexes/image_index.csv")
# print(image_index['ADE_train_00011093.jpg'])
# -------
# Putting object attributes in a DataFrame
object_name_list_nested = pd.DataFrame(_matindex['objectnames'].T, columns=['objectnames'])
self.object_name_list = pd.DataFrame(columns=['objectnames'])
for index, row in object_name_list_nested.iterrows():
self.object_name_list.loc[index] = object_name_list_nested['objectnames'][index][0]
# ----
# Extracting object frequency matrix (gives number of times each object
# in the list of objects occurs in each image)
# We could have gotten this ourselves from the text files in each
# image-segmap directory if we wanted, but the parsing format is not fun,
# so I decided to stick with converting the MATLAB code
# image filenames are rows, and words (object names) are columns
self.object_image_matrix = pd.DataFrame(_matindex['objectPresence'].T,
columns=self.object_name_list['objectnames'],
index=_filename_col['filename'])
# object_cols_that_match = object_image_matrix.loc[:,[x for x in object_image_matrix.columns if 'vcr' in x]]
# for (colName, colData) in object_cols_that_match.iteritems():
# image_rows_to_add = object_image_matrix.loc[object_image_matrix[colName] != 0]
# print(image_rows_to_add)
if refreshCSVs or (not self._CSVsExist):
if os.path.exists(self._csv_folderpath):
shutil.rmtree(self._csv_folderpath)
os.mkdir(self._csv_folderpath)
print("Now saving CSV files")
self.save_all_CSVs()
print("Your CSV files are now toasty and warm")
# Function to produce all 3 CSV files
# THE LAST ONE IS KINDA BIG (for a CSV) - around 300 MB
def save_all_CSVs(self):
self.image_index.to_csv(os.path.join(self._csv_folderpath,"image_index.csv"))
self.object_name_list.to_csv(os.path.join(self._csv_folderpath, 'object_name_list.csv'))
self.object_image_matrix.to_csv(os.path.join(self._csv_folderpath,"object_image_matrix.csv")) | [
"os.path.exists",
"scipy.io.loadmat",
"os.path.join",
"os.mkdir",
"shutil.rmtree",
"pandas.DataFrame",
"pandas.concat"
] | [((311, 350), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""csvIndexes"""'], {}), "(sys.path[0], 'csvIndexes')\n", (323, 350), False, 'import os\n'), ((392, 428), 'os.path.exists', 'os.path.exists', (['self._csv_folderpath'], {}), '(self._csv_folderpath)\n', (406, 428), False, 'import os\n'), ((1155, 1221), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""ADE20K_2016_07_26"""', '"""index_ade20k.mat"""'], {}), "(sys.path[0], 'ADE20K_2016_07_26', 'index_ade20k.mat')\n", (1167, 1221), False, 'import os\n'), ((2207, 2266), 'pandas.DataFrame', 'pd.DataFrame', (["_matindex['filename'].T"], {'columns': "['filename']"}), "(_matindex['filename'].T, columns=['filename'])\n", (2219, 2266), True, 'import pandas as pd\n'), ((2296, 2330), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['filename']"}), "(columns=['filename'])\n", (2308, 2330), True, 'import pandas as pd\n'), ((2495, 2550), 'pandas.DataFrame', 'pd.DataFrame', (["_matindex['folder'].T"], {'columns': "['folder']"}), "(_matindex['folder'].T, columns=['folder'])\n", (2507, 2550), True, 'import pandas as pd\n'), ((2572, 2604), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['folder']"}), "(columns=['folder'])\n", (2584, 2604), True, 'import pandas as pd\n'), ((2841, 2896), 'pandas.DataFrame', 'pd.DataFrame', (["_matindex['typeset']"], {'columns': "['typeset']"}), "(_matindex['typeset'], columns=['typeset'])\n", (2853, 2896), True, 'import pandas as pd\n'), ((2950, 3003), 'pandas.DataFrame', 'pd.DataFrame', (["_matindex['scene'].T"], {'columns': "['scene']"}), "(_matindex['scene'].T, columns=['scene'])\n", (2962, 3003), True, 'import pandas as pd\n'), ((3075, 3148), 'pandas.concat', 'pd.concat', (['[_filename_col, _folder_col, _typeset_col, _scene_col]'], {'axis': '(1)'}), '([_filename_col, _folder_col, _typeset_col, _scene_col], axis=1)\n', (3084, 3148), True, 'import pandas as pd\n'), ((3757, 3822), 'pandas.DataFrame', 'pd.DataFrame', (["_matindex['objectnames'].T"], {'columns': "['objectnames']"}), "(_matindex['objectnames'].T, columns=['objectnames'])\n", (3769, 3822), True, 'import pandas as pd\n'), ((3854, 3891), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['objectnames']"}), "(columns=['objectnames'])\n", (3866, 3891), True, 'import pandas as pd\n'), ((4511, 4638), 'pandas.DataFrame', 'pd.DataFrame', (["_matindex['objectPresence'].T"], {'columns': "self.object_name_list['objectnames']", 'index': "_filename_col['filename']"}), "(_matindex['objectPresence'].T, columns=self.object_name_list[\n 'objectnames'], index=_filename_col['filename'])\n", (4523, 4638), True, 'import pandas as pd\n'), ((5080, 5116), 'os.path.exists', 'os.path.exists', (['self._csv_folderpath'], {}), '(self._csv_folderpath)\n', (5094, 5116), False, 'import os\n'), ((5168, 5198), 'os.mkdir', 'os.mkdir', (['self._csv_folderpath'], {}), '(self._csv_folderpath)\n', (5176, 5198), False, 'import os\n'), ((5470, 5523), 'os.path.join', 'os.path.join', (['self._csv_folderpath', '"""image_index.csv"""'], {}), "(self._csv_folderpath, 'image_index.csv')\n", (5482, 5523), False, 'import os\n'), ((5557, 5615), 'os.path.join', 'os.path.join', (['self._csv_folderpath', '"""object_name_list.csv"""'], {}), "(self._csv_folderpath, 'object_name_list.csv')\n", (5569, 5615), False, 'import os\n'), ((5653, 5714), 'os.path.join', 'os.path.join', (['self._csv_folderpath', '"""object_image_matrix.csv"""'], {}), "(self._csv_folderpath, 'object_image_matrix.csv')\n", (5665, 5714), False, 'import os\n'), ((455, 508), 'os.path.join', 'os.path.join', 
(['self._csv_folderpath', '"""image_index.csv"""'], {}), "(self._csv_folderpath, 'image_index.csv')\n", (467, 508), False, 'import os\n'), ((536, 594), 'os.path.join', 'os.path.join', (['self._csv_folderpath', '"""object_name_list.csv"""'], {}), "(self._csv_folderpath, 'object_name_list.csv')\n", (548, 594), False, 'import os\n'), ((622, 683), 'os.path.join', 'os.path.join', (['self._csv_folderpath', '"""object_image_matrix.csv"""'], {}), "(self._csv_folderpath, 'object_image_matrix.csv')\n", (634, 683), False, 'import os\n'), ((770, 823), 'os.path.join', 'os.path.join', (['self._csv_folderpath', '"""image_index.csv"""'], {}), "(self._csv_folderpath, 'image_index.csv')\n", (782, 823), False, 'import os\n'), ((867, 925), 'os.path.join', 'os.path.join', (['self._csv_folderpath', '"""object_name_list.csv"""'], {}), "(self._csv_folderpath, 'object_name_list.csv')\n", (879, 925), False, 'import os\n'), ((972, 1033), 'os.path.join', 'os.path.join', (['self._csv_folderpath', '"""object_image_matrix.csv"""'], {}), "(self._csv_folderpath, 'object_image_matrix.csv')\n", (984, 1033), False, 'import os\n'), ((1258, 1284), 'scipy.io.loadmat', 'sio.loadmat', (['_mat_filename'], {}), '(_mat_filename)\n', (1269, 1284), True, 'import scipy.io as sio\n'), ((5126, 5161), 'shutil.rmtree', 'shutil.rmtree', (['self._csv_folderpath'], {}), '(self._csv_folderpath)\n', (5139, 5161), False, 'import shutil\n')] |
from Homework_6.Exercise_1 import book_dict, base_url_book, add_new_item, delete_item_finally
import random
import pytest
#Fixture with parameters for TestAddItemIdFunc
roles_list = [({"name": "Mtsiri",
"type": "classic",
"book": "http://pulse-rest-testing.herokuapp.com/books/6631",
"level": 1212
}, 22),
({"name": "Mtsiri",
"type": "classic",
"book": "http://pulse-rest-testing.herokuapp.com/books/6631",
"level": 1212,
"id": 22
}, 1)]
@pytest.fixture(scope="function", params=roles_list, ids=["id_addition", "id_replace"])
def param_test(request):
return request.param
#Fixture with parameters for TestAddNewRoleExept
@pytest.fixture()
def book_create():
return add_new_item(base_url_book, book_dict)
book_id = book_create # looks weird, but my fixture doesn't work without this var declaration
wrong_roles_list = [({"name": None,
"type":"detective",
"book": "{}{}".format(base_url_book, book_id),
"level": 100500
}, "Item hasn't been added"),
({"name": "Mtsiri",
"type": None,
"book": "{}{}".format(base_url_book, book_id),
"level": 100500
}, "Item hasn't been added"),
({"name": "Mtsiri",
"type": "classic",
"book": "{}{}".format(base_url_book, book_id),
"level": "level"
}, "Item hasn't been added"),
({"name": "Mtsiri",
"type": "classic",
"book": "{}{}".format(base_url_book, str(random.randint(4000000000, 9120000001))),
"level": 1212}, "Item hasn't been added")]
@pytest.fixture(scope="function", params=wrong_roles_list, ids=["without_name", "without_type", "str_level", "wrong_book"])
def wrong_roles_test(book_create, request):
book_id = book_create
yield request.param
delete_item_finally(base_url_book, book_id)
| [
"pytest.fixture",
"Homework_6.Exercise_1.add_new_item",
"random.randint",
"Homework_6.Exercise_1.delete_item_finally"
] | [((602, 692), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""', 'params': 'roles_list', 'ids': "['id_addition', 'id_replace']"}), "(scope='function', params=roles_list, ids=['id_addition',\n 'id_replace'])\n", (616, 692), False, 'import pytest\n'), ((793, 809), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (807, 809), False, 'import pytest\n'), ((1911, 2038), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""', 'params': 'wrong_roles_list', 'ids': "['without_name', 'without_type', 'str_level', 'wrong_book']"}), "(scope='function', params=wrong_roles_list, ids=[\n 'without_name', 'without_type', 'str_level', 'wrong_book'])\n", (1925, 2038), False, 'import pytest\n'), ((840, 878), 'Homework_6.Exercise_1.add_new_item', 'add_new_item', (['base_url_book', 'book_dict'], {}), '(base_url_book, book_dict)\n', (852, 878), False, 'from Homework_6.Exercise_1 import book_dict, base_url_book, add_new_item, delete_item_finally\n'), ((2132, 2175), 'Homework_6.Exercise_1.delete_item_finally', 'delete_item_finally', (['base_url_book', 'book_id'], {}), '(base_url_book, book_id)\n', (2151, 2175), False, 'from Homework_6.Exercise_1 import book_dict, base_url_book, add_new_item, delete_item_finally\n'), ((1804, 1842), 'random.randint', 'random.randint', (['(4000000000)', '(9120000001)'], {}), '(4000000000, 9120000001)\n', (1818, 1842), False, 'import random\n')] |
# coding: utf-8
###
# @file dist_run.py
# @author <NAME> <<EMAIL>>
#
# @section LICENSE
#
# Copyright (c) 2019 <NAME>.
#
# @section DESCRIPTION
#
# Distributed setup code. Just for trying basic constructs of distributed learning in PyTorch. Based on: https://pytorch.org/tutorials/intermediate/dist_tuto.html
###
#!/usr/bin/env python
import os
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
""" All-Reduce example."""
def run(rank, size):
""" Simple point-to-point communication. """
group = dist.new_group([0, 1])
tensor = torch.ones(1)
dist.all_reduce(tensor, op=dist.ReduceOp.SUM, group=group)
print('Rank ', rank, ' has data ', tensor[0])
def init_processes(rank, size, fn, backend='gloo'):
""" Initialize the distributed environment. """
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '10001'
dist.init_process_group(backend, rank=rank, world_size=size)
fn(rank, size)
if __name__ == "__main__":
size = 2
processes = []
for rank in range(size):
p = Process(target=init_processes, args=(rank, size, run))
p.start()
processes.append(p)
for p in processes:
p.join()
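# Expected result (sketch): with world_size=2, every rank contributes a tensor
# of ones to the SUM all-reduce, so both processes print a value of 2
# (the order of the two print lines is not deterministic).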
| [
"torch.multiprocessing.Process",
"torch.distributed.new_group",
"torch.distributed.all_reduce",
"torch.distributed.init_process_group",
"torch.ones"
] | [((557, 579), 'torch.distributed.new_group', 'dist.new_group', (['[0, 1]'], {}), '([0, 1])\n', (571, 579), True, 'import torch.distributed as dist\n'), ((593, 606), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (603, 606), False, 'import torch\n'), ((611, 669), 'torch.distributed.all_reduce', 'dist.all_reduce', (['tensor'], {'op': 'dist.ReduceOp.SUM', 'group': 'group'}), '(tensor, op=dist.ReduceOp.SUM, group=group)\n', (626, 669), True, 'import torch.distributed as dist\n'), ((913, 973), 'torch.distributed.init_process_group', 'dist.init_process_group', (['backend'], {'rank': 'rank', 'world_size': 'size'}), '(backend, rank=rank, world_size=size)\n', (936, 973), True, 'import torch.distributed as dist\n'), ((1095, 1149), 'torch.multiprocessing.Process', 'Process', ([], {'target': 'init_processes', 'args': '(rank, size, run)'}), '(target=init_processes, args=(rank, size, run))\n', (1102, 1149), False, 'from torch.multiprocessing import Process\n')] |
#!/usr/bin/env python
import rasterio
import fiona
import numpy as np
import os
import time
from rasterio.plot import show
import matplotlib.pyplot as plt
from projections.rasterset import RasterSet, Raster
import projections.predicts as predicts
import projections.r2py.modelr as modelr
# Open the mask shape file
shp_file = os.path.join(os.environ['DATA_ROOT'],
'from-adriana/tropicalforests.shp')
shapes = fiona.open(shp_file)
# Read Adriana's abundance model (mainland)
mod = modelr.load(os.path.join(os.environ['MODEL_DIR'],
'ab-model.rds'))
predicts.predictify(mod)
# Import standard PREDICTS rasters
rasters = predicts.rasterset('luh5', 'historical', 1990, True)
rs = RasterSet(rasters, shapes = shapes, all_touched = True)
what = mod.output
rs[mod.output] = mod
stime = time.time()
data1, meta_data1 = rs.eval(what)
etime = time.time()
print("executed in %6.2fs" % (etime - stime))
show(data1)
##
## Compare with good raster
##
out = rasterio.open('adrid-good.tif')
good = out.read(1, masked=True)
diff = np.fabs(data1 - good)
print("max diff: %f" % diff.max())
assert np.allclose(data1, good, atol=1e-05, equal_nan=True)
del out
##
## Redo the projection using iterative API
##
mod = modelr.load('../models/ab-corrected.rds')
predicts.predictify(mod)
# Import standard PREDICTS rasters
rasters2 = predicts.rasterset('rcp', 'aim', 2020, 'medium')
rs2 = RasterSet(rasters2, shapes = shapes, all_touched = True)
rs2[mod.output] = mod
stime = time.time()
rs2.write(what, 'adrid.tif')
etime = time.time()
print("executed in %6.2fs" % (etime - stime))
out = rasterio.open('adrid.tif')
data2 = out.read(1, masked=True)
diff = np.fabs(data1 - data2)
print("max diff: %f" % diff.max())
plot = None
if plot:
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(10, 5))
show(data1, ax=ax1, cmap='Greens', title='Non-incremental')
show(data2, ax=ax2, cmap='Greens', title='Incremental')
show(diff, ax=ax3, cmap='viridis', title='Difference')
plt.show()
# Verify the data matches
assert np.allclose(data1, data2, atol=1e-05, equal_nan=True)
| [
"numpy.fabs",
"numpy.allclose",
"projections.rasterset.RasterSet",
"rasterio.open",
"projections.predicts.rasterset",
"os.path.join",
"rasterio.plot.show",
"projections.predicts.predictify",
"fiona.open",
"projections.r2py.modelr.load",
"time.time",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((330, 403), 'os.path.join', 'os.path.join', (["os.environ['DATA_ROOT']", '"""from-adriana/tropicalforests.shp"""'], {}), "(os.environ['DATA_ROOT'], 'from-adriana/tropicalforests.shp')\n", (342, 403), False, 'import os\n'), ((437, 457), 'fiona.open', 'fiona.open', (['shp_file'], {}), '(shp_file)\n', (447, 457), False, 'import fiona\n'), ((607, 631), 'projections.predicts.predictify', 'predicts.predictify', (['mod'], {}), '(mod)\n', (626, 631), True, 'import projections.predicts as predicts\n'), ((678, 730), 'projections.predicts.rasterset', 'predicts.rasterset', (['"""luh5"""', '"""historical"""', '(1990)', '(True)'], {}), "('luh5', 'historical', 1990, True)\n", (696, 730), True, 'import projections.predicts as predicts\n'), ((736, 787), 'projections.rasterset.RasterSet', 'RasterSet', (['rasters'], {'shapes': 'shapes', 'all_touched': '(True)'}), '(rasters, shapes=shapes, all_touched=True)\n', (745, 787), False, 'from projections.rasterset import RasterSet, Raster\n'), ((840, 851), 'time.time', 'time.time', ([], {}), '()\n', (849, 851), False, 'import time\n'), ((894, 905), 'time.time', 'time.time', ([], {}), '()\n', (903, 905), False, 'import time\n'), ((952, 963), 'rasterio.plot.show', 'show', (['data1'], {}), '(data1)\n', (956, 963), False, 'from rasterio.plot import show\n'), ((1005, 1036), 'rasterio.open', 'rasterio.open', (['"""adrid-good.tif"""'], {}), "('adrid-good.tif')\n", (1018, 1036), False, 'import rasterio\n'), ((1076, 1097), 'numpy.fabs', 'np.fabs', (['(data1 - good)'], {}), '(data1 - good)\n', (1083, 1097), True, 'import numpy as np\n'), ((1140, 1192), 'numpy.allclose', 'np.allclose', (['data1', 'good'], {'atol': '(1e-05)', 'equal_nan': '(True)'}), '(data1, good, atol=1e-05, equal_nan=True)\n', (1151, 1192), True, 'import numpy as np\n'), ((1257, 1298), 'projections.r2py.modelr.load', 'modelr.load', (['"""../models/ab-corrected.rds"""'], {}), "('../models/ab-corrected.rds')\n", (1268, 1298), True, 'import projections.r2py.modelr as modelr\n'), ((1299, 1323), 'projections.predicts.predictify', 'predicts.predictify', (['mod'], {}), '(mod)\n', (1318, 1323), True, 'import projections.predicts as predicts\n'), ((1371, 1419), 'projections.predicts.rasterset', 'predicts.rasterset', (['"""rcp"""', '"""aim"""', '(2020)', '"""medium"""'], {}), "('rcp', 'aim', 2020, 'medium')\n", (1389, 1419), True, 'import projections.predicts as predicts\n'), ((1426, 1478), 'projections.rasterset.RasterSet', 'RasterSet', (['rasters2'], {'shapes': 'shapes', 'all_touched': '(True)'}), '(rasters2, shapes=shapes, all_touched=True)\n', (1435, 1478), False, 'from projections.rasterset import RasterSet, Raster\n'), ((1514, 1525), 'time.time', 'time.time', ([], {}), '()\n', (1523, 1525), False, 'import time\n'), ((1563, 1574), 'time.time', 'time.time', ([], {}), '()\n', (1572, 1574), False, 'import time\n'), ((1628, 1654), 'rasterio.open', 'rasterio.open', (['"""adrid.tif"""'], {}), "('adrid.tif')\n", (1641, 1654), False, 'import rasterio\n'), ((1695, 1717), 'numpy.fabs', 'np.fabs', (['(data1 - data2)'], {}), '(data1 - data2)\n', (1702, 1717), True, 'import numpy as np\n'), ((2062, 2115), 'numpy.allclose', 'np.allclose', (['data1', 'data2'], {'atol': '(1e-05)', 'equal_nan': '(True)'}), '(data1, data2, atol=1e-05, equal_nan=True)\n', (2073, 2115), True, 'import numpy as np\n'), ((521, 574), 'os.path.join', 'os.path.join', (["os.environ['MODEL_DIR']", '"""ab-model.rds"""'], {}), "(os.environ['MODEL_DIR'], 'ab-model.rds')\n", (533, 574), False, 'import os\n'), ((1800, 1835), 'matplotlib.pyplot.subplots', 
'plt.subplots', (['(1)', '(3)'], {'figsize': '(10, 5)'}), '(1, 3, figsize=(10, 5))\n', (1812, 1835), True, 'import matplotlib.pyplot as plt\n'), ((1838, 1897), 'rasterio.plot.show', 'show', (['data1'], {'ax': 'ax1', 'cmap': '"""Greens"""', 'title': '"""Non-incremental"""'}), "(data1, ax=ax1, cmap='Greens', title='Non-incremental')\n", (1842, 1897), False, 'from rasterio.plot import show\n'), ((1900, 1955), 'rasterio.plot.show', 'show', (['data2'], {'ax': 'ax2', 'cmap': '"""Greens"""', 'title': '"""Incremental"""'}), "(data2, ax=ax2, cmap='Greens', title='Incremental')\n", (1904, 1955), False, 'from rasterio.plot import show\n'), ((1958, 2012), 'rasterio.plot.show', 'show', (['diff'], {'ax': 'ax3', 'cmap': '"""viridis"""', 'title': '"""Difference"""'}), "(diff, ax=ax3, cmap='viridis', title='Difference')\n", (1962, 2012), False, 'from rasterio.plot import show\n'), ((2015, 2025), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2023, 2025), True, 'import matplotlib.pyplot as plt\n')] |
# Generated by Django 3.1.6 on 2021-03-16 17:03
from django.db import migrations, models
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
("gym", "0002_auto_20210316_1535"),
]
operations = [
migrations.RenameField(
model_name="gym",
old_name="photo_3",
new_name="gym_photo_main",
),
migrations.RemoveField(
model_name="gym",
name="photo_4",
),
migrations.RemoveField(
model_name="gym",
name="photo_5",
),
migrations.RemoveField(
model_name="gym",
name="photo_6",
),
migrations.RemoveField(
model_name="gym",
name="profile_photo_main",
),
migrations.AddField(
model_name="gym",
name="country",
field=django_countries.fields.CountryField(
default="DEFAULT", max_length=50
),
preserve_default=False,
),
migrations.AddField(
model_name="gym",
name="postcode",
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.AddField(
model_name="gym",
name="street_address1",
field=models.CharField(default="DEFAULT ADDRESS", max_length=80),
preserve_default=False,
),
migrations.AddField(
model_name="gym",
name="street_address2",
field=models.CharField(blank=True, max_length=80, null=True),
),
migrations.AddField(
model_name="gym",
name="town_or_city",
field=models.CharField(default="DEFAULT", max_length=40),
preserve_default=False,
),
]
| [
"django.db.migrations.RemoveField",
"django.db.migrations.RenameField",
"django.db.models.CharField"
] | [((262, 354), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""gym"""', 'old_name': '"""photo_3"""', 'new_name': '"""gym_photo_main"""'}), "(model_name='gym', old_name='photo_3', new_name=\n 'gym_photo_main')\n", (284, 354), False, 'from django.db import migrations, models\n'), ((406, 462), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""gym"""', 'name': '"""photo_4"""'}), "(model_name='gym', name='photo_4')\n", (428, 462), False, 'from django.db import migrations, models\n'), ((507, 563), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""gym"""', 'name': '"""photo_5"""'}), "(model_name='gym', name='photo_5')\n", (529, 563), False, 'from django.db import migrations, models\n'), ((608, 664), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""gym"""', 'name': '"""photo_6"""'}), "(model_name='gym', name='photo_6')\n", (630, 664), False, 'from django.db import migrations, models\n'), ((709, 776), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""gym"""', 'name': '"""profile_photo_main"""'}), "(model_name='gym', name='profile_photo_main')\n", (731, 776), False, 'from django.db import migrations, models\n'), ((1173, 1227), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)', 'null': '(True)'}), '(blank=True, max_length=20, null=True)\n', (1189, 1227), False, 'from django.db import migrations, models\n'), ((1353, 1411), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""DEFAULT ADDRESS"""', 'max_length': '(80)'}), "(default='DEFAULT ADDRESS', max_length=80)\n", (1369, 1411), False, 'from django.db import migrations, models\n'), ((1573, 1627), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(80)', 'null': '(True)'}), '(blank=True, max_length=80, null=True)\n', (1589, 1627), False, 'from django.db import migrations, models\n'), ((1750, 1800), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""DEFAULT"""', 'max_length': '(40)'}), "(default='DEFAULT', max_length=40)\n", (1766, 1800), False, 'from django.db import migrations, models\n')] |
# gdb -n -q -x solve.py ./esrever
import gdb
import re
gdb.execute("set disassembly intel")
gdb.execute("set pagination off")
gdb.execute("break *0x555555554ba0")
gdb.execute("run << /dev/null")
flag = ""
while True:
gdb.execute("nexti")
line = gdb.execute("x/1i $rip", to_string=True)
r = re.findall("\t([a-z]+) ", line)
if r == [] or r[0] != 'cmp':
continue
r = re.findall("QWORD PTR \[rbp(.+)\]", line)
if r == []:
continue
ofs = int(r[0], 16)
line = gdb.execute("x/1bx $rbp+({})".format(ofs), to_string=True)
c = line.split(":\t")[1]
flag += chr(int(c, 16))
print(flag)
gdb.execute("set $rax={}".format(c))
| [
"re.findall",
"gdb.execute"
] | [((56, 92), 'gdb.execute', 'gdb.execute', (['"""set disassembly intel"""'], {}), "('set disassembly intel')\n", (67, 92), False, 'import gdb\n'), ((93, 126), 'gdb.execute', 'gdb.execute', (['"""set pagination off"""'], {}), "('set pagination off')\n", (104, 126), False, 'import gdb\n'), ((127, 163), 'gdb.execute', 'gdb.execute', (['"""break *0x555555554ba0"""'], {}), "('break *0x555555554ba0')\n", (138, 163), False, 'import gdb\n'), ((164, 195), 'gdb.execute', 'gdb.execute', (['"""run << /dev/null"""'], {}), "('run << /dev/null')\n", (175, 195), False, 'import gdb\n'), ((223, 243), 'gdb.execute', 'gdb.execute', (['"""nexti"""'], {}), "('nexti')\n", (234, 243), False, 'import gdb\n'), ((255, 295), 'gdb.execute', 'gdb.execute', (['"""x/1i $rip"""'], {'to_string': '(True)'}), "('x/1i $rip', to_string=True)\n", (266, 295), False, 'import gdb\n'), ((304, 335), 're.findall', 're.findall', (['"""\t([a-z]+) """', 'line'], {}), "('\\t([a-z]+) ', line)\n", (314, 335), False, 'import re\n'), ((394, 437), 're.findall', 're.findall', (['"""QWORD PTR \\\\[rbp(.+)\\\\]"""', 'line'], {}), "('QWORD PTR \\\\[rbp(.+)\\\\]', line)\n", (404, 437), False, 'import re\n')] |
#!/usr/bin/env python
import os.path
import subprocess
release = True
__version__ = '1.3.0'
__version_name__ = "Xenia"
_repository_path = os.path.split(__file__)[0]
_git_file_path = os.path.join(_repository_path, '__git_version__.py')
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
env=env).communicate()[0]
return out
def get_git_branch():
'''
Gets the current Git branch.
'''
try:
out = _minimal_ext_cmd(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
branch = out.strip().decode('ascii')
except:
branch = ''
return branch
def get_git_hash():
'''
Gets the last GIT commit hash and date for the repository, using the
path to this file.
'''
try:
out = _minimal_ext_cmd(['git', 'rev-parse', '--short', 'HEAD'])
revision = out.strip().decode('ascii')
except:
revision = ''
return revision
def get_git_date(git_hash):
'''
Gets the date of the last commit.
'''
try:
out = _minimal_ext_cmd(['git', 'show', git_hash, '--date=short',
'--format="%ad"'])
date = out.strip().decode('ascii').split('"')[1]
except:
date = ''
return date
def get_git_revision():
git_branch = get_git_branch()
git_hash = get_git_hash()
git_date = get_git_date(git_hash)
if git_branch:
rev = '.dev0+%s-%s(%s)' % (git_branch, git_hash, git_date)
else:
rev = ''
return rev
def write_git_version():
'''
Write the GIT revision to a file.
'''
rev = get_git_revision()
if rev == "":
if os.path.isfile(_git_file_path):
return
gitfile = open(_git_file_path, 'w')
gitfile.write('rev = "%s"\n' % rev)
gitfile.close()
def get_version():
'''
Get the version of the package, including the GIT revision if this
is an actual release.
'''
version = __version__
if not release:
try:
import __git_version__
version += __git_version__.rev
except ImportError:
version += get_git_revision()
return version
if __name__ == "__main__":
write_git_version()
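# Example of the resulting version string (illustrative values): with
# release=False and a checked-out git repo, get_version() returns something
# like "1.3.0.dev0+master-1a2b3c4(2021-05-01)"; with release=True it is
# just "1.3.0".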
| [
"subprocess.Popen"
] | [((544, 598), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'env': 'env'}), '(cmd, stdout=subprocess.PIPE, env=env)\n', (560, 598), False, 'import subprocess\n')] |
# Copyright (c) 2011-2013 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD License found in README.md.
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils import simplejson
from itertools import izip_longest
def render_template(request, template, context=None):
'''Wrap render_to_response with the context_instance argument set.'''
return render_to_response(template, context,
context_instance=RequestContext(request))
def render_json(data):
'''Return an HttpResponse object containing json-encoded data.'''
return HttpResponse(simplejson.dumps(data), mimetype='application/json')
def pairwise(iterable):
    '''Group the elements of the given iterable into 2-tuples.'''
i = iter(iterable)
return izip_longest(i, i)
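# Illustrative behaviour of pairwise(): pairwise([1, 2, 3, 4, 5]) yields
# (1, 2), (3, 4), (5, None) -- izip_longest pads the odd element with None.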
| [
"itertools.izip_longest",
"django.template.RequestContext",
"django.utils.simplejson.dumps"
] | [((879, 897), 'itertools.izip_longest', 'izip_longest', (['i', 'i'], {}), '(i, i)\n', (891, 897), False, 'from itertools import izip_longest\n'), ((700, 722), 'django.utils.simplejson.dumps', 'simplejson.dumps', (['data'], {}), '(data)\n', (716, 722), False, 'from django.utils import simplejson\n'), ((557, 580), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (571, 580), False, 'from django.template import RequestContext\n')] |
# -*- encoding: utf-8 -*-
from collections import defaultdict
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import euclidean_distances
from model import MultiHSD
from tools import dataloader, visualize, rw
import tools
from tools import evaluate
from tools.rw import read_vectors
def scatterplot(graphName, vectors, labels, nodes=None):
vectors = np.asarray(vectors)
# pca = PCA(n_components=2, whiten=False, random_state=42)
# results = np.asarray(pca.fit_transform(vectors), dtype=np.float)
tsne = TSNE(init="pca", n_components=2, perplexity=2, n_iter=5000, learning_rate=0.05, random_state=42)
    results = np.asarray(tsne.fit_transform(vectors), dtype=float)  # np.float was removed in NumPy 1.24+
# df = pd.DataFrame(data={"node": nodes,
# "x": results[:, 0],
# "y": results[:, 1],
# })
# df.to_csv(f"output/node2vec/{graphName}_tsne.csv", columns=["node", "x", "y"], index=None)
#visualize.plot_2D_points(nodes, results, labels)
visualize.plot_node_str(nodes, results)
def visualize_from_csv():
graph = "barbell"
vector_dict = rw.read_vectors(f"output/node2vec/{graph}.csv")
nodes = list(vector_dict.keys())
vectors = list(vector_dict.values())
scatterplot(graph, vectors, [], nodes=nodes)
if __name__ == '__main__':
visualize_from_csv()
| [
"numpy.asarray",
"tools.rw.read_vectors",
"sklearn.manifold.TSNE",
"tools.visualize.plot_node_str"
] | [((495, 514), 'numpy.asarray', 'np.asarray', (['vectors'], {}), '(vectors)\n', (505, 514), True, 'import numpy as np\n'), ((658, 759), 'sklearn.manifold.TSNE', 'TSNE', ([], {'init': '"""pca"""', 'n_components': '(2)', 'perplexity': '(2)', 'n_iter': '(5000)', 'learning_rate': '(0.05)', 'random_state': '(42)'}), "(init='pca', n_components=2, perplexity=2, n_iter=5000, learning_rate=\n 0.05, random_state=42)\n", (662, 759), False, 'from sklearn.manifold import TSNE\n'), ((1160, 1199), 'tools.visualize.plot_node_str', 'visualize.plot_node_str', (['nodes', 'results'], {}), '(nodes, results)\n', (1183, 1199), False, 'from tools import dataloader, visualize, rw\n'), ((1268, 1315), 'tools.rw.read_vectors', 'rw.read_vectors', (['f"""output/node2vec/{graph}.csv"""'], {}), "(f'output/node2vec/{graph}.csv')\n", (1283, 1315), False, 'from tools import dataloader, visualize, rw\n')] |
"""Main module."""
import asyncio
import logging
import socket
import select
from asyncio import IncompleteReadError
from enum import Enum
_LOGGER = logging.getLogger(__name__)
UDP_IP_ADDRESS = "127.0.0.1"
UDP_PORT_NO = 6789
DEFAULT_TIMEOUT = 10
DEFAULT_BUFFER_SIZE = 1024
class ConnectionState(Enum):
DISCONNECTED = 0
CONNECTED = 1
def checksum(command):
"""Function to calculate checksum."""
crc = 0x147A
# for b in command:
# # rotate (crc 1 bit left)
# crc = ((crc << 1) & 0xFFFF) | (crc & 0x8000) >> 15
# crc = crc ^ 0xFFFF
# crc = (crc + (crc >> 8) + b) & 0xFFFF
return crc
def verify_and_strip(resp):
"""Verify checksum and strip header and footer of received frame."""
return resp
def generate_query(command):
"""Add header, checksum and footer to command data."""
data = bytearray(command)
# c = checksum(data)
# data.append(c >> 8)
# data.append(c & 0xFF)
# data.replace(b'\xFE', b'\xFE\xF0')
data = bytearray.fromhex("FEFE") + data + bytearray.fromhex("FE0D")
return data
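# Framing example (checksum generation is currently commented out above):
#   generate_query(b'\x01\x02') -> FE FE 01 02 FE 0D
# i.e. the payload is wrapped in an FE FE header and an FE 0D footer.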
def print_hex(data):
"""Debugging method to print out frames in hex."""
hex_msg = ""
for c in data:
hex_msg += "\\x" + format(c, "02x")
print(hex_msg)
class Buspro:
"""Asynchronous interface to talk to Buspro bus."""
def __init__(self, host, port, loop):
"""Init the Buspro object."""
self._host = host
self._port = port
self._loop = loop
self._callback_a = None
self._callback_b = None
self._callback_c = None
self._state = ConnectionState.DISCONNECTED
self._socket = None
self._message_handlers = {}
# Assign handler
self._message_handlers[b'\x00'] = self._message_handler_a
self._message_handlers[b'\x17'] = self._message_handler_b
self._message_handlers[b'\x0A'] = lambda msg: self._message_handler_c(ConnectionState.CONNECTED, msg)
print("__init__ done")
def _message_handler_a(self, msg):
print("Returning status: %s", msg)
if self._callback_a:
self._callback_a(msg)
return msg
def _message_handler_b(self, msg):
print("Returning status: %s", msg)
if self._callback_b:
self._callback_b(msg)
return msg
def _message_handler_c(self, mode, msg):
print("Alarm update, mode: %s", mode)
print("Returning status: %s", msg)
if self._state in [mode, ConnectionState.DISCONNECTED]:
self._state = ConnectionState.CONNECTED
else:
self._state = ConnectionState.DISCONNECTED
if self._callback_c:
self._callback_c(self._state)
return self._state
async def async_connect(self):
"""Make a TCP connection to the alarm system."""
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.settimeout(DEFAULT_TIMEOUT)
try:
self._socket.connect((UDP_IP_ADDRESS, UDP_PORT_NO))
except socket.error as err:
print("Unable to bind on port %s: %s", UDP_PORT_NO, err)
return False
#self._socket.listen(10)
#conn, addr = s.accept()
print("connect done")
return True
def close(self):
"""Stop monitoring and close connection."""
if self._socket:
# self._activeConnection.Close()
self._socket = None
self._state = ConnectionState.DISCONNECTED
print("Closing...")
async def async_custom_action(self, code):
"""Send command to disarm."""
print("Alarm disarm, code: %s")
while len(code) < 16:
code += 'F'
code_bytes = bytearray.fromhex(code)
data = generate_query(b'\x84' + code_bytes + self._partition_bytes)
await self._send_data(data)
async def async_start_listen(self, callback_a=None, callback_b=None, callback_c=None):
"""Start monitoring of the alarm status.
Send command to Buspro to start sending updates. Read in a
loop and call respective callbacks when received messages.
"""
print("starting async_start_listen")
self._callback_a = callback_a
self._callback_b = callback_b
self._callback_c = callback_c
print("Starting async_start_listen loop")
print("Iteration... ")
while True:
status = await self._listen_to_bus()
print("Got status!")
print("Closed, quit monitoring.")
    async def _listen_to_bus(self):  # made async so the await in async_start_listen is valid
print("Wait...")
self._state = ConnectionState.CONNECTED
try:
resp = self._read_data()
except IncompleteReadError as e:
print("Got exception: %s. Most likely the other side has disconnected!", e)
self._socket = None
self._state = ConnectionState.DISCONNECTED
return self._state
if not resp:
print("Got empty response. We think it's disconnect.")
self._socket = None
self._state = ConnectionState.DISCONNECTED
return self._state
msg_id = resp[0:1]
if msg_id in self._message_handlers:
print("Calling handler for id: %s", msg_id)
return self._message_handlers[msg_id](resp)
else:
print("Ignoring message: %s", msg_id)
return None
def _read_data(self):
# listen for incoming udp packet
print("starting _read_data")
# readable, _, _ = select.select([self._socket], [], [], DEFAULT_TIMEOUT)
# if not readable:
# print("Timeout (%s second(s)) waiting for data on port %s.", DEFAULT_TIMEOUT, UDP_PORT_NO)
# return
#data, _ = self._socket.recvfrom(DEFAULT_BUFFER_SIZE)
data = self._socket.recv(DEFAULT_BUFFER_SIZE)
print("-- Receiving data --")
print_hex(data)
print("-- ------------- --")
return verify_and_strip(data)
    async def _send_data(self, data):  # made async so the await in async_custom_action is valid
print("-- Sending data --")
print_hex(data)
print("-- ------------- --")
print("Sent %d bytes", len(data))
#await self._writer.write(data)
#clientSock.sendto(data, (UDP_IP_ADDRESS, UDP_PORT_NO))
try:
            self._socket.send(data)  # data is already a bytearray from generate_query; no encode() needed
except socket.error as err:
print("Unable to send payload %r to %s on port %s: %s", data, UDP_IP_ADDRESS, UDP_PORT_NO, err)
return
def demo(host, port):
"""Basic demo."""
print("starting demo")
loop = asyncio.get_event_loop()
client = Buspro(host, port, loop)
loop.run_until_complete(client.async_connect())
## loop.create_task(client.async_custom_action("3333"))
loop.create_task(client.async_start_listen())
loop.run_forever()
loop.close()
demo("127.0.0.1", 1000) | [
"logging.getLogger",
"asyncio.get_event_loop",
"socket.socket"
] | [((152, 179), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (169, 179), False, 'import logging\n'), ((6887, 6911), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (6909, 6911), False, 'import asyncio\n'), ((2945, 2993), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (2958, 2993), False, 'import socket\n')] |
#coding=UTF-8
from BuildArchetypes import archetypes, getDeploymentContext
from BuildDemos import demos
import argparse, cgi
parser = argparse.ArgumentParser(description="Build report generator")
parser.add_argument("version", type=str, help="Vaadin version that was just built")
parser.add_argument("deployUrl", type=str, help="Base url of the deployment server")
parser.add_argument("buildResultUrl", type=str, help="URL for the build result page")
parser.add_argument("stagingRepo", type=str, help="URL for the staging repository")
parser.add_argument("tbapiUrl", type=str, help="URL for the TestBench API build")
args = parser.parse_args()
content = """<html>
<head></head>
<body>
<table>
"""
content += "<tr><td>Try demos<ul>"
for demo in demos:
content += "<li><a href='{url}/{demoName}-{version}'>{demoName}</a></li>\n".format(url=args.deployUrl, demoName=demo, version=args.version)
content += "</ul></td></tr>\n<tr><td>Try archetype demos<ul>"
for archetype in archetypes:
content += "<li><a href='{url}/{context}'>{demo}</a></li>\n".format(url=args.deployUrl, demo=archetype, context=getDeploymentContext(archetype, args.version))
content += """</ul></td></tr>
<tr><td><a href="{repoUrl}">Staging repository</a></td></tr>
<tr><td>Eclipse Ivy Settings:<br><pre>""".format(repoUrl=args.stagingRepo)
content += cgi.escape(""" <ibiblio name="vaadin-staging" usepoms="true" m2compatible="true"
root="{repoUrl}" />""".format(repoUrl=args.stagingRepo))
content += """</pre>
</td></tr>
<tr><td><a href="https://dev.vaadin.com/milestone/Vaadin {version}">Close Trac Milestone</a></td></tr>
<tr><td><a href="https://dev.vaadin.com/query?status=pending-release&component=Core+Framework&resolution=fixed&col=id&col=summary&col=component&col=milestone&col=status&col=type">Verify pending release tickets still have milestone {version}</a></td></tr>
<tr><td><a href="https://dev.vaadin.com/admin/ticket/versions">Add version {version} to Trac</td></tr>
<tr><td><a href="{url}">Staging result page (See test results, pin and tag build and dependencies)</a></td></tr>
<tr><td>Commands to tag all repositories (warning: do not run as a single script but set variables and check before any push commands - this has not been tested yet and the change IDs are missing)</td></tr>
<tr><td><pre>
VERSION={version}
GERRIT_USER=[fill in your gerrit username]
FRAMEWORK_REVISION=[fill in framework revision]
SCREENSHOTS_REVISION=[fill in screenshot repository revision]
ARCHETYPES_REVISION=[fill in maven-integration repository revision]
PLUGIN_REVISION=[fill in maven plug-in repository revision]
git clone ssh://[email protected]:29418/vaadin
cd vaadin
git tag -a -m"$VERSION" $VERSION $FRAMEWORK_REVISION
git push --tags
cd ..
git clone ssh://[email protected]:29418/vaadin-screenshots
cd vaadin-screenshots
git tag -a -m"$VERSION" $VERSION $SCREENSHOTS_REVISION
git push --tags
cd ..
git clone ssh://[email protected]:29418/maven-integration
cd maven-integration
git tag -a -m"$VERSION" $VERSION $ARCHETYPES_REVISION
git push --tags
cd ..
git clone ssh://$GERRIT_USER@dev.<EMAIL>.com:29418/maven-plugin
cd maven-plugin
git tag -a -m"$VERSION" $VERSION $PLUGIN_REVISION
git push --tags
cd ..
</pre></td></tr>
<tr><td><a href="{tbapi}">Build and publish TestBench API for version {version} if proceeding</a></td></tr>
</table>
</body>
</html>""".format(url=args.buildResultUrl, repoUrl=args.stagingRepo, version=args.version, tbapi=args.tbapiUrl)
f = open("result/report.html", 'w')
f.write(content)
| [
"BuildArchetypes.getDeploymentContext",
"argparse.ArgumentParser"
] | [((136, 197), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Build report generator"""'}), "(description='Build report generator')\n", (159, 197), False, 'import argparse, cgi\n'), ((1104, 1149), 'BuildArchetypes.getDeploymentContext', 'getDeploymentContext', (['archetype', 'args.version'], {}), '(archetype, args.version)\n', (1124, 1149), False, 'from BuildArchetypes import archetypes, getDeploymentContext\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Galarius'
__copyright__ = 'Copyright 2020, Galarius'
import os
import sys
import argparse
import platform
from core import IBooksWorker
def main(args):
worker = IBooksWorker()
# print titles
if args.list:
titles = worker.titles()
print('\n'.join(titles))
sys.exit(0)
# export
if not (os.path.exists(args.out) and os.path.isdir(args.out)):
os.makedirs(args.out)
if args.title:
worker.export(args)
else:
worker.export_all(args)
if __name__ == "__main__":
if platform.python_version().startswith("2."):
print('Python3 is required')
sys.exit(1)
ap = argparse.ArgumentParser(description="Export iBooks highlights", epilog="Run `pinotate.py` to export all highlights to the current directory")
ap.add_argument('-o', '--out', default='./', help='output directory')
ap.add_argument('-l', '--list', action="store_true", help='print books titles')
ap.add_argument('--headings', default=False, action="store_true", help='add headings to markdown')
ap.add_argument('-s', '--sort', default=False, action="store_true", help='sort by location instead of time')
ap.add_argument('title', metavar='title', nargs='?', help="export only this book's highlights")
args = ap.parse_args()
main(args)
| [
"os.path.exists",
"os.makedirs",
"argparse.ArgumentParser",
"os.path.isdir",
"sys.exit",
"platform.python_version",
"core.IBooksWorker"
] | [((235, 249), 'core.IBooksWorker', 'IBooksWorker', ([], {}), '()\n', (247, 249), False, 'from core import IBooksWorker\n'), ((732, 878), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Export iBooks highlights"""', 'epilog': '"""Run `pinotate.py` to export all highlights to the current directory"""'}), "(description='Export iBooks highlights', epilog=\n 'Run `pinotate.py` to export all highlights to the current directory')\n", (755, 878), False, 'import argparse\n'), ((366, 377), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (374, 377), False, 'import sys\n'), ((475, 496), 'os.makedirs', 'os.makedirs', (['args.out'], {}), '(args.out)\n', (486, 496), False, 'import os\n'), ((710, 721), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (718, 721), False, 'import sys\n'), ((412, 436), 'os.path.exists', 'os.path.exists', (['args.out'], {}), '(args.out)\n', (426, 436), False, 'import os\n'), ((441, 464), 'os.path.isdir', 'os.path.isdir', (['args.out'], {}), '(args.out)\n', (454, 464), False, 'import os\n'), ((621, 646), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (644, 646), False, 'import platform\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""collect.py
Collect data for testing 'perceived' audio quality
References
- https://www.quora.com/Is-there-an-objective-way-to-measure-sound-quality-Audio-community-often-cite-uneven-frequency-in-highs-mids-and-lows-as-poor-audio-quality-but-how-is-that-perceptually-negative-to-someone-who-listens-to-music?share=1
- http://www.bnoack.com/index.html?http&&&www.bnoack.com/audio/speech-level.html
Testing Standards
- PESQ: https://en.wikipedia.org/wiki/Perceptual_Evaluation_of_Speech_Quality
- POLQA: https://en.wikipedia.org/wiki/Perceptual_Objective_Listening_Quality_Analysis
Python Playing and Recording Sound
- https://realpython.com/playing-and-recording-sound-python/#recording-audio
"""
import simpleaudio as sa
import sounddevice as sd
import wave
from scipy.io.wavfile import write
def get_duration(filename):
"""Get wav file playback duration
"""
with wave.open(filename, 'r') as f:
frames = f.getnframes()
rate = f.getframerate()
duration = frames / float(rate)
return duration
if __name__ == '__main__':
    import os
    import sys  # needed for sys.exit below
    import argparse
PARSER = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
PARSER.add_argument("dir_in", help="Directory of input WAV files")
PARSER.add_argument("dir_out", help="Directory of output WAV files")
ARGS = PARSER.parse_args()
# collect all absolute filenames from path
filename_list = []
for filename in os.listdir(ARGS.dir_in):
path = os.path.join(ARGS.dir_in, filename)
# filter-out non-functional files
if not os.path.isfile(path):
continue
# filter-out undesired file-types
        if os.path.splitext(path)[1].lower() in (".wav",):  # tuple of extensions, not a substring test
filename_list.append(path)
if not filename_list:
sys.exit("[Error] Files not found: {}".format(ARGS.dir_in))
# step through each file
for file_speaker in filename_list:
# construct output filename
head, tail = os.path.split(file_speaker)
file_listener = ARGS.dir_out + tail
# play WAV file (speaker)
print("Playing...")
print(file_speaker)
wave_obj = sa.WaveObject.from_wave_file(file_speaker)
play_obj = wave_obj.play()
# record WAV file (listener)
print("Recording...")
print(file_listener)
fs = 44100 # sampling rate
seconds = get_duration(file_speaker) + 1 # recording duration [sec]
recording = sd.rec(int(round(seconds * fs)), samplerate=fs, channels=1)
play_obj.wait_done() # wait until sound has finished playing
sd.wait() # wait until recording is finished
write(file_listener, fs, recording) # save as WAV file
| [
"simpleaudio.WaveObject.from_wave_file",
"wave.open",
"os.listdir",
"argparse.ArgumentParser",
"sounddevice.wait",
"os.path.join",
"os.path.splitext",
"os.path.split",
"os.path.isfile",
"scipy.io.wavfile.write"
] | [((1175, 1278), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (1198, 1278), False, 'import argparse\n'), ((1541, 1564), 'os.listdir', 'os.listdir', (['ARGS.dir_in'], {}), '(ARGS.dir_in)\n', (1551, 1564), False, 'import os\n'), ((938, 962), 'wave.open', 'wave.open', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (947, 962), False, 'import wave\n'), ((1581, 1616), 'os.path.join', 'os.path.join', (['ARGS.dir_in', 'filename'], {}), '(ARGS.dir_in, filename)\n', (1593, 1616), False, 'import os\n'), ((2080, 2107), 'os.path.split', 'os.path.split', (['file_speaker'], {}), '(file_speaker)\n', (2093, 2107), False, 'import os\n'), ((2264, 2306), 'simpleaudio.WaveObject.from_wave_file', 'sa.WaveObject.from_wave_file', (['file_speaker'], {}), '(file_speaker)\n', (2292, 2306), True, 'import simpleaudio as sa\n'), ((2710, 2719), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (2717, 2719), True, 'import sounddevice as sd\n'), ((2764, 2799), 'scipy.io.wavfile.write', 'write', (['file_listener', 'fs', 'recording'], {}), '(file_listener, fs, recording)\n', (2769, 2799), False, 'from scipy.io.wavfile import write\n'), ((1675, 1695), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1689, 1695), False, 'import os\n'), ((1772, 1794), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (1788, 1794), False, 'import os\n')] |
from django.http import HttpResponse#, JsonResponse
from django.db.models import Q
from django.utils.translation import gettext as _
from .models import Providers
from users.models import Users
import json
def my_providers_autocomplete(request):
if request.is_ajax():
query = request.GET.get('term', '')
user = Users.objects.get(pk=request.user)
query = \
Q(created_by_user=user) & Q(dropped=False) & \
(Q(name__icontains=query) | \
Q(rfc__icontains=query))
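        # i.e. providers created by this user, not dropped, whose name or RFC contains the search term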
providers = Providers.objects.filter(query)
results = []
for provider in providers:
lbl=provider.name+' [' + _('RFC') + '='+provider.rfc+']'
results.append(lbl)
data = json.dumps(results)
mimetype = "application/json"
return HttpResponse(data, mimetype) | [
"django.utils.translation.gettext",
"users.models.Users.objects.get",
"django.http.HttpResponse",
"json.dumps",
"django.db.models.Q"
] | [((762, 790), 'django.http.HttpResponse', 'HttpResponse', (['data', 'mimetype'], {}), '(data, mimetype)\n', (774, 790), False, 'from django.http import HttpResponse\n'), ((339, 373), 'users.models.Users.objects.get', 'Users.objects.get', ([], {'pk': 'request.user'}), '(pk=request.user)\n', (356, 373), False, 'from users.models import Users\n'), ((697, 716), 'json.dumps', 'json.dumps', (['results'], {}), '(results)\n', (707, 716), False, 'import json\n'), ((391, 414), 'django.db.models.Q', 'Q', ([], {'created_by_user': 'user'}), '(created_by_user=user)\n', (392, 414), False, 'from django.db.models import Q\n'), ((417, 433), 'django.db.models.Q', 'Q', ([], {'dropped': '(False)'}), '(dropped=False)\n', (418, 433), False, 'from django.db.models import Q\n'), ((443, 467), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'query'}), '(name__icontains=query)\n', (444, 467), False, 'from django.db.models import Q\n'), ((476, 499), 'django.db.models.Q', 'Q', ([], {'rfc__icontains': 'query'}), '(rfc__icontains=query)\n', (477, 499), False, 'from django.db.models import Q\n'), ((629, 637), 'django.utils.translation.gettext', '_', (['"""RFC"""'], {}), "('RFC')\n", (630, 637), True, 'from django.utils.translation import gettext as _\n')] |
from setuptools import setup, find_packages
setup(
name='rss-checker',
version='0.2.3',
author='<NAME>',
author_email='<EMAIL>',
install_requires=[
'requests>2,<3',
'click>6,<7',
'dateparser>0.5,<1',
'pyaml',
],
packages=find_packages(),
entry_points={
'console_scripts': ['rss-checker=rss_checker.main:check',
'rss-checkd=rss_checker.main:checkd'],
}
)
| [
"setuptools.find_packages"
] | [((282, 297), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (295, 297), False, 'from setuptools import setup, find_packages\n')] |
import sys
import rospy
from tm_api_msgs.srv import record,move_to_record,delete_point,start_work,get_record
from tm_api_msgs.msg import robot_status
from threading import Thread
import time
import os
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from utilites import Utilites
class SendRecieveFromService:
@staticmethod
def ros_pytho_client(aqqestName,srvMsgName):
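        # wait_for_service blocks until the service is advertised; ServiceProxy then
        # returns a callable that performs the request/response exchange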
rospy.wait_for_service(aqqestName)
returnFunction = rospy.ServiceProxy(aqqestName,srvMsgName)
return returnFunction
@staticmethod
def send_record_service(aqqestName,positionName,jointPositionRad,jointPositionDeg,cartesianPosition):
py_fun = SendRecieveFromService.ros_pytho_client(aqqestName,record)
res = py_fun(positionName,jointPositionRad,jointPositionDeg,cartesianPosition)
return res.success
@staticmethod
def send_move_to_record_service(aqqestName,positionName, isPlan):
py_fun = SendRecieveFromService.ros_pytho_client(aqqestName,move_to_record)
res = py_fun(positionName,isPlan)
return res.success
@staticmethod
def send_delete_to_record_service(positionName,aqqestName):
py_fun = SendRecieveFromService.ros_pytho_client(aqqestName,delete_point)
res = py_fun(positionName)
return res.success
class TmMoveApiPython:
def __init__(self,isTest):
self.isTest = isTest
def __enter__(self):
self.currentCartesianPosition = None
self.currentJointPositionRad = None
self.currentJointPositionDeg = None
start_work_py_function = SendRecieveFromService.ros_pytho_client("start_work",start_work)
rospy.wait_for_service("start_work")
if(self.isTest):
start_work_py_function(True)
else:
start_work_py_function(False)
self.create_listener()
def __exit__(self, exc_type, exc_val, exc_tb):
print("TmMoveApiPython __exit__ is called ")
self.cartesianSub.unregister()
self.jointDegSub.unregister()
self.jointRadSub.unregister()
end_work_py_function = SendRecieveFromService.ros_pytho_client("end_work",start_work)
end_work_py_function()
def check_data_correct(self,data):
if(len(set(data))==1 and data[0] == -999):
print("it seems not initial before!")
return None
return data
def get_current_cartesian_position(self):
while(self.currentCartesianPosition == None):
time.sleep(0.1)
return self.check_data_correct(self.currentCartesianPosition)
def get_current_joint_position_rad(self):
        while(self.currentJointPositionRad == None):
time.sleep(0.1)
return self.check_data_correct(self.currentJointPositionRad)
def get_current_joint_position_deg(self):
        while(self.currentJointPositionDeg == None):
time.sleep(0.1)
return self.check_data_correct(self.currentJointPositionDeg)
def cartesian_position_callback(self,data):
self.currentCartesianPosition = data.currentCartesianPosition
def joint_position_deg_callback(self,data):
self.currentJointPositionDeg = data.currentJointPositionDeg
def joint_position_rad_callback(self,data):
self.currentJointPositionRad = data.currentJointPositionRad
def create_listener(self):
rospy.init_node('tm_move_python', anonymous=True)
self.cartesianSub = rospy.Subscriber("currentCartesianPosition", robot_status, self.cartesian_position_callback)
self.jointDegSub = rospy.Subscriber("currentJointPositionDeg", robot_status, self.joint_position_deg_callback)
self.jointRadSub = rospy.Subscriber("currentJointPositionRad", robot_status, self.joint_position_rad_callback)
#print("before spin")
#rospy.spin()
#print("after spin")
def except_reaction(self,e):
print("Service call failed: %s",e)
return False
def record_position_joint(self,positionName,jointPosition):
try:
print("send record_position_joint to service")
return SendRecieveFromService.send_record_service("record_position_joint",positionName,jointPosition,None,None)
except rospy.ServiceException as e:
return self.except_reaction(e)
def record_postion_joint_degree(self,positionName,jointPosition):
try:
print("send record_postion_joint_degree to service")
return SendRecieveFromService.send_record_service("record_postion_joint_degree",positionName,None,jointPosition,None)
except rospy.ServiceException as e:
return self.except_reaction(e)
def record_position_cartesian(self,positionName,cartesianPosition):
try:
print("send record_position_cartesian to service")
return SendRecieveFromService.send_record_service("record_position_cartesian",positionName,None,None,cartesianPosition)
except rospy.ServiceException as e:
return self.except_reaction(e)
def move_recorded_poisiton(self,positionName,isPlan):
try:
print("send move_recorded_poisiton to service")
return SendRecieveFromService.send_move_to_record_service("move_recorded_poisiton",positionName,isPlan)
except rospy.ServiceException as e:
return self.except_reaction(e)
def move_recorded_joint_poisiton(self,positionName,isPlan):
try:
print("send move_recorded_joint_poisiton to service")
return SendRecieveFromService.send_move_to_record_service("move_recorded_joint_poisiton",positionName,isPlan)
except rospy.ServiceException as e:
return self.except_reaction(e)
def move_recorded_cartesian_poisiton(self,positionName,isPlan):
try:
print("send move_recorded_cartesian_poisiton to service")
return SendRecieveFromService.send_move_to_record_service("move_recorded_cartesian_poisiton",positionName,isPlan)
except rospy.ServiceException as e:
return self.except_reaction(e)
def checkAllLengthIsCorrect(self,compareVariable,*args):
length = len(compareVariable)
for var in args:
if(length == len(var)):
continue
else:
return False
return True
def get_all_recorded_position(self):
isSuccess,allJointPosition = self.get_recorded_joint_position_deg()
if(isSuccess):
isSuccess,allCartesianPosition = self.get_recorded_cartesian_position()
return isSuccess,allJointPosition,allCartesianPosition
else:
return False,None,None
def get_recorded_joint_position_deg(self):
try:
print("send get_recorded_joint_position_deg to service")
py_fun = SendRecieveFromService.ros_pytho_client("get_recorded_joint_position",get_record)
res = py_fun()
allJointPosition = {}
if(self.checkAllLengthIsCorrect(res.recorded_joint_positions_name,res.j1,res.j2,res.j3,res.j4,res.j5,res.j6)):
for i in range(len(res.recorded_joint_positions_name)):
joints = [res.j1[i],res.j2[i],res.j3[i],res.j4[i],res.j5[i],res.j6[i]]
jointsDeg = [ Utilites.rad_to_deg(rad) for rad in joints]
allJointPosition[res.recorded_joint_positions_name[i]] = jointsDeg
return True,allJointPosition
else:
print("the lenghth of all joint and joint name is not the same")
return False,allJointPosition
except rospy.ServiceException as e:
return self.except_reaction(e)
def get_recorded_cartesian_position(self):
try:
print("send get_recorded_cartesian_position to service")
py_fun = SendRecieveFromService.ros_pytho_client("get_recorded_cartesian_position",get_record)
res = py_fun()
allCartesianPosition = {}
if(self.checkAllLengthIsCorrect(res.recorded_cartesian_positions_name,res.c1,res.c2,res.c3,res.c4,res.c5,res.c6,res.c7)):
for i in range(len(res.recorded_cartesian_positions_name)):
cartesianPoint = [res.c1[i],res.c2[i],res.c3[i],res.c4[i],res.c5[i],res.c6[i],res.c7[i]]
allCartesianPosition[res.recorded_cartesian_positions_name[i]] = cartesianPoint
return True,allCartesianPosition
else:
print("the lenghth of all joint and joint name is not the same")
return False,allCartesianPosition
except rospy.ServiceException as e:
return self.except_reaction(e)
def delete_recorded_poisiton(self,positionName):
try:
print("send delete_recorded_poisiton to service")
return SendRecieveFromService.send_delete_to_record_service(positionName,"delete_recorded_poisiton")
except rospy.ServiceException as e:
return self.except_reaction(e)
def delete_recorded_joint_poisiton(self,positionName):
try:
print("send delete_recorded_joint_poisiton to service")
return SendRecieveFromService.send_delete_to_record_service(positionName,"delete_recorded_joint_poisiton")
except rospy.ServiceException as e:
return self.except_reaction(e)
def delete_recorded_cartesian_poisiton(self,positionName):
try:
print("send delete_recorded_cartesian_poisiton to service")
return SendRecieveFromService.send_delete_to_record_service(positionName,"delete_recorded_cartesian_poisiton")
except rospy.ServiceException as e:
return self.except_reaction(e)
def delete_all_recorded_position(self):
try:
print("send delete_all_recorded_position to service")
SendRecieveFromService.send_delete_to_record_service(None,"delete_all_recorded_position")
return True
except rospy.ServiceException as e:
return self.except_reaction(e)
def delete_all_recorded_joint_poisiton(self):
try:
print("send delete_all_recorded_joint_poisiton to service")
SendRecieveFromService.send_delete_to_record_service(None,"delete_all_recorded_joint_poisiton")
return True
except rospy.ServiceException as e:
return self.except_reaction(e)
def delete_all_recorded_cartesian_poisiton(self):
try:
print("send delete_all_recorded_cartesian_poisiton to service")
SendRecieveFromService.send_delete_to_record_service(None,"delete_all_recorded_cartesian_poisiton")
except rospy.ServiceException as e:
return self.except_reaction(e)
| [
"utilites.Utilites.rad_to_deg",
"rospy.init_node",
"rospy.ServiceProxy",
"time.sleep",
"os.path.abspath",
"rospy.Subscriber",
"rospy.wait_for_service"
] | [((236, 261), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (251, 261), False, 'import os\n'), ((401, 435), 'rospy.wait_for_service', 'rospy.wait_for_service', (['aqqestName'], {}), '(aqqestName)\n', (423, 435), False, 'import rospy\n'), ((461, 503), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['aqqestName', 'srvMsgName'], {}), '(aqqestName, srvMsgName)\n', (479, 503), False, 'import rospy\n'), ((1687, 1723), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""start_work"""'], {}), "('start_work')\n", (1709, 1723), False, 'import rospy\n'), ((3420, 3469), 'rospy.init_node', 'rospy.init_node', (['"""tm_move_python"""'], {'anonymous': '(True)'}), "('tm_move_python', anonymous=True)\n", (3435, 3469), False, 'import rospy\n'), ((3498, 3595), 'rospy.Subscriber', 'rospy.Subscriber', (['"""currentCartesianPosition"""', 'robot_status', 'self.cartesian_position_callback'], {}), "('currentCartesianPosition', robot_status, self.\n cartesian_position_callback)\n", (3514, 3595), False, 'import rospy\n'), ((3618, 3714), 'rospy.Subscriber', 'rospy.Subscriber', (['"""currentJointPositionDeg"""', 'robot_status', 'self.joint_position_deg_callback'], {}), "('currentJointPositionDeg', robot_status, self.\n joint_position_deg_callback)\n", (3634, 3714), False, 'import rospy\n'), ((3737, 3833), 'rospy.Subscriber', 'rospy.Subscriber', (['"""currentJointPositionRad"""', 'robot_status', 'self.joint_position_rad_callback'], {}), "('currentJointPositionRad', robot_status, self.\n joint_position_rad_callback)\n", (3753, 3833), False, 'import rospy\n'), ((2536, 2551), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2546, 2551), False, 'import time\n'), ((2735, 2750), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2745, 2750), False, 'import time\n'), ((2933, 2948), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2943, 2948), False, 'import time\n'), ((7355, 7379), 'utilites.Utilites.rad_to_deg', 'Utilites.rad_to_deg', (['rad'], {}), '(rad)\n', (7374, 7379), False, 'from utilites import Utilites\n')] |
from functools import update_wrapper
# Django REST framework
@classmethod
def as_view(cls, **initkwargs):
if isinstance(getattr(cls, 'queryset', None), models.query.QuerySet):
def force_evaluation():
raise RuntimeError(
'Do not evaluate the `.queryset` attribute directly, '
'as the result will be cached and reused between requests. '
'Use `.all()` or call `.get_queryset()` instead.'
)
cls.queryset._fetch_all = force_evaluation
view = super(APIView, cls).as_view(**initkwargs)
view.cls = cls
view.initkwargs = initkwargs
# Note: session based authentication is explicitly CSRF validated,
# all other authentication is CSRF exempt.
return csrf_exempt(view)
# Django
@classonlymethod
def as_view(cls, **initkwargs):
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r. as_view "
"only accepts arguments that are already "
"attributes of the class." % (cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
self.request = request
self.args = args
self.kwargs = kwargs
return self.dispatch(request, *args, **kwargs)
view.view_class = cls
view.view_initkwargs = initkwargs
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
return view
"""
So the update_wrapper calls here are not strictly required for the functionality; they exist for compatibility.
Some of Django's method implementations are handled dynamically through introspection, and processing the
original function with a decorator/partial overwrites the wrapped function's introspection state.
Django presumably has places that depend on the introspection information of as_view and dispatch, and
Django REST framework overrides (or overwrites) both the as_view and dispatch methods.
view = super(APIView, cls).as_view(**initkwargs)
    keeps using the parent class's as_view to construct the view, and then
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
    so the methods overridden in the subclass still present the parent class's function signature.
"""
| [
"functools.update_wrapper"
] | [((1786, 1823), 'functools.update_wrapper', 'update_wrapper', (['view', 'cls'], {'updated': '()'}), '(view, cls, updated=())\n', (1800, 1823), False, 'from functools import update_wrapper\n'), ((1914, 1961), 'functools.update_wrapper', 'update_wrapper', (['view', 'cls.dispatch'], {'assigned': '()'}), '(view, cls.dispatch, assigned=())\n', (1928, 1961), False, 'from functools import update_wrapper\n')] |
def timestamp2datestring(timestamp,format="%a %b %d %X %Z %Y"):
import time
return time.strftime(format, time.gmtime(timestamp/1000.)) | [
"time.gmtime"
] | [((114, 145), 'time.gmtime', 'time.gmtime', (['(timestamp / 1000.0)'], {}), '(timestamp / 1000.0)\n', (125, 145), False, 'import time\n')] |
import numpy as np
import random
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
import queue
training_files = ["bisecting.txt","blobs.txt","moons.txt"]
INPUT_FILE="blobs.txt"
ITERATIONS=50
# Define labels for the different point groups
UNASSIGNED = 0
CORE_PT = -1
BORDER_PT = -2
dataset = []
noOfClusters = 0
def read_dataset(INPUT_FILE):
"""
Reading dataset
"""
global dataset
f = open(INPUT_FILE, "r")
lines = f.readlines()
for i in range(len(lines)):
data = lines[i].split()
dataset.append(list(map(float, data)))
print("Total dataset = {} points".format(len(dataset)))
f.close()
pass
def find_nearest_neighbour(k):
"""
Nearest neighbour
"""
global dataset
nearest_neighbors = NearestNeighbors(n_neighbors=k)
nearest_neighbors.fit(dataset)
distances, indices = nearest_neighbors.kneighbors(dataset)
distances = np.sort(distances, axis=0)[:, 1]
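    # the sorted k-distances form the "k-distance plot"; its elbow is a common
    # heuristic for choosing the DBSCAN Eps parameter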
# print(distances, indices)
plt.grid()
plt.plot(distances)
# plt.savefig(INPUT_FILE+'_Nearest_Neighbour.png')
plt.show()
def plotClusters(dataset, labels, noOfClusters, file):
total_points = len(dataset)
print("Plotting for {} points".format(total_points))
plt.figure()
# Color array for clusters
scatterColors = ["blue","green","red","cyan","brown","indigo", "pink", "royalblue",
"orange","yellow","black","olive", "gold", "orangered", "skyblue", "teal" ]
for i in range(noOfClusters):
if (i==0):
#Plot all noise point as blue
color='blue'
else:
color = scatterColors[i % len(scatterColors)]
x = []; y = []
for j in range(total_points):
if labels[j] == i:
x.append(dataset[j][0])
y.append(dataset[j][1])
plt.scatter(x, y, c=color, alpha=1, marker='.')
plt.grid()
plt.savefig(file)
# plt.show()
def euclidean_dist(point1, point2):
"""
Euclid distance function
"""
x1 = point1[0]
x2 = point2[0]
y1 = point1[1]
y2 = point2[1]
# create the points
p1 = (x1 - x2)**2
p2 = (y1 - y2)**2
return np.sqrt(p1 + p2)
def neighbor_points(dataset, pointIdx, radius):
'''
find all neigbor points in radius from a given point.
'''
points = []
for i in range(len(dataset)):
# Calculating distance btn points
if euclidean_dist(dataset[i], dataset[pointIdx]) <= radius:
points.append(i)
return points
def dbscan(data, Eps, MinPt):
'''
DBSCAN Algorithm
'''
global dataset, noOfClusters
    # initialize all point labels to UNASSIGNED
pointlabel = [UNASSIGNED] * len(data)
neighbourhood_arr = []
    # initialize lists for core / non-core points
core_pts=[]
non_core_pts=[]
    # find all neighbors of every point
for i in range(len(data)):
neighbourhood_arr.append(neighbor_points(dataset,i,Eps))
#Find all core point, edgepoint and noise
for i in range(len(neighbourhood_arr)):
# A point is a core point if it has more than a specified number of points (MinPts) within Eps
if (len(neighbourhood_arr[i]) >= MinPt):
pointlabel[i] = CORE_PT
core_pts.append(i)
else:
non_core_pts.append(i)
for i in non_core_pts:
for j in neighbourhood_arr[i]:
if j in core_pts:
pointlabel[i] = BORDER_PT
break
#start assigning point to cluster
cluster_no = 1
    # put neighboring core points in a queue and expand to their neighbors' neighbors
for i in range(len(pointlabel)):
q = queue.Queue()
if (pointlabel[i] == CORE_PT):
pointlabel[i] = cluster_no
for j in neighbourhood_arr[i]:
if(pointlabel[j] == CORE_PT):
q.put(j)
pointlabel[j]= cluster_no
elif(pointlabel[j] == BORDER_PT):
pointlabel[j] = cluster_no
# checking queue
while not q.empty():
neighbors = neighbourhood_arr[q.get()]
for n in neighbors:
if (pointlabel[n] == CORE_PT):
pointlabel[n]=cluster_no
q.put(n)
if (pointlabel[n] == BORDER_PT):
pointlabel[n]=cluster_no
cluster_no = cluster_no + 1
noOfClusters = cluster_no
return pointlabel
def DBSCAN_start(eps, minpts):
"""
docstring
"""
global dataset, noOfClusters
print("Starting DBSCAN for EPS: {} | Minpts: {}".format(eps, minpts))
labels = dbscan(dataset,eps,minpts)
plotClusters(dataset, labels, noOfClusters, INPUT_FILE+'_DBSCAN.png')
outliers = labels.count(0)
print("No. of Clusters: {}".format(noOfClusters-1))
print("Outliers: {}".format(outliers))
return noOfClusters - 1
def calc_distance(X1, X2):
return (sum((X1 - X2)**2))**0.5
def assign_clusters(centroids, X):
assigned_cluster = []
for i in X:
distance=[]
for j in centroids:
distance.append(calc_distance(i, j))
# print(distance)
# print(np.argmin(distance))
# print("--------------------------------")
assigned_cluster.append(np.argmin(distance)) # idx of minimum element
# print(assigned_cluster)
return assigned_cluster
def calc_centroids(clusters_lables, k):
global dataset
points_per_cluster = [[] for _ in range(k)]
for i in range(len(clusters_lables)):
points_per_cluster[clusters_lables[i]].append(dataset[i])
centroids = []
for i in range(k):
centroids.append(np.mean(points_per_cluster[i], axis=0))
return centroids
def match_centroids(c_new, c_old):
return (np.array(c_new) == np.array(c_old)).all()
def k_means(k):
"""
K-Means clustering algorithm
"""
global dataset
print("Running k-Means for {} clusters..".format(k))
X = np.array(dataset)
init_centroids = random.sample(range(0, len(dataset)), k)
centroids, cluster_labels = [], []
for i in init_centroids:
centroids.append(dataset[i])
# converting to 2D - array
# centroids = np.array(centroids)
# get_centroids = assign_clusters(centroids, X)
prev_centroids = centroids.copy()
for i in range(ITERATIONS):
print("For iteration {}: ".format(i))
prev_centroids = np.array(prev_centroids)
cluster_labels = assign_clusters(prev_centroids, X)
centroids = calc_centroids(cluster_labels, k)
# print(prev_centroids)
print(centroids)
if match_centroids(centroids,prev_centroids):
print("Converged ...")
break
else:
prev_centroids = centroids.copy()
plotClusters(dataset, cluster_labels, k, INPUT_FILE+'_k_means.png')
if __name__ == "__main__":
print("Choose Training file...")
for i, item in enumerate(training_files, start=1):
print(i,item)
choice = int(input())
INPUT_FILE=training_files[choice-1]
read_dataset(INPUT_FILE)
print("1. Plot k Nearest Neighbours\n2. Run Clustering Algorithms")
choice = int(input())
if choice == 1:
print("Enter the value of k:")
k = int(input())
find_nearest_neighbour(k)
else:
print("Enter EPS value:")
eps = float(input())
print("Enter Minpts value:")
minpts = int(input())
k = DBSCAN_start(eps, minpts)
k_means(k)
| [
"numpy.mean",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"numpy.sort",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.figure",
"sklearn.neighbors.NearestNeighbors",
"matplotlib.pyplot.scatter",
"numpy.argmin",
"queue.Queue",
"matplotlib.pyplot.show"
] | [((730, 761), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'k'}), '(n_neighbors=k)\n', (746, 761), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((930, 940), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (938, 940), True, 'import matplotlib.pyplot as plt\n'), ((942, 961), 'matplotlib.pyplot.plot', 'plt.plot', (['distances'], {}), '(distances)\n', (950, 961), True, 'import matplotlib.pyplot as plt\n'), ((1015, 1025), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1023, 1025), True, 'import matplotlib.pyplot as plt\n'), ((1166, 1178), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1176, 1178), True, 'import matplotlib.pyplot as plt\n'), ((1713, 1723), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1721, 1723), True, 'import matplotlib.pyplot as plt\n'), ((1725, 1742), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {}), '(file)\n', (1736, 1742), True, 'import matplotlib.pyplot as plt\n'), ((1962, 1978), 'numpy.sqrt', 'np.sqrt', (['(p1 + p2)'], {}), '(p1 + p2)\n', (1969, 1978), True, 'import numpy as np\n'), ((5253, 5270), 'numpy.array', 'np.array', (['dataset'], {}), '(dataset)\n', (5261, 5270), True, 'import numpy as np\n'), ((867, 893), 'numpy.sort', 'np.sort', (['distances'], {'axis': '(0)'}), '(distances, axis=0)\n', (874, 893), True, 'import numpy as np\n'), ((1662, 1709), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'color', 'alpha': '(1)', 'marker': '"""."""'}), "(x, y, c=color, alpha=1, marker='.')\n", (1673, 1709), True, 'import matplotlib.pyplot as plt\n'), ((3246, 3259), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (3257, 3259), False, 'import queue\n'), ((5663, 5687), 'numpy.array', 'np.array', (['prev_centroids'], {}), '(prev_centroids)\n', (5671, 5687), True, 'import numpy as np\n'), ((4615, 4634), 'numpy.argmin', 'np.argmin', (['distance'], {}), '(distance)\n', (4624, 4634), True, 'import numpy as np\n'), ((4973, 5011), 'numpy.mean', 'np.mean', (['points_per_cluster[i]'], {'axis': '(0)'}), '(points_per_cluster[i], axis=0)\n', (4980, 5011), True, 'import numpy as np\n'), ((5077, 5092), 'numpy.array', 'np.array', (['c_new'], {}), '(c_new)\n', (5085, 5092), True, 'import numpy as np\n'), ((5096, 5111), 'numpy.array', 'np.array', (['c_old'], {}), '(c_old)\n', (5104, 5111), True, 'import numpy as np\n')] |
import os
import tempfile
from time import time
import pydicom
import datetime
from pydicom.dataset import Dataset, FileDataset
import numpy as np
class dcm_loader(object):
"""
"""
def __init__(self):
pass
def load_vol(self, path):
"""
path : patient data path
returns numpy array of patient data
"""
self.patient = pydicom.dcmread(path)
return self.patient.pixel_array
def write_vol(self, path, vol):
"""
path : path to write the data
vol : modifient volume
return: True or False based on saving of volume
"""
suffix = '.dcm'
filename_little_endian = tempfile.NamedTemporaryFile(suffix=suffix).name
filename_big_endian = tempfile.NamedTemporaryFile(suffix=suffix).name
file_meta = Dataset()
ds = FileDataset(filename_little_endian, {},
file_meta=file_meta, preamble=b"\0" * 128)
ds.PatientName = self.patient.PatientName
ds.PatientID = self.patient.PatientID
ds.is_little_endian = self.patient.is_little_endian
ds.is_implicit_VR = self.patient.is_implicit_VR
# Set creation date/time
dt = datetime.datetime.now()
ds.ContentDate = dt.strftime('%Y%m%d')
timeStr = dt.strftime('%H%M%S.%f') # long format with micro seconds
ds.ContentTime = timeStr
ds.PixelData = vol.tostring()
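        # note: ndarray.tostring() is deprecated in newer numpy; tobytes() is the equivalent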
try:
ds.save_as(filename_little_endian)
return True
except:
return False
| [
"pydicom.dataset.FileDataset",
"pydicom.dcmread",
"datetime.datetime.now",
"tempfile.NamedTemporaryFile",
"pydicom.dataset.Dataset"
] | [((392, 413), 'pydicom.dcmread', 'pydicom.dcmread', (['path'], {}), '(path)\n', (407, 413), False, 'import pydicom\n'), ((857, 866), 'pydicom.dataset.Dataset', 'Dataset', ([], {}), '()\n', (864, 866), False, 'from pydicom.dataset import Dataset, FileDataset\n'), ((880, 969), 'pydicom.dataset.FileDataset', 'FileDataset', (['filename_little_endian', '{}'], {'file_meta': 'file_meta', 'preamble': "(b'\\x00' * 128)"}), "(filename_little_endian, {}, file_meta=file_meta, preamble=\n b'\\x00' * 128)\n", (891, 969), False, 'from pydicom.dataset import Dataset, FileDataset\n'), ((1239, 1262), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1260, 1262), False, 'import datetime\n'), ((711, 753), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': 'suffix'}), '(suffix=suffix)\n', (738, 753), False, 'import tempfile\n'), ((789, 831), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': 'suffix'}), '(suffix=suffix)\n', (816, 831), False, 'import tempfile\n')] |
import os
import re
import string
punc = str()
def fileInspector(parent_directory, isTrain=False, isSpam=False):
'''
    Retrieves the names of the message files under the given parent directory,
    e.g. <parent_directory>/dataset/training/spam for the spam training set.
'''
if isTrain:
if isSpam:
flist = sorted([ f for f in os.listdir( f"{parent_directory}/dataset/training/spam" ) if "msg" in f], key=str.lower)
else:
flist = sorted([ f for f in os.listdir( f"{parent_directory}/dataset/training/legitimate" ) if "msg" in f], key=str.lower)
else:
if isSpam:
flist = sorted([ f for f in os.listdir( f"{parent_directory}/dataset/test/spam" ) if "msg" in f], key=str.lower)
else:
flist = sorted([ f for f in os.listdir( f"{parent_directory}/dataset/test/legitimate" ) if "msg" in f], key=str.lower)
return flist
def fileRead(location, fname):
'''
Retrieves the content of the given file name in the given directory
'''
with open("{}/{}".format(location, fname), "r", encoding="latin-1") as f:
return f.read()
def setPunc(isReset=False):
'''
    - Builds the punctuation set to be removed during string cleaning,
      keeping the dollar sign, since dollar signs appear very
      frequently in spam messages.
'''
global punc
if isReset:
punc = string.punctuation
else:
punc = string.punctuation
dollar_sign_pos = punc.index("$")
punc = punc[0:dollar_sign_pos] + punc[(dollar_sign_pos+1):]
def clearPunc(_string):
'''
- Clears any punctuation if necessary.
'''
global punc
setPunc()
non_punc_string = str(_string).translate(str.maketrans(punc, " "*len(punc)))
setPunc(isReset=True)
return non_punc_string
def processed_string(_string):
'''
    - Splits the given string on whitespace and lowercases the tokens
      while stripping any remaining whitespace.
'''
current_form = clearPunc(_string)
tokens = str(current_form).split()
trimmed_tokens = []
for token in tokens:
trimmed_tokens.append(str(token).strip().lower())
return trimmed_tokens
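# Example (hypothetical input): processed_string("WIN $$$ NOW!!") -> ['win', '$$$', 'now']
# (punctuation other than '$' is replaced with spaces before tokenizing)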
| [
"os.listdir"
] | [((373, 428), 'os.listdir', 'os.listdir', (['f"""{parent_directory}/dataset/training/spam"""'], {}), "(f'{parent_directory}/dataset/training/spam')\n", (383, 428), False, 'import os\n'), ((516, 577), 'os.listdir', 'os.listdir', (['f"""{parent_directory}/dataset/training/legitimate"""'], {}), "(f'{parent_directory}/dataset/training/legitimate')\n", (526, 577), False, 'import os\n'), ((680, 731), 'os.listdir', 'os.listdir', (['f"""{parent_directory}/dataset/test/spam"""'], {}), "(f'{parent_directory}/dataset/test/spam')\n", (690, 731), False, 'import os\n'), ((819, 876), 'os.listdir', 'os.listdir', (['f"""{parent_directory}/dataset/test/legitimate"""'], {}), "(f'{parent_directory}/dataset/test/legitimate')\n", (829, 876), False, 'import os\n')] |
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
TODO: get this to work with python3
"""
import logging, copy
log = logging.getLogger(__name__)
import numpy as np, math
try:
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Circle, Ellipse, PathPatch
import matplotlib.lines as mlines
import matplotlib.path as mpath
except ImportError:
plt = None
pass
def ellipse_closest_approach_to_point( ex, ez, _c ):
"""
Ellipse natural frame, semi axes ex, ez. _c coordinates of point
    :param ex: ellipse semi-axis along x
    :param ez: ellipse semi-axis along z
    :param _c: xz coordinates of the point
:return p: point on ellipse of closest approach to center of torus circle
Closest approach on the bulb ellipse to the center of torus "circle"
is a good point to target for hype/cone/whatever neck,
as are aiming to eliminate the cylinder neck anyhow
equation of RHS torus circle, in ellipse frame
(x - R)^2 + (z - z0)^2 - r^2 = 0
equation of ellipse
(x/ex)^2 + (z/ez)^2 - 1 = 0
"""
c = np.asarray( _c ) # center of RHS torus circle
assert c.shape == (2,)
t = np.linspace( 0, 2*np.pi, 1000000 )
e = np.zeros( [len(t), 2] )
e[:,0] = ex*np.cos(t)
e[:,1] = ez*np.sin(t) # 1M parametric points on the ellipse
p = e[np.sum(np.square(e-c), 1).argmin()] # point on ellipse closest to c
return p
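# note: the closest-approach search above is a brute-force scan over 1M parametric points
# on the ellipse; simple, and adequate for the dimensions used here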
def ellipse_points( xy=[0,-5.], ex=254., ez=190., n=1000 ):
"""
:param ec: center of ellipse
:param ex: xy radius of ellipse
:param ez: z radius of ellipse
:param n: number of points
:return e: array of shape (n,2) of points on the ellipse
"""
t = np.linspace( 0, 2*np.pi, n )
e = np.zeros([len(t), 2])
e[:,0] = ex*np.cos(t) + xy[0]
e[:,1] = ez*np.sin(t) + xy[1]
return e
def circle_points( xy=[0,0], tr=80, n=1000 ):
"""
:param tc: center of circle
:param tr: radius of circle
:param n: number of points
:return c: array of shape (n,2) of points on the circle
"""
t = np.linspace( 0, 2*np.pi, n )
c = np.zeros([len(t), 2])
c[:,0] = tr*np.cos(t) + xy[0]
c[:,1] = tr*np.sin(t) + xy[1]
return c
def points_inside_circle(points, center, radius):
"""
:param points: (n,2) array of points
:param center: (2,) coordinates of circle center
:param radius:
:return mask: boolean array of dimension (n,2) indicating if points are within the circle
"""
return np.sqrt(np.sum(np.square(points-center),1)) - radius < 0.
def ellipse_points_inside_circle():
tc = np.array([torus_x,torus_z])
tr = m4_torus_r
e = ellipse_points( xy=[0,-5.], ex=254., ez=190., n=1000000 )
class X(object):
def __init__(self, root):
self.root = root
def __repr__(self):
return "\n".join( map(repr, self.constituents()))
def find(self, shape):
return self.root.find(shape)
def find_one(self, shape):
ff = self.root.find(shape)
assert len(ff) == 1
return ff[0]
def constituents(self):
return self.root.constituents()
def replacement_cons(self):
"""
"""
i = self.find_one("STorus")
r = i.param[0]
R = i.param[1]
d = self.find_one("SEllipsoid")
ex = d.param[0]
ez = d.param[1]
print("r %s R %s ex %s ez %s " % (r,R,ex,ez))
print(" SEllipsoid d.xy %s " % repr(d.xy) )
print(" STorus i.xy %s " % repr(i.xy) )
z0 = i.xy[1] # torus z-plane in ellipsoid frame
p = ellipse_closest_approach_to_point( ex, ez, [R,z0] ) # [R,z0] is center of torus circle
pr, pz = p # at torus/ellipse closest point : no guarantee of intersection
print(" ellipse closest approach to torus %s " % repr(p) )
r2 = pr
r1 = R - r
mz = (z0 + pz)/2. # mid-z cone coordinate (ellipsoid frame)
hz = (pz - z0)/2. # cons half height
f = SCons( "f", [r1,r2,hz] )
B = np.array( [0, mz] )
print(" replacment SCons %s offset %s " % (repr(f),repr(B)))
return f, B
def spawn_rationalized(self):
"""
::
UnionSolid
/ \
Ellipsoid Subtraction
/ \
Tubs Torus
UnionSolid
/ \
Ellipsoid Cons
"""
name = self.__class__.__name__
x = copy.deepcopy(self)
# establish expectations for tree
e = x.find_one("SEllipsoid")
t = x.find_one("STorus")
ss = t.parent
assert ss is not None and ss.shape == "SSubtractionSolid"
us = ss.parent
assert us is not None and us.shape == "SUnionSolid"
assert us.left is not None and us.left == e and us.right == ss and ss.right == t
assert us.right is not None and us.right == ss
if name == "x018": # cathode vacuum cap
assert x.root.shape == "SIntersectionSolid"
x.root = e
e.parent = None
elif name == "x019": # remainder vacuum
assert x.root.shape == "SSubtractionSolid"
left = x.root.left
assert left.shape == "SUnionSolid"
left.parent = None
x.root = left
else:
pass
pass
if name in ["x019","x020","x021"]:
# calculate the parameters of the replacement cons
cons, offset = x.replacement_cons()
# tree surgery : replacing the right child of UnionSolid
us.right = cons
cons.parent = us
cons.ltransform = offset
pass
return x
class Shape(object):
"""
matplotlib patches do not support deferred placement it seems,
so do that here
"""
KWA = dict(fill=False)
dtype = np.float64
PRIMITIVE = ["SEllipsoid","STubs","STorus", "SCons", "SHype", "SBox", "SPolycone"]
PRIMITIVE2 = ["Box", "Cylinder", "Tubs" ]
ALL_PRIMITIVE = PRIMITIVE + PRIMITIVE2
COMPOSITE = ["SUnionSolid", "SSubtractionSolid", "SIntersectionSolid"]
ALL = ALL_PRIMITIVE + COMPOSITE
def __repr__(self):
return "%s : %20s : %s : %s " % (
self.name,
self.shape,
repr(self.ltransform),
repr(self.param)
)
def __init__(self, name, param, **kwa ):
shape = self.__class__.__name__
if not shape in self.ALL:
log.error("shape class name %s is not in the list %s " % ( shape, str(self.ALL)))
pass
assert shape in self.ALL
primitive = shape in self.ALL_PRIMITIVE
composite = shape in self.COMPOSITE
d = self.KWA.copy()
d.update(kwa)
self.kwa = d
self.name = name
self.shape = shape
self.param = param
self.parent = None
self.ltransform = None
self.left = None
self.right = None
if composite:
left = self.param[0]
right = self.param[1]
right.ltransform = self.param[2]
left.parent = self
right.parent = self
self.left = left
self.right = right
pass
is_primitive = property(lambda self:self.left is None and self.right is None)
is_composite = property(lambda self:self.left is not None and self.right is not None)
def _get_xy(self):
"""
Assumes only translations, adds the node.ltransform obtained by following
parent links up the tree of shapes.
a Intersection
/ \
b m(D) Union m:Tubs
/ \
c k(C) Union Tubs
/ \
d f(B) Ellipsoid Subtraction
/ \
g(B) i(B+A) Tubs Torus
"""
xy = np.array([0,0], dtype=self.dtype )
node = self
while node is not None:
if node.ltransform is not None:
log.debug("adding ltransform %s " % node.ltransform)
xy += node.ltransform
pass
node = node.parent
pass
return xy
xy = property(_get_xy)
def constituents(self):
if self.is_primitive:
return [self]
else:
assert self.is_composite
cts = []
cts.extend( self.left.constituents() )
cts.extend( self.right.constituents() )
return cts
pass
def find(self, shape):
cts = self.constituents()
        return list(filter(lambda ct: ct.shape == shape, cts))  # list so find_one can call len() under Python 3
def patches(self):
"""
Positioning is relying on self.xy of the primitives
with nothing being passed into composites.
For composites self.param[2] is the local right transform
"""
if self.shape == "SEllipsoid":
return self.make_ellipse( self.xy, self.param, **self.kwa )
elif self.shape == "STubs" or self.shape == "Tubs":
return self.make_rect( self.xy, self.param, **self.kwa)
elif self.shape == "STorus":
return self.make_torus( self.xy, self.param, **self.kwa)
elif self.shape == "SCons":
return self.make_cons( self.xy, self.param, **self.kwa)
elif self.shape == "SHype":
return self.make_hype( self.xy, self.param, **self.kwa)
elif self.shape == "SBox" or self.shape == "Box":
return self.make_rect( self.xy, self.param, **self.kwa)
elif self.shape == "SPolycone":
return self.make_polycone( self.xy, self.param, **self.kwa)
else:
if not self.is_composite:
log.error("shape :%s: not handled in patches()" % self.shape )
pass
assert self.is_composite
pts = []
pts.extend( self.left.patches() )
pts.extend( self.right.patches() )
return pts
pass
@classmethod
def create(cls, pt ):
pass
@classmethod
def make_rect(cls, xy , wh, **kwa ):
"""
:param xy: center of rectangle
:param wh: halfwidth, halfheight
"""
ll = ( xy[0] - wh[0], xy[1] - wh[1] )
return [Rectangle( ll, 2.*wh[0], 2.*wh[1], **kwa )]
@classmethod
def make_ellipse(cls, xy , param, **kwa ):
return [Ellipse( xy, width=2.*param[0], height=2.*param[1], **kwa )]
@classmethod
def make_circle(cls, xy , radius, **kwa ):
return [Circle( xy, radius=radius, **kwa )]
@classmethod
def make_torus(cls, xy, param, **kwa ):
r = param[0]
R = param[1]
pts = []
lhs = cls.make_circle( xy + [-R,0], r, **kwa)
rhs = cls.make_circle( xy + [+R,0], r, **kwa)
pts.extend(lhs)
pts.extend(rhs)
return pts
@classmethod
def make_pathpatch(cls, xy, vtxs, **kwa ):
"""see analytic/pathpatch.py"""
Path = mpath.Path
path_data = []
for i, vtx in enumerate(vtxs):
act = Path.MOVETO if i == 0 else Path.LINETO
path_data.append( (act, (vtx[0]+xy[0], vtx[1]+xy[1])) )
pass
path_data.append( (Path.CLOSEPOLY, (vtxs[0,0]+xy[0], vtxs[0,1]+xy[1])) )
pass
codes, verts = zip(*path_data)
path = Path(verts, codes)
patch = PathPatch(path, **kwa)
return [patch]
@classmethod
def make_cons(cls, xy , param, **kwa ):
"""
(-r2,z2) (r2,z2)
1---------2
\ /
0 ... 3
(-r1,z1) (r1,z1)
"""
r1 = param[0]
r2 = param[1]
hz = param[2]
z2 = hz + xy[1]
z1 = -hz + xy[1]
vtxs = np.zeros( (4,2) )
vtxs[0] = ( -r1, z1)
vtxs[1] = ( -r2, z2)
vtxs[2] = ( r2, z2)
vtxs[3] = ( r1, z1)
return cls.make_pathpatch( xy, vtxs, **kwa )
@classmethod
def make_polycone(cls, xy , param, **kwa ):
"""
"""
zp = param
nz = len(zp)
assert zp.shape == (nz, 3), zp
assert nz > 1 , zp
rmin = zp[:,0]
rmax = zp[:,1]
z = zp[:,2]
vtxs = np.zeros( (2*nz,2) )
for i in range(nz):
vtxs[i] = ( -rmax[i], z[i] )
vtxs[2*nz-i-1] = ( rmax[i], z[i] )
pass
log.debug(" xy : %r " % xy )
return cls.make_pathpatch( xy, vtxs, **kwa )
@classmethod
def make_hype(cls, xy , param, **kwa ):
"""
4----------- 5
3 6
2 7
1 8
0 ---------- 9
sqrt(x^2+y^2) = r0 * np.sqrt( (z/zf)^2 + 1 )
"""
r0 = param[0]
stereo = param[1]
hz = param[2]
zf = r0/np.tan(stereo)
r_ = lambda z:r0*np.sqrt( np.square(z/zf) + 1. )
nz = 20
zlhs = np.linspace( -hz, hz, nz )
zrhs = np.linspace( hz, -hz, nz )
vtxs = np.zeros( (nz*2,2) )
vtxs[:nz,0] = -r_(zlhs) + xy[0]
vtxs[:nz,1] = zlhs + xy[1]
vtxs[nz:,0] = r_(zrhs) + xy[0]
vtxs[nz:,1] = zrhs + xy[1]
return cls.make_pathpatch( xy, vtxs, **kwa )
class SEllipsoid(Shape):pass
class STubs(Shape):pass
class STorus(Shape):pass
class SCons(Shape):pass
class SHype(Shape):pass
class SPolycone(Shape):pass
class SUnionSolid(Shape):pass
class SSubtractionSolid(Shape):pass
class SIntersectionSolid(Shape):pass
if __name__ == '__main__':
pass
| [
"logging.getLogger",
"matplotlib.patches.Rectangle",
"numpy.tan",
"numpy.asarray",
"numpy.square",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"matplotlib.patches.PathPatch",
"numpy.cos",
"copy.deepcopy",
"numpy.sin",
"matplotlib.patches.Ellipse",
"matplotlib.patches.Circle"
] | [((792, 819), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (809, 819), False, 'import logging, copy\n'), ((1760, 1774), 'numpy.asarray', 'np.asarray', (['_c'], {}), '(_c)\n', (1770, 1774), True, 'import numpy as np, math\n'), ((1844, 1878), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(1000000)'], {}), '(0, 2 * np.pi, 1000000)\n', (1855, 1878), True, 'import numpy as np, math\n'), ((2385, 2413), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n'], {}), '(0, 2 * np.pi, n)\n', (2396, 2413), True, 'import numpy as np, math\n'), ((2757, 2785), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n'], {}), '(0, 2 * np.pi, n)\n', (2768, 2785), True, 'import numpy as np, math\n'), ((3297, 3325), 'numpy.array', 'np.array', (['[torus_x, torus_z]'], {}), '([torus_x, torus_z])\n', (3305, 3325), True, 'import numpy as np, math\n'), ((1927, 1936), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (1933, 1936), True, 'import numpy as np, math\n'), ((1954, 1963), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1960, 1963), True, 'import numpy as np, math\n'), ((4756, 4773), 'numpy.array', 'np.array', (['[0, mz]'], {}), '([0, mz])\n', (4764, 4773), True, 'import numpy as np, math\n'), ((5299, 5318), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (5312, 5318), False, 'import logging, copy\n'), ((9257, 9291), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'self.dtype'}), '([0, 0], dtype=self.dtype)\n', (9265, 9291), True, 'import numpy as np, math\n'), ((12799, 12821), 'matplotlib.patches.PathPatch', 'PathPatch', (['path'], {}), '(path, **kwa)\n', (12808, 12821), False, 'from matplotlib.patches import Rectangle, Circle, Ellipse, PathPatch\n'), ((13211, 13227), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {}), '((4, 2))\n', (13219, 13227), True, 'import numpy as np, math\n'), ((13682, 13703), 'numpy.zeros', 'np.zeros', (['(2 * nz, 2)'], {}), '((2 * nz, 2))\n', (13690, 13703), True, 'import numpy as np, math\n'), ((14403, 14427), 'numpy.linspace', 'np.linspace', (['(-hz)', 'hz', 'nz'], {}), '(-hz, hz, nz)\n', (14414, 14427), True, 'import numpy as np, math\n'), ((14445, 14469), 'numpy.linspace', 'np.linspace', (['hz', '(-hz)', 'nz'], {}), '(hz, -hz, nz)\n', (14456, 14469), True, 'import numpy as np, math\n'), ((14489, 14510), 'numpy.zeros', 'np.zeros', (['(nz * 2, 2)'], {}), '((nz * 2, 2))\n', (14497, 14510), True, 'import numpy as np, math\n'), ((2460, 2469), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2466, 2469), True, 'import numpy as np, math\n'), ((2494, 2503), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2500, 2503), True, 'import numpy as np, math\n'), ((2832, 2841), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2838, 2841), True, 'import numpy as np, math\n'), ((2866, 2875), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2872, 2875), True, 'import numpy as np, math\n'), ((11667, 11713), 'matplotlib.patches.Rectangle', 'Rectangle', (['ll', '(2.0 * wh[0])', '(2.0 * wh[1])'], {}), '(ll, 2.0 * wh[0], 2.0 * wh[1], **kwa)\n', (11676, 11713), False, 'from matplotlib.patches import Rectangle, Circle, Ellipse, PathPatch\n'), ((11794, 11857), 'matplotlib.patches.Ellipse', 'Ellipse', (['xy'], {'width': '(2.0 * param[0])', 'height': '(2.0 * param[1])'}), '(xy, width=2.0 * param[0], height=2.0 * param[1], **kwa)\n', (11801, 11857), False, 'from matplotlib.patches import Rectangle, Circle, Ellipse, PathPatch\n'), ((11938, 11970), 'matplotlib.patches.Circle', 'Circle', (['xy'], {'radius': 'radius'}), '(xy, radius=radius, **kwa)\n', (11944, 
11970), False, 'from matplotlib.patches import Rectangle, Circle, Ellipse, PathPatch\n'), ((14297, 14311), 'numpy.tan', 'np.tan', (['stereo'], {}), '(stereo)\n', (14303, 14311), True, 'import numpy as np, math\n'), ((2023, 2039), 'numpy.square', 'np.square', (['(e - c)'], {}), '(e - c)\n', (2032, 2039), True, 'import numpy as np, math\n'), ((3205, 3231), 'numpy.square', 'np.square', (['(points - center)'], {}), '(points - center)\n', (3214, 3231), True, 'import numpy as np, math\n'), ((14347, 14364), 'numpy.square', 'np.square', (['(z / zf)'], {}), '(z / zf)\n', (14356, 14364), True, 'import numpy as np, math\n')] |
# -*- coding: utf-8 -*-
import scrapy
from spider.Utils import TimeHelper, CarDBHelper
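# Crawl flow (summary of the callbacks below): parse() walks the brand index page,
# brand_page() each brand's car-series listing, and series_page() the individual
# vehicle models; every level writes a row through CarDBHelper.DataDBHelper.save()
# before yielding the request for the next level.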
class WangyicarSpider(scrapy.Spider):
name = 'wangyicar'
allowed_domains = ['163.com']
start_urls = ['http://product.auto.163.com']
def parse(self, response):
item = dict()
item['app_id'] = '4'
# 品牌id
brand_id = response.xpath('//*[@id="brandCont"]/*/h2/a/@id').extract()
# 品牌名字
brand_name = response.xpath('//*[@id="brandCont"]/*/h2/a/@title').extract()
# 品牌页url
brand_url_list = response.xpath('//*[@id="brandCont"]/*/h2/a/@href').extract()
if len(brand_name) > 0:
for i in range(len(brand_name)):
item['brand_id'] = brand_id[i]
item['brand_name'] = brand_name[i]
table = 'datau_crawler_brand'
item['create_time'] = TimeHelper.TimeHelper.getTime()
item['update_time'] = TimeHelper.TimeHelper.getTime()
CarDBHelper.DataDBHelper.save(table=table, item=item)
brand_url = 'http://product.auto.163.com'+brand_url_list[i]
                # pass a copy: the same dict is mutated on every loop iteration, so sharing it via meta would clobber earlier requests
                yield scrapy.Request(brand_url, meta={'item': dict(item)}, dont_filter=True, callback=self.brand_page)
def brand_page(self, response):
itembrand = response.meta['item']
brand_id = itembrand['brand_id']
item = dict()
item['app_id'] = '4'
# 车系id
series_id = response.xpath('//*/div[@class="item-cont cur"]//*/p[@class="title"]/a/@data-series-id').extract()
# 车系名字
series_name = response.xpath('//*/div[@class="item-cont cur"]//*/p[@class="title"]/a/text()').extract()
# 车系url
series_url_list = self.get_series_url(response)
if len(series_name) > 0:
for i in range(len(series_name)):
item['brand_id'] = brand_id
item['series_id'] = series_id[i]
item['series_name'] = series_name[i]
item['series_url'] = series_url_list[i]
table = 'datau_crawler_carseries'
item['create_time'] = TimeHelper.TimeHelper.getTime()
item['update_time'] = TimeHelper.TimeHelper.getTime()
CarDBHelper.DataDBHelper.save(table=table, item=item)
series_url = series_url_list[i]
                # pass a copy for the same reason as in parse()
                yield scrapy.Request(series_url, meta={'item': dict(item)}, dont_filter=True, callback=self.series_page)
def series_page(self, response):
itemseries = response.meta['item']
series_id = itemseries['series_id']
item = dict()
item['app_id'] = '4'
# 车型id
vm_id = self.get_vm_id(response)
# 车型名字
vm_name = response.xpath('//*/div[@class="table_car_sells"]/div/div/div[1]/a/text()').extract()
# 车型url
vm_url_list = self.get_vm_url(response)
if len(vm_name) > 0:
for i in range(len(vm_name)):
item['series_id'] = series_id
item['vm_id'] = vm_id[i]
item['vm_name'] = vm_name[i]
item['vm_url'] = vm_url_list[i]
table = 'datau_crawler_vehiclemodel'
item['create_time'] = TimeHelper.TimeHelper.getTime()
item['update_time'] = TimeHelper.TimeHelper.getTime()
CarDBHelper.DataDBHelper.save(table=table, item=item)
@staticmethod
def get_series_url(response):
series_url_list = response.xpath('//*/div[@class="item-cont cur"]//*/p[@class="title"]/a/@href').extract()
for i in range(len(series_url_list)):
series_url_list[i] = 'http://product.auto.163.com'+series_url_list[i]
return series_url_list
@staticmethod
def get_vm_id(response):
vm_id = response.xpath('//*/div[@class="table_car_sells"]/div/div/div[1]/a/@href').extract()
for i in range(len(vm_id)):
vm_id[i] = vm_id[i].replace('/product/', '')
vm_id[i] = vm_id[i].replace('.html#ncx00020', '')
return vm_id
@staticmethod
def get_vm_url(response):
vm_url_list = response.xpath('//*/div[@class="table_car_sells"]/div/div/div[1]/a/@href').extract()
for i in range(len(vm_url_list)):
vm_url_list[i] = 'http://product.auto.163.com'+vm_url_list[i]
return vm_url_list
| [
"spider.Utils.TimeHelper.TimeHelper.getTime",
"spider.Utils.CarDBHelper.DataDBHelper.save",
"scrapy.Request"
] | [((873, 904), 'spider.Utils.TimeHelper.TimeHelper.getTime', 'TimeHelper.TimeHelper.getTime', ([], {}), '()\n', (902, 904), False, 'from spider.Utils import TimeHelper, CarDBHelper\n'), ((943, 974), 'spider.Utils.TimeHelper.TimeHelper.getTime', 'TimeHelper.TimeHelper.getTime', ([], {}), '()\n', (972, 974), False, 'from spider.Utils import TimeHelper, CarDBHelper\n'), ((991, 1044), 'spider.Utils.CarDBHelper.DataDBHelper.save', 'CarDBHelper.DataDBHelper.save', ([], {'table': 'table', 'item': 'item'}), '(table=table, item=item)\n', (1020, 1044), False, 'from spider.Utils import TimeHelper, CarDBHelper\n'), ((2109, 2140), 'spider.Utils.TimeHelper.TimeHelper.getTime', 'TimeHelper.TimeHelper.getTime', ([], {}), '()\n', (2138, 2140), False, 'from spider.Utils import TimeHelper, CarDBHelper\n'), ((2179, 2210), 'spider.Utils.TimeHelper.TimeHelper.getTime', 'TimeHelper.TimeHelper.getTime', ([], {}), '()\n', (2208, 2210), False, 'from spider.Utils import TimeHelper, CarDBHelper\n'), ((2227, 2280), 'spider.Utils.CarDBHelper.DataDBHelper.save', 'CarDBHelper.DataDBHelper.save', ([], {'table': 'table', 'item': 'item'}), '(table=table, item=item)\n', (2256, 2280), False, 'from spider.Utils import TimeHelper, CarDBHelper\n'), ((3203, 3234), 'spider.Utils.TimeHelper.TimeHelper.getTime', 'TimeHelper.TimeHelper.getTime', ([], {}), '()\n', (3232, 3234), False, 'from spider.Utils import TimeHelper, CarDBHelper\n'), ((3273, 3304), 'spider.Utils.TimeHelper.TimeHelper.getTime', 'TimeHelper.TimeHelper.getTime', ([], {}), '()\n', (3302, 3304), False, 'from spider.Utils import TimeHelper, CarDBHelper\n'), ((3321, 3374), 'spider.Utils.CarDBHelper.DataDBHelper.save', 'CarDBHelper.DataDBHelper.save', ([], {'table': 'table', 'item': 'item'}), '(table=table, item=item)\n', (3350, 3374), False, 'from spider.Utils import TimeHelper, CarDBHelper\n'), ((1144, 1239), 'scrapy.Request', 'scrapy.Request', (['brand_url'], {'meta': "{'item': item}", 'dont_filter': '(True)', 'callback': 'self.brand_page'}), "(brand_url, meta={'item': item}, dont_filter=True, callback=\n self.brand_page)\n", (1158, 1239), False, 'import scrapy\n'), ((2352, 2449), 'scrapy.Request', 'scrapy.Request', (['series_url'], {'meta': "{'item': item}", 'dont_filter': '(True)', 'callback': 'self.series_page'}), "(series_url, meta={'item': item}, dont_filter=True, callback=\n self.series_page)\n", (2366, 2449), False, 'import scrapy\n')] |
import requests
import pyquery
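# Collects every link on the target page: absolute URLs are kept as-is, while
# site-relative hrefs (starting with '/') are prefixed with the GitHub root.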
url = 'https://github.com/A1014280203/show-me-the-code'
url_list = list()
resp = requests.get(url)
doc = pyquery.PyQuery(resp.content.decode())
a_tags = doc.find('a')
for a in a_tags.items():
    href = a.attr('href')
    if not href:
        # skip anchors that have no href attribute
        continue
    if href.startswith('http'):
        url_list.append(href)
    elif href.startswith('/'):
        url_list.append('https://github.com' + href)
print(url_list) | [
"requests.get"
] | [((118, 135), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (130, 135), False, 'import requests\n')] |
# standard imports
import os
import glob
import inspect
from pprint import pprint
import pickle as pkl
import copy
import pandas as pd
import numpy as np
from tqdm import tqdm
import logging
import subprocess
import warnings
import itertools
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy.visualization import ZScaleInterval
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', AstropyWarning)
try:
from p_tqdm import p_map
_parallel = True
except ModuleNotFoundError:
print('package "p_tqdm" not installed, cannot do parallel processing')
_parallel = False
# internal imports
import LOSSPhotPypeline
import LOSSPhotPypeline.utils as LPPu
from LOSSPhotPypeline.image import Phot, FitsInfo, FileNames
# setup tqdm for pandas
tqdm.pandas()
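# Minimal driver sketch (the target name is a placeholder, and the exact import path for
# this class within the LOSSPhotPypeline package is an assumption):
#   >>> lpp = LPP('SN2023abc', interactive = True, wdir = '.')   # expects SN2023abc.conf
#   >>> lpp.run()   # or step through the pipeline manually with repeated lpp.next() calls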
class LPP(object):
'''Lick Observatory Supernova Search Photometry Reduction Pipeline'''
def __init__(self, targetname, interactive = True, parallel = True, cal_diff_tol = 0.05, force_color_term = False, max_display_phase = 120,
wdir = '.', cal_use_common_ref_stars = False, sep_tol = 8, pct_increment = 0.05, in_pct_floor = 0.8, autoloadsave = False):
'''Instantiation instructions'''
# basics from instantiation
self.targetname = targetname.replace(' ', '')
self.config_file = targetname + '.conf'
self.interactive = interactive
self.wdir = os.path.abspath(wdir) # working directory for running (particularly idl code)
if (parallel is True) and (_parallel) is True:
self.parallel = True
else:
self.parallel = False
self.cal_diff_tol = cal_diff_tol # starting calibration difference tolerance
self.abs_cal_tol = 0.2 # do not proceed with the pipeline if in non-interactive mode and cal tol exceeds this
self.min_ref_num = 2 # minimum number of ref stars
self.pct_increment = pct_increment # amount to increment percentage requirement down by if doing ref check
self.in_pct_floor = in_pct_floor # minimum percentage of images ref stars must be in if doing ref check
self.checks = ['filter', 'date'] # default checks to perform on image list
self.phase_limits = (-60, 2*365) # phase bounds in days relative to disc. date to keep if "date" check performed
self.cal_use_common_ref_stars = cal_use_common_ref_stars # override requirement that each image have all ref stars
self.sep_tol = sep_tol # radius around target in arcseconds to exclude candidate reference stars from
# log file
self.logfile = self.targetname.replace(' ', '') + '.log'
self.build_log()
# sourced from configuration file
self.targetra = None
self.targetdec = None
self.photsub = False
self.photmethod = 'all'
self.refname = 'TBD'
self.photlistfile = 'TBD'
# discovery date (mjd)
self.disc_date_mjd = None
# check if config file exists -- if not then generate template
if not os.path.exists(self.config_file):
self.log.warn('No configuration file detected, complete template ({}) before proceeding.'.format(self.config_file + '.template'))
LPPu.genconf(targetname = self.targetname, config_file = self.config_file + '.template')
return
# general variables
self.filter_set_ref = ['B', 'V', 'R', 'I', 'CLEAR']
self.first_obs = None
self.phot_cols = {'3.5p': 3, '5p': 5, '7p': 7, '9p': 9, '1fh': 11, '1.5fh': 13, '2fh': 15, 'psf': 17}
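        # phot_cols maps each photometry method to the column holding its magnitude in the
        # per-image photometry output (the paired uncertainty sits in the next column); these
        # offsets are consumed when the raw light curves are read in generate_raw_lcs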
self.calmethod = 'psf' # can be set to any key in phot_cols, but recommended is 'psf'
self.image_list = [] # list of image file names
self.phot_instances = [] # Phot instance for each image
self.aIndex = [] # indices of all images in phot_instances
self.wIndex = [] # subset of aIndex to work on
self.bfIndex = [] # indices of images with unsupported filters
        self.ucIndex = []             # indices of images that fail the WCS check, even though they carry the '_c' suffix
self.bdIndex = [] # indices of images with dates outside of phase boundaries
self.pfIndex = [] # indices of photometry failures
self.psfIndex = [] # indices of photometry (sub) failures
self.cfIndex = [] # indices of calibration failures
self.csfIndex = [] # indices of calibration (sub) failures
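        # the two lists below track images where the target (ID 1) is absent from the
        # calibrated photometry files (unsubtracted / subtracted); filled in generate_raw_lcs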
self.noIndex = []
self.nosIndex = []
self.mrIndex = pd.Index([]) # keep track of indices to remove manually
self.run_success = False # track run success
# calibration variables
self.cal_source = 'auto'
self.calfile = 'TBD'
self.calfile_use = 'TBD'
self.force_color_term = force_color_term
self.calibration_dir = 'calibration'
if not os.path.isdir(self.calibration_dir):
os.makedirs(self.calibration_dir)
self.radecfile = os.path.join(self.calibration_dir, self.targetname + '_radec.txt')
self.radec = None
self.cal_IDs = 'all'
self.cal_arrays = None
self.cal_force_clear = False
self.max_display_phase = max_display_phase # num days to show rel to disc for interactive calibration
# keep track of counts of color terms
self.color_terms = {'kait1': 0, 'kait2': 0, 'kait3': 0, 'kait4': 0,
'nickel1': 0, 'nickel2': 0,
'Landolt': 0}
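        # keys above are the recognized color-term / natural-system identifiers; the counts
        # of images using each are tallied later by get_color_term_used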
self.color_terms_used = None
# load configuration file
loaded = False
while not loaded:
try:
self.loadconf()
loaded = True
except FileNotFoundError:
LPPu.genconf(targetname = self.targetname, config_file = self.config_file + '.template')
print('Configuration could not be loaded. Template generated: {}'.format(self.config_file + '.template'))
response = input('Specify configuration file (*****.conf) or q to quit > ')
if 'q' == response.lower():
return
else:
self.config_file = response
# lightcurve variables
self.lc_dir = 'lightcurve'
self.lc_base = os.path.join(self.lc_dir, 'lightcurve_{}_'.format(self.targetname))
self.lc_ext = {'raw': '_natural_raw.dat',
'bin': '_natural_bin.dat',
'group': '_natural_group.dat',
'standard': '_standard.dat',
'ul': '_natural_ul.dat'}
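        # suffixes above follow the light-curve processing stages: per-image 'raw' mags are
        # binned, then grouped, then transformed to the standard system, while 'ul' collects
        # epochs with only limiting magnitudes (see raw2standard_lc and generate_raw_lcs)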
# galaxy subtraction variables
self.template_images = None
self.templates_dir = 'templates'
# data directories
self.data_dir = os.path.dirname(self.refname)
self.error_dir = self.data_dir + '_sim'
# steps in standard reduction procedure
self.current_step = 0
self.steps = [self.load_images,
self.check_images,
self.find_ref_stars,
self.match_refcal_stars,
self.do_galaxy_subtraction_all_image,
self.do_photometry_all_image,
self.get_sky_all_image,
self.do_calibration,
self.get_zeromag_all_image,
self.get_limmag_all_image,
self.generate_lc,
self.write_summary]
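        # the steps above are executed in order by next()/run(); current_step is the pointer
        # into this list and is preserved across sessions through save()/load()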
# save file
self.savefile = self.targetname.replace(' ', '') + '.sav'
if os.path.exists(self.savefile):
if self.interactive:
load = input('Load saved state from {}? ([y]/n) > '.format(self.savefile))
else:
load = 'n' # run fresh if in non-interactive mode
            if autoloadsave:
                load = 'y' # load the save file even in non-interactive mode when this keyword is set
if 'n' not in load.lower():
self.load()
# make sure that the selected calmethod is one of the photmethods
if self.calmethod not in self.photmethod:
self.log.warn('Calibration method must be one of the photometry methods. Exiting.')
return
###################################################################################################
# Configuration File Methods
###################################################################################################
def loadconf(self):
'''
reads config file and sets class attributes accordingly
the most accurate accounting of system state is stored in the binary savefile
'''
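        # The configuration file is whitespace-delimited "key  value" pairs ('#' starts a
        # comment); the keys read below are targetra, targetdec, photsub, calsource,
        # photmethod, refname, photlistfile, and forcecolorterm. A sketch with placeholder
        # values (coordinates, paths, and choices here are illustrative only):
        #   targetra        123.456789
        #   targetdec       12.345678
        #   photsub         no
        #   calsource       apass
        #   photmethod      all
        #   refname         data/ref_image_c.fit
        #   photlistfile    photlist.txt
        #   forcecolorterm  kait4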
# load config file and try to standardize keys
conf = pd.read_csv(self.config_file, header = None, delim_whitespace = True, comment = '#',
index_col = 0, squeeze = True).replace(np.nan, '')
conf.index = conf.index.str.lower()
# read and set values (including the type)
self.targetra = float(conf['targetra'])
self.targetdec = float(conf['targetdec'])
if conf['photsub'].lower() == 'yes': # defaults to False in all other cases
self.photsub = True
if conf['calsource'].lower() in ['psf','sdss','apass']: # only set if a known source is specified
self.cal_source = conf['calsource'].lower()
if conf['photmethod'].lower() == 'all':
self.photmethod = list(self.phot_cols.keys())
elif ',' not in conf['photmethod'].lower():
if conf['photmethod'].lower().strip() in self.phot_cols.keys():
self.photmethod = [conf['photmethod'].lower().strip()]
else:
print('{} is not a valid photometry method. Available options are:'.format(conf['photmethod'].strip()))
                print(', '.join(self.phot_cols.keys()))
self.photmethod = input('Enter selection(s) > ').strip().replace(' ', '').split(',')
else:
proposed = conf['photmethod'].strip().split(',')
if set(proposed).issubset(set(self.phot_cols.keys())):
self.photmethod = proposed
else:
print('At least one of {} is not a valid photometry method. Available options are:'.format(conf['photmethod'].strip()))
print(', '.join(self.phot_cols.keys()))
self.photmethod = input('Enter selection(s) > ').strip().replace(' ', '').split(',')
self.refname = conf['refname']
self.photlistfile = conf['photlistfile']
if conf['forcecolorterm'].strip() in self.color_terms.keys():
self.force_color_term = conf['forcecolorterm'].strip()
self.log.info('{} loaded'.format(self.config_file))
###################################################################################################
# Logging
###################################################################################################
def build_log(self):
'''starts and sets up log'''
self.log = logging.getLogger('LOSSPhotPypeline')
self.log.setLevel(logging.DEBUG)
# don't duplicate entries
if self.log.hasHandlers():
self.log.handlers.clear()
# internal logging
fh = logging.FileHandler(self.logfile)
fh.setFormatter(logging.Formatter('%(asctime)s in %(funcName)s with level %(levelname)s ::: %(message)s'))
self.log.addHandler(fh)
# if in interactive mode, print log at or above INFO on screen
if self.interactive:
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
sh.setFormatter(logging.Formatter('\n'+'*'*60+'\n%(message)s\n'+'*'*60))
self.log.addHandler(sh)
# used by contextlib to log all idl and bash outputs, while hiding from screen
self.log.write = lambda msg: self.log.debug('[external] ' + msg) if msg != '\n' else None
self.log.info('Welcome to the LOSS Photometry Pypeline (LPP)')
###################################################################################################
# UI / Automation Methods
###################################################################################################
def __iter__(self):
return self
def next(self, *args, **kwargs):
'''performs next reduction step (arguments for that step can be passed through)'''
if self.current_step < len(self.steps):
self.steps[self.current_step](*args, **kwargs)
self.current_step += 1
self.save()
self.summary()
else:
raise StopIteration
def skip(self):
'''skip current step'''
self.log.info('skipping step: {}'.format(self.steps[self.current_step].__name__))
self.go_to(self.current_step + 1)
self.summary()
def go_to(self, step = None):
'''go to specified step, or choose interactively'''
if type(step) == int:
self.current_step = step
self.summary()
else:
self.summary()
print('\nChoose an option:\n')
print('primary reduction steps:')
for i, step in enumerate(self.steps):
if i == self.current_step:
print('{} --- {} (current step)'.format(i, step.__name__))
else:
print('{} --- {}'.format(i, step.__name__))
print('\nadditional options:')
print('n --- add new image(s) by filename(s)')
print('nf --- add new images from file of names')
print('p --- plot light curve from file')
print('c --- cut points from specific light curve')
print('cr --- cut points from specific raw light curve and regenerate subsequent light curves')
print('q --- quit\n')
resp = input('selection > ').lower()
if 'n' == resp:
                new_images = input('enter name(s) of new images (comma separated) > ')
if ',' not in new_images:
new_image_list = [new_images]
else:
new_image_list = [fl.strip() for fl in new_images.split(',')]
self.process_new_images(new_image_list = new_image_list)
elif 'nf' == resp:
new_image_file = input('enter name of new image file > ')
self.process_new_images(new_image_file = new_image_file)
elif 'p' == resp:
lc_file = input('enter light curve file (including relative path) to plot > ')
self.plot_lc([lc_file])
elif (resp == 'c') or (resp == 'cr'):
lc_file = input('enter light curve file (including relative path) to cut points from > ')
regenerate = False
if resp == 'cr':
regenerate = True
                self.cut_lc_points(lc_file, regenerate = regenerate)
else:
try:
self.current_step = int(resp)
except ValueError:
return
self.summary()
def save(self):
'''saves current state of pipeline'''
vs = vars(self).copy()
vs.pop('steps')
vs.pop('log')
with open(self.savefile, 'wb') as f:
pkl.dump(vs, f)
self.log.info('{} written'.format(self.savefile))
def load(self, savefile = None, summary = True):
'''re-initializes pipeline from saved state in file'''
if savefile is None:
savefile = self.savefile
with open(savefile, 'rb') as f:
vs = pkl.load(f)
for v in vs.keys():
s = 'self.{} = vs["{}"]'.format(v, v)
exec(s)
self.log.info('{} loaded'.format(savefile))
if summary:
self.summary()
def summary(self):
'''print summary of pipeline status'''
print('\n' + '*'*60)
print('Reduction status for {}'.format(self.targetname))
print('Interactive: {}'.format(self.interactive))
print('Photsub Mode: {}'.format(self.photsub))
print('*'*60 + '\n')
if self.current_step == 0:
print('Beginning of reduction pipeline.\n')
else:
print('Previous step: {}'.format(self.steps[self.current_step - 1].__name__))
print(self.steps[self.current_step - 1].__doc__ + '\n')
try:
print('--> Next step: {}'.format(self.steps[self.current_step].__name__))
print(self.steps[self.current_step].__doc__ + '\n')
except IndexError:
print('End of reduction pipeline.')
self.save()
return
try:
print('----> Subsequent step: {}'.format(self.steps[self.current_step + 1].__name__))
print(self.steps[self.current_step + 1].__doc__ + '\n')
except IndexError:
print('End of reduction pipeline.')
def run(self, skips = []):
'''run through reduction steps'''
while True:
if self.current_step in skips:
self.skip()
else:
try:
self.next()
except StopIteration:
break
def show_variables(self):
'''prints instance variables'''
pprint(vars(self))
def show_methods(self):
'''show available methods'''
print('method: docstring')
for name in LPP.__dict__.keys():
if name[:2] != '__' and name != 'show_methods':
print('{}: {}'.format(name, LPP.__dict__[name].__doc__))
###################################################################################################
# Reduction Pipeline Methods
###################################################################################################
def load_images(self):
'''reads image list file to generate lists of image names and Phot instances'''
self.image_list = pd.read_csv(self.photlistfile, header = None, delim_whitespace = True,
comment = '#', squeeze = True)
if self.interactive:
print('\nSelected image files')
print('*'*60 + '\n')
print(self.image_list)
print('\n')
self.log.info('image list loaded from {}'.format(self.photlistfile))
self.log.info('generating list of Phot instances from image list')
self.phot_instances = self._im2inst(self.image_list) # radec is None if running in order
# set indices
self.aIndex = self.image_list.index
self.wIndex = self.aIndex
def check_images(self):
'''only keep images that are in a supported filter and without file format issues'''
# filter check
if 'filter' in self.checks:
filter_check = lambda img: True if img.filter.upper() in self.filter_set_ref else False
self.log.info('checking filters')
bool_idx = self.phot_instances.loc[self.wIndex].progress_apply(filter_check)
self.bfIndex = self.wIndex[~pd.Series(bool_idx)]
self.log.info('dropping {} images due to unsupported filter'.format(len(self.bfIndex)))
self.wIndex = self.wIndex.drop(self.bfIndex)
# uncal check
if 'uncal' in self.checks:
cal_check = lambda img: True if ('RADECSYS' not in img.header) else (False if (img.header['RADECSYS'] == '-999') else True)
self.log.info('checking images for WCS')
bool_idx = self.phot_instances.loc[self.wIndex].progress_apply(cal_check)
self.ucIndex = self.wIndex[~pd.Series(bool_idx)]
self.log.info('dropping {} images for failed WCS'.format(len(self.ucIndex)))
self.wIndex = self.wIndex.drop(self.ucIndex)
if 'date' in self.checks:
if self.disc_date_mjd is None:
self.log.warn('discovery date not set, cannot do date check')
return
date_check = lambda img: True if ((img.mjd >= (self.disc_date_mjd + self.phase_limits[0])) and
(img.mjd <= (self.disc_date_mjd + self.phase_limits[1]))) else False
self.log.info('checking phases')
bool_idx = self.phot_instances.loc[self.wIndex].progress_apply(date_check)
self.bdIndex = self.wIndex[~pd.Series(bool_idx)]
self.log.info('dropping {} images that are outside of phase bounds'.format(len(self.bdIndex)))
self.wIndex = self.wIndex.drop(self.bdIndex)
# if there are none left, end pipeline
if len(self.wIndex) == 0:
self.log.warn('all images removed by checks --- cannot proceed')
self.run_success = False
self.current_step = self.steps.index(self.write_summary) - 1
return
def find_ref_stars(self):
'''identify all suitable stars in ref image, compute ra & dec, write radecfile, store in instance'''
# if radecfile already exists, no need to do it
if os.path.exists(self.radecfile):
self.log.info('radecfile already exists, loading only')
self.radec = pd.read_csv(self.radecfile, delim_whitespace=True, skiprows = (0,1,3,4,5), names = ['RA','DEC'])
# set radec in Phot instances
for img in self.phot_instances.loc[self.wIndex]:
img.radec = self.radec
return
        if self.refname == '':
self.log.warn('refname has not been assigned, please do it first!')
return
# instantiate object to manage names
ref = Phot(self.refname, calmethod = self.calmethod)
# use sextractor to extract all stars to be used as refstars
sxcp = os.path.join(os.path.dirname(inspect.getfile(LOSSPhotPypeline)), 'conf', 'sextractor_config')
config = os.path.join(sxcp, 'kait.sex')
filt = os.path.join(sxcp, 'gauss_2.0_5x5.conv')
par = os.path.join(sxcp, 'kait.par')
star = os.path.join(sxcp, 'default.nnw')
cmd_list = ['sex', self.refname,
'-c', config,
'-PARAMETERS_NAME', par,
'-FILTER_NAME', filt,
'-STARNNW_NAME', star,
'-CATALOG_NAME', ref.sobj,
'-CHECKIMAGE_NAME', ref.skyfit]
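        # SExtractor is run with the packaged KAIT configuration; the detected-source catalog
        # is written to ref.sobj (read below and sorted by MAG_APER) and the sky check image
        # to ref.skyfit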
p = subprocess.Popen(cmd_list, stdout = subprocess.PIPE, stderr = subprocess.PIPE, universal_newlines = True)
stdout, stderr = p.communicate()
self.log.debug(stdout)
self.log.debug(stderr)
# make sure process succeeded
if not os.path.exists(ref.sobj):
self.log.warn('SExtractor failed --- no sobj file generated, check!')
return
# read sobj file of X_IMAGE and Y_IMAGE columns, as well as MAG_APER for sort
with fits.open(ref.sobj) as hdul:
data = hdul[1].data
# sort according to magnitude, from small/bright to hight/faint
data.sort(order = 'MAG_APER')
imagex = data.X_IMAGE
imagey = data.Y_IMAGE
# transform to RA and DEC using ref image header information
cs = WCS(header = ref.header)
imagera, imagedec = cs.all_pix2world(imagex, imagey, 0)
# remove any identified "stars" that are too close to target
coords = SkyCoord(imagera, imagedec, unit = (u.deg, u.deg))
target_coords = SkyCoord(self.targetra, self.targetdec, unit = (u.deg, u.deg))
offsets = coords.separation(target_coords).arcsecond
imagera = imagera[offsets > self.sep_tol]
imagedec = imagedec[offsets > self.sep_tol]
# write radec file
with open(self.radecfile, 'w') as f:
f.write('TARGET\n')
f.write(' RA DEC\n')
f.write(' {:.7f} {:.7f}\n'.format(self.targetra, self.targetdec))
f.write('\nREFSTARS\n')
f.write(' RA DEC\n')
for i in range(len(imagera)):
f.write(' {:.7f} {:.7f}\n'.format(imagera[i], imagedec[i]))
self.log.info('{} written'.format(self.radecfile))
self.radec = pd.read_csv(self.radecfile, delim_whitespace=True, skiprows = (0,1,3,4,5), names = ['RA','DEC'])
# set radec in Phot instances
for img in self.phot_instances.loc[self.wIndex]:
img.radec = self.radec
def match_refcal_stars(self):
'''get calibration catalog, and match stars to ref stars -- only do if needed'''
if os.path.exists(os.path.join(self.calibration_dir, self.calfile)) is False:
# get calibration catalog
catalog = LPPu.astroCatalog(self.targetname, self.targetra, self.targetdec, relative_path = self.calibration_dir)
catalog.get_cal(method = self.cal_source)
self.calfile = catalog.cal_filename
self.cal_source = catalog.cal_source
self.log.info('calibration data sourced')
self.log.info('matching ref stars to catalog stars and selecting 40 brightest')
self.get_cal_info()
radec = SkyCoord(self.radec.loc[1:, 'RA'], self.radec.loc[1:, 'DEC'], unit = (u.deg, u.deg))
cal_cat = pd.read_csv(os.path.join(self.calibration_dir, self.calfile), delim_whitespace = True)
cal = SkyCoord(cal_cat.loc[:, 'ra'], cal_cat.loc[:, 'dec'], unit = (u.deg, u.deg))
idx, d2d, d3d = match_coordinates_sky(cal, radec)
cal_use = cal_cat.iloc[d2d.arcsecond < 5] # calibration stars that match within 5"
cal_use.index = self.radec.loc[1:].iloc[idx[d2d.arcsecond < 5]].index - 1 # don't count sn and align indices with radecfile
cal_use.insert(0, 'starID', cal_use.index)
cal_use = cal_use.sort_values(by = 'r').drop_duplicates(subset = 'starID', keep = 'first')
self.cal_use = cal_use.iloc[:40] # select top 40 brightest
# write "use" files
with open(os.path.join(self.calibration_dir, self.calfile_use), 'w') as outfile:
outfile.write(self.cal_use.to_string(index = False))
catalog = LPPu.astroCatalog(self.targetname, self.targetra, self.targetdec, relative_path = self.calibration_dir)
catalog.cal_filename = self.calfile_use
catalog.cal_source = self.cal_source
catalog.to_natural()
self.cal_arrays = catalog.get_cal_arrays(index_order = self.cal_use.index)
# show ref stars (and cut if interactive mode)
if self.interactive:
self._display_refstars(icut = True)
else:
self._display_refstars()
def do_galaxy_subtraction_all_image(self, subreg = 0.9):
'''performs galaxy subtraction on all selected image files'''
if not self.photsub:
self.log.warn('not in photsub mode, skipping galaxy subtraction')
return
self.log.info('starting galaxy subtraction')
if self.template_images is None:
self.load_templates()
if self.template_images is None:
self.log.warn('could not get suitable template images, running without galaxy subtraction')
self.photsub = False
return
# set up for parallelization
ti = self.template_images
fn = lambda img: img.galaxy_subtract(ti, subreg = subreg)
# do galaxy subtraction in the appropriate mode
if self.parallel is True:
res = p_map(fn, self.phot_instances.loc[self.wIndex].tolist())
else:
res = []
for img in tqdm(self.phot_instances.loc[self.wIndex].tolist()):
res.append(fn(img))
# extract results, log, and determine if successful
res = pd.DataFrame(res, columns = ['success', 'log'])
res['log'].apply(lambda log_entry: self._log_idl(*log_entry))
if not res['success'].all():
self.log.warn('photsub failed (probably b/c of missing templates), running without galaxy subtraction')
self._get_template_candidates()
self.photsub = False
self.log.info('galaxy subtraction done')
def do_photometry_all_image(self, forcesky = False):
'''performs photometry on all selected image files'''
self.log.info('starting photometry (galsub: {})'.format(self.photsub))
# set up for parallelization
ps = self.photsub
fn = lambda img: img.do_photometry(photsub = ps, forcesky = forcesky)
# do photometry in the appropriate mode
if self.parallel is True:
res = p_map(fn, self.phot_instances.loc[self.wIndex].tolist())
else:
res = []
for img in tqdm(self.phot_instances.loc[self.wIndex].tolist()):
res.append(fn(img))
# extract results, log, and remove failures
res = pd.DataFrame(res, columns = ['unsub', 'sub', 'log'])
res['log'].apply(lambda log_entry: self._log_idl(*log_entry))
self.pfIndex = self.wIndex[~res['unsub']]
self.log.warn('photometry failed on {} out of {} images'.format(len(self.pfIndex), len(self.wIndex)))
if self.photsub is False:
self.wIndex = self.wIndex.drop(self.pfIndex)
else:
self.psfIndex = self.wIndex[~res['sub']]
self.log.warn('photometry (sub) failed on {} out of {} images'.format(len(self.psfIndex), len(self.wIndex)))
self.wIndex = self.wIndex.drop(self.pfIndex.intersection(self.psfIndex))
if len(self.wIndex) == 0:
self.log.warn('all images failed, cannot proceed')
self.run_success = False
self.current_step = self.steps.index(self.write_summary) - 1
return
self.log.info('photometry done')
def get_sky_all_image(self):
'''get and set sky value for every phot instance'''
self.log.info('getting sky value for each image')
self.phot_instances.loc[self.wIndex].progress_apply(lambda img: img.get_sky())
def calibrate(self, final_pass = False):
'''performs calibration on all images included in photlistfile, using outputs from do_photometry_all_image'''
if not final_pass:
self.log.info('performing calibration')
else:
self.log.info('doing final calibration')
# reset trackers
self.cfIndex = []
self.csfIndex = []
# calibration list
cal_list = []
# iterate through image list and execute calibration script on each
for idx, img in tqdm(self.phot_instances.loc[self.wIndex].iteritems(), total = len(self.wIndex)):
# set photsub mode appropriately
if self.photsub is False:
ps = False
elif (self.photsub is True) and (idx in self.psfIndex):
ps = False
else:
ps = True
# do calibration
phot = img.calibrate(self.cal_IDs, self.cal_arrays[img.color_term].loc[:, img.filter.upper()],
self.cal_arrays[img.color_term].loc[:, 'E'+img.filter.upper()], sub = ps, write_dat = final_pass)
phot.rename(columns = {self.calmethod: 'Mag_obs'}, inplace = True)
# add comparison information
phot.insert(0, 'Filter', img.filter.upper())
phot.loc[self.cal_IDs, 'RA_cal'] = self.cal_arrays[img.color_term].loc[self.cal_IDs, 'RA']
phot.loc[self.cal_IDs, 'DEC_cal'] = self.cal_arrays[img.color_term].loc[self.cal_IDs, 'DEC']
phot.loc[self.cal_IDs, 'Mag_cal'] = self.cal_arrays[img.color_term].loc[self.cal_IDs, img.filter.upper()]
phot.loc[self.cal_IDs, 'RA_diff'] = np.abs(phot.loc[self.cal_IDs, 'RA_obs'] - phot.loc[self.cal_IDs, 'RA_cal'])
phot.loc[self.cal_IDs, 'DEC_diff'] = np.abs(phot.loc[self.cal_IDs, 'DEC_obs'] - phot.loc[self.cal_IDs, 'DEC_cal'])
cal_list.append(phot.loc[self.cal_IDs, ['Filter', 'RA_diff', 'DEC_diff', 'Mag_obs', 'Mag_cal', 'ref_in', 'system']])
# check for success if in final pass mode
if final_pass:
if (os.path.exists(img.psfdat) is False):
self.cfIndex.append(idx)
if (self.photsub is True) and (os.path.exists(img.psfsubdat) is False):
self.csfIndex.append(idx)
        # organize calibrators and compute global metrics
self.calibrators = pd.concat([df.loc[self.cal_IDs, :] for df in cal_list], keys = self.wIndex)
self.calibrators['Mag_diff'] = self.calibrators['Mag_obs'] - self.calibrators['Mag_cal']
# remove failures if in final pass mode
if final_pass:
self.cfIndex = pd.Index(self.cfIndex)
self.csfIndex = pd.Index(self.csfIndex)
self.log.warn('calibration failed on {} out of {} images'.format(len(self.cfIndex), len(self.wIndex)))
self.wIndex = self.wIndex.drop(self.cfIndex) # processing based only on non-subtracted images
if self.photsub is True:
self.log.warn('calibration (sub) failed on {} out of {} images'.format(len(self.csfIndex), len(self.wIndex)))
if len(self.wIndex) == 0:
self.log.warn('all images failed, cannot proceed')
self.run_success = False
self.current_step = self.steps.index(self.write_summary) - 1
return
def do_calibration(self, use_filts = 'all', sig = 3, min_cut_diff = 0.5, quality_cuts = True):
'''check calibration and make cuts as needed'''
self.log.info('performing calibration')
# get filters used
self.filters = set(self.phot_instances.loc[self.wIndex].apply(lambda img: img.filter.upper()))
if use_filts == 'all':
use_filts = self.filters
if self.cal_IDs == 'all':
self.cal_IDs = self.cal_arrays['kait4'].index # choice of color term here is arbitrary
# iterate until acceptable tolerance is reached
accept_tol = False
skip_calibrate = False
iter_cnt = -1
while not accept_tol:
iter_cnt += 1
# run calibration
if not skip_calibrate:
self.calibrate()
skip_calibrate = False
# find indices (img, cal_ID) where Mag_obs failed to measure
locs = self.calibrators.loc[self.calibrators['Mag_obs'].isnull(), :].index
# pct of succ meas as a function of img
img_succ = (1 - locs.levels[0][locs.labels[0]].value_counts() / len(self.cal_IDs))
# pct of succ meas as a function of cal_ID
cal_succ = (1 - locs.levels[1][locs.labels[1]].value_counts() / len(self.wIndex))
# run minimal quality cuts if requested --- these are the first and second iterations
if (quality_cuts is True) and (iter_cnt < 2):
# remove any cal IDs or images with a very low success rate
ID_cut = cal_succ.index[cal_succ < 0.4]
if (len(ID_cut) > 0) and (iter_cnt == 0):
self.cal_IDs = self.cal_IDs.drop(ID_cut)
self.log.info('cut ID(s): {} from minimal quality cut'.format(ID_cut))
continue
elif iter_cnt == 0:
self.log.info('all IDs passed minimal quality cut')
iter_cnt = 1
img_cut = img_succ.index[img_succ < 0.4]
if (len(img_cut) > 0) and (iter_cnt == 1):
self.manual_remove(img_cut)
self.log.info('cut image(s): {} from minimal quality cut'.format(img_cut))
continue
elif iter_cnt == 1:
self.log.info('all images passed minimal quality cut')
iter_cnt = 2
elif iter_cnt < 2:
iter_cnt = 2
# cut to use common ref stars if requested --- these is the fourth iteration
# iteration 3 is used to remove outlier images before applying this cut
if (self.cal_use_common_ref_stars is True) and (iter_cnt == 3):
self.log.info('finding common ref stars')
accept = False
cnt = 0
while not accept:
current_pct = 1 - cnt * self.pct_increment
tmp = cal_succ[cal_succ >= current_pct]
if len(tmp) > self.min_ref_num:
self.log.info('{} ref stars are in at least {} pct of images, using these'.format(len(tmp), 100*current_pct))
accept = True
elif current_pct < self.in_pct_floor:
self.log.warn('reached minimum tolerance for pct image including ref stars, quitting')
                        self.run_success = False
self.current_step = self.steps.index(self.write_summary) - 1
return
cnt += 1
if len(tmp) < len(self.cal_IDs):
self.cal_IDs = tmp.index
continue
# instantiate trackers
cut_list = [] # store IDs that will be cut
nan_list = [] # IDs of NaN to be cut immediately
full_list = []
bad_img_list = [] # indices of images that are <sig> outliers
single_cut_idx = None
tmp_max = self.cal_diff_tol
df_list = []
# group by filter and perform comparison
for filt, group in self.calibrators.groupby('Filter', sort = False):
# use specific filters if specified
if filt not in use_filts:
continue
# if clear is not the only filter, skip it in comparison unless forced to use
if (len(self.filters) > 1) and ('CLEAR' in self.filters) and (filt == 'CLEAR') and (not self.cal_force_clear):
continue
# compute metrics
df = group.median(level = 1)
df.loc[:, 'pct_im'] = group['Mag_obs'].notnull().sum(level=1) / len(group['Mag_obs'].groupby(level=0))
df.loc[:, 'std_obs'] = group.std(level = 1).loc[:, 'Mag_obs']
df = df.sort_index()
df.loc[:, 'Diff'] = np.abs(df.loc[:, 'Mag_diff'])
# identify possible exclusions
cut_list.extend(list(df.index[df.loc[:, 'Diff'] > self.cal_diff_tol]))
nan_list.extend(list(df.index[df.loc[:, 'Diff'].isnull()]))
if len(nan_list) > 0:
break
full_list = list(df.index) # ok to overwrite b/c same each time
## exclude outlier images by iterating through all cal IDs and finding images of <sig> outliers
for id in self.cal_IDs:
selection = self.calibrators.loc[self.calibrators['Filter'] == filt, :].loc[(self.wIndex, id),['Mag_obs', 'system']]
for sys, grp in selection.groupby('system', sort = False):
grp = grp.loc[grp['Mag_obs'].notnull(), :]
mags = grp.loc[:, 'Mag_obs'].values
index = grp.index.levels[0][grp.index.labels[0]] # image indices
if len(mags) > 0:
bad_img_list.extend(index[np.abs(mags - mags.mean()) > np.max([min_cut_diff, sig * mags.std()])])
if self.interactive:
print('\nFilter: {}'.format(filt))
print('*'*60)
rnd = pd.Series([2,4,4,3,3,3,3], index = ['pct_im', 'RA_diff', 'DEC_diff', 'Mag_cal', 'Mag_obs', 'std_obs', 'Mag_diff'])
print(df.loc[:, ['pct_im', 'RA_diff', 'DEC_diff', 'Mag_cal', 'Mag_obs', 'std_obs', 'Mag_diff']].round(rnd))
else:
# find index and value of maximum diff
maxi = df.loc[:, 'Diff'].idxmax()
maxd = df.loc[maxi, 'Diff']
if maxd > tmp_max:
single_cut_idx = maxi
tmp_max = maxd
df.insert(0, 'Filter', filt)
df_list.append(df)
cut_list = list(set(cut_list))
bad_img_list = list(set(bad_img_list))
# remove NaN
if len(nan_list) > 0:
self.log.info('cutting ID(s) {} for NaN'.format(', '.join([str(i) for i in nan_list])))
self.cal_IDs = self.cal_IDs.drop(nan_list)
continue
# make cuts to refstars as needed
if self.interactive:
# show ref stars and calibrated light curves
fig, ax = plt.subplots(1, 2, figsize = (12, 6))
self._display_refstars(ax = ax[0], display = True)
if self.photsub:
r = self.phot_instances.loc[self.wIndex].apply(lambda img: pd.Series([img.mjd, img.filter, img.phot_sub.loc[-1, self.calmethod],
img.phot_sub.loc[-1, self.calmethod + '_err'], img.color_term]))
else:
r = self.phot_instances.loc[self.wIndex].apply(lambda img: pd.Series([img.mjd, img.filter, img.phot.loc[-1, 'Mag_obs'],
img.phot.loc[-1, self.calmethod + '_err'], img.color_term]))
r.columns = ('mjd', 'filter', 'mag', 'emag', 'system')
p = LPPu.plotLC(offset_scale = 2)
for idx, ct in enumerate(set(r['system'])):
fs = 'full'
if 'nickel' in ct:
fs = 'none'
for filt in set(r['filter']):
selector = (r['filter'] == filt) & r['mag'].notnull() & (r['system'] == ct)
if (self.max_display_phase != 0) and (self.max_display_phase != 'all'):
selector = selector & (r['mjd'] - r['mjd'].min() < self.max_display_phase)
line, = ax[1].plot(r.loc[selector, 'mjd'], r.loc[selector, 'mag'] + p._offset(filt), c = p._color(filt),
marker = ['o', 'D', 's', 'v', '^'][idx], linestyle = 'None', picker = 3,
label = '{},{}'.format(filt, ct), fillstyle = fs)
ax[1].invert_yaxis()
ax[1].set_xticks(())
ax[1].set_yticks(())
x0, x1 = ax[1].get_xlim()
y0, y1 = ax[1].get_ylim()
ax[1].set_aspect(np.abs((x1-x0)/(y1-y0)))
plt.tight_layout()
def onpick(event):
ind = event.ind[0]
filt, sys = event.artist._label.split(',')
row = r.loc[(r['filter'] == filt) & (r['system'] == sys) & (r['mjd'] == event.artist._x[ind]), :]
id = row.index[0]
cal = self.phot_instances.loc[id].phot.loc[self.cal_IDs, 'Mag_obs']
print('\nClicked Point Information:')
print('\tImage ID: {}'.format(id))
print('\tImage Name: {}'.format(self.image_list.loc[id]))
print('\tMJD: {:.1f}'.format(row['mjd'].item()))
print('\tMag: {:.1f} pm {:.1f}'.format(row['mag'].item(), row['emag'].item()))
print('\tFilter: {}'.format(filt))
print('\tSystem: {}'.format(sys))
print('\tcal IDs used: {}/{}'.format(len(cal.loc[cal.notnull()]), len(cal)))
print('\tfailed cal IDs: {}'.format(', '.join([str(i) for i in sorted(cal.loc[cal.isnull()].index)])))
print('\nChoice >')
cid = fig.canvas.mpl_connect('pick_event', lambda event: onpick(event))
print('*'*60)
nshow = np.min([len(cal_succ), len(img_succ), 10])
if nshow > 0:
                print('\nSuccess rate per cal ID and per image (worst {})'.format(nshow))
print('{:<12} {:<12}'.format('cal ID', 'image'))
for c, i in itertools.zip_longest(cal_succ.iloc[:nshow].index, img_succ.iloc[:nshow].index):
print('{:<4} {:<7} {:<4} {:<7}'.format(c, round(cal_succ.loc[c], 3), i, round(img_succ.loc[i], 3)))
# warn if any individual images have too few ref stars
ref_counts = self.calibrators['Mag_obs'].notnull().sum(level = 0)
if (ref_counts < self.min_ref_num).sum() > 0:
print('\nWarning - the following image(s) have below the minimum number of ref stars ({}):'.format(self.min_ref_num))
print(ref_counts.index[ref_counts < self.min_ref_num])
if (ref_counts == self.min_ref_num).sum() > 0:
print('\nWarning - the following image(s) have the minimum number of ref stars ({}):'.format(self.min_ref_num))
print(ref_counts.index[ref_counts == self.min_ref_num])
print('\nDo not cut the following ID(s) to avoid falling below the minimum:')
idx_selector = (ref_counts.index[ref_counts == self.min_ref_num], self.cal_IDs)
num_affected = self.calibrators.loc[idx_selector, 'Mag_obs'].notnull().sum(level=1)
print(num_affected.index[num_affected > 0].sort_values())
if len(bad_img_list) > 0:
print('\nWarning - the following image(s) are outliers:')
print(bad_img_list)
print('\nAt tolerance {}, {} ID(s) (out of {}) will be cut'.format(self.cal_diff_tol, len(cut_list), len(full_list)))
print(sorted(cut_list))
print('\nSelect an option below (or click on light curve points to get info):')
print('\tAccept cuts with tolerance of {} mag ([y])'.format(self.cal_diff_tol))
print('\tAdjust tolerance [enter float between 0 and 1]')
print('\tCut calibration star(s) by ID(s) [comma separated list of IDs to cut]')
print('\tDisplay image ["d" followed by index (e.g. d162)]')
print('\tCut image(s) ["c" followed by comma separated indexes (e.g. c162,163)]')
print('\tView measured mags for specific cal star ["<passband>" followed by cal ID (e.g. B5)]')
                response = input('\nChoice > ')
fig.canvas.mpl_disconnect(cid)
plt.ioff()
plt.close()
if (response == '') or ('y' in response.lower()):
self.cal_IDs = self.cal_IDs.drop(cut_list)
accept_tol = True
elif '.' in response:
self.cal_diff_tol = float(response)
skip_calibrate = True
elif response.lower()[0] == 'd':
self.compare_image2ref(int(response[1:]))
skip_calibrate = True
elif (response.lower()[0] == 'c') and (response.lower()[1] != 'l'):
self.manual_remove([int(i) for i in response[1:].split(',')])
elif response[0] in self.filters:
self._display_obs_cal_mags(response[0], int(response[1:]))
skip_calibrate = True
elif response[:5].lower() == 'clear':
self._display_obs_cal_mags(response[:5], int(response[5:]))
else:
self.cal_IDs = self.cal_IDs.drop([int(i) for i in response.split(',')])
elif (len(bad_img_list) > 0):
self.log.info('removing {} outlier image(s): {}'.format(len(bad_img_list), bad_img_list))
self.manual_remove(bad_img_list)
elif single_cut_idx is None:
accept_tol = True
elif len(full_list) > self.min_ref_num:
self.log.info('cutting ID {} for exceeding tolerance and re-running calibration'.format(single_cut_idx))
self.cal_IDs = self.cal_IDs.drop([single_cut_idx])
elif self.cal_diff_tol <= self.abs_cal_tol:
                self.cal_diff_tol += 0.05
                self.log.info('increasing tolerance to {} and re-running calibration'.format(self.cal_diff_tol))
else:
self.log.warn('calibration tolerance exceeds {}, cannot proceed'.format(self.abs_cal_tol))
self.run_success = False
self.current_step = self.steps.index(self.write_summary) - 1
return
with open(os.path.join(self.calibration_dir, 'final_ref_stars.dat'), 'w') as outfile:
outfile.write(pd.concat([df.loc[self.cal_IDs, :] for df in df_list], sort = False).to_string())
# make final pass on calibration to track failures and write .dat files
self.calibrate(final_pass = True)
# write final "use" files
with open(os.path.join(self.calibration_dir, self.calfile_use), 'w') as outfile:
outfile.write(self.cal_use.loc[self.cal_IDs, :].to_string(index = False))
catalog = LPPu.astroCatalog(self.targetname, self.targetra, self.targetdec, relative_path = self.calibration_dir)
catalog.cal_filename = self.calfile_use
catalog.cal_source = self.cal_source
catalog.to_natural()
# show new ref stars
plt.ioff()
self._display_refstars()
def get_zeromag_all_image(self):
'''get and set zeromag for every phot instance'''
self.log.info('getting zeromag for each image')
self.phot_instances.loc[self.wIndex].progress_apply(lambda img: img.get_zeromag())
def get_limmag_all_image(self):
'''get and set limiting mag for every phot instance'''
self.log.info('getting limiting mag for each image')
self.phot_instances.loc[self.wIndex].progress_apply(lambda img: img.calc_limmag())
def generate_raw_lcs(self, color_term, photsub_mode = False):
'''builds raw light curve files from calibrated results'''
# light curve containers
columns = (';; MJD','etburst', 'mag', '-emag', '+emag', 'limmag', 'filter', 'imagename')
lc = {name: [] for name in columns}
lcs = {m: copy.deepcopy(lc) for m in self.photmethod}
# limiting mag containers
lm = {name: [] for name in columns}
lms = {m: copy.deepcopy(lm) for m in self.photmethod}
# iterate through files and extract LC information
for idx, img in self.phot_instances.loc[self.wIndex].iteritems():
# immediately skip if not the appropriate color term unless being forced
if (color_term != img.color_term) and (self.force_color_term is False):
continue
# skip failed images
if (idx in self.cfIndex) and (photsub_mode is False):
continue
elif ((idx in self.psfIndex) or (idx in self.csfIndex)) and (photsub_mode is True):
continue
# read photometry results
cols = (0,) + sum(((self.phot_cols[m], self.phot_cols[m] + 1) for m in self.photmethod), ())
col_names = ('ID',) + sum(((m + '_mag', m + '_err') for m in self.photmethod), ())
if photsub_mode is False:
dat = img.psfdat
else:
dat = img.psfsubdat
d = pd.read_csv(dat, header = None, delim_whitespace = True, comment = ';', usecols=cols, names = col_names)
# detect if no target in file
if 1 not in d['ID'].values:
self.log.warn('no object in calibrated photometry file: {}'.format(dat))
if photsub_mode is False:
self.noIndex.append(idx)
else:
self.nosIndex.append(idx)
# setup columns for each raw file
for m in self.photmethod:
if 1 not in d['ID'].values:
continue # skip these ones
mag = d[d['ID'] == 1][m + '_mag'].item()
err = d[d['ID'] == 1][m + '_err'].item()
if np.isnan(mag):
record = lms[m]
else:
record = lcs[m]
record['mag'].append(round(mag,5))
record['-emag'].append(round(mag - err,5))
record['+emag'].append(round(mag + err,5))
record[';; MJD'].append(round(img.mjd, 6))
record['etburst'].append(round(img.exptime / (60 * 24), 5)) # exposure time in days
record['filter'].append(img.filter.upper())
record['imagename'].append(img.cimg)
record['limmag'].append(round(img.limmag, 5))
# write raw lc files
for m in self.photmethod:
lc_raw_name = self._lc_fname(color_term, m, 'raw', sub = photsub_mode)
lc_raw = pd.DataFrame(lcs[m])
lc_raw.to_csv(lc_raw_name, sep = '\t', columns = columns, index = False, na_rep = 'NaN')
lm_raw_name = self._lc_fname(color_term, m, 'ul', sub = photsub_mode)
lm_raw = pd.DataFrame(lms[m])
lm_raw.to_csv(lm_raw_name, sep = '\t', columns = columns, index = False, na_rep = 'NaN')
p = LPPu.plotLC(lc_file = lc_raw_name, lm_file = lm_raw_name, name = self.targetname, photmethod = m)
p.plot_lc(extensions = ['.ps', '.png'])
def generate_bin_lc(self, infile, outfile):
'''wraps IDL lightcurve binning routine'''
idl_cmd = '''idl -e "lpp_dat_res_bin, '{}', '{}', OUTFILE='{}', /OUTPUT"'''.format(infile, outfile, outfile)
stdout, stderr = LPPu.idl(idl_cmd)
self._log_idl(idl_cmd, stdout, stderr)
def generate_group_lc(self, infile, outfile):
'''wraps IDL lightcurve grouping routine'''
idl_cmd = '''idl -e "lpp_dat_res_group, '{}', '{}', OUTFILE='{}'"'''.format(infile, outfile, outfile)
stdout, stderr = LPPu.idl(idl_cmd)
self._log_idl(idl_cmd, stdout, stderr)
if (', not doing group' in stdout) or (os.path.exists(outfile) is False):
return False
else:
return True
def generate_final_lc(self, color_term, infile, outfile):
'''wraps IDL routine to convert from natural system'''
idl_cmd = '''idl -e "lpp_invert_natural_stand_objonly, '{}', '{}', OUTFILE='{}', /OUTPUT"'''.format(infile, color_term, outfile)
stdout, stderr = LPPu.idl(idl_cmd)
self._log_idl(idl_cmd, stdout, stderr)
if not os.path.exists(outfile):
return False
else:
return True
def raw2standard_lc(self, infile):
'''wrap intermediate steps that transform light curves from "raw" to "standard"'''
# assign convenience variables
tmp = infile.split('_')
ct = tmp[tmp.index('natural') - 2] # get color term
m = tmp[tmp.index('natural') - 1] # get phot aperture
binfile = infile.replace('raw', 'bin')
groupfile = binfile.replace('bin', 'group')
lc = groupfile.replace('natural_group', 'standard')
# do intermediate light curve steps
self.generate_bin_lc(infile, binfile)
grp_result = self.generate_group_lc(binfile, groupfile)
if grp_result is False:
self.log.warn('no groupfile generated, skipping')
return False, False
std_result = self.generate_final_lc(ct, groupfile, lc)
if std_result is False:
self.log.warn('no standard lc generated, skipping')
return True, False
# plot
p = LPPu.plotLC(lc_file = lc, name = self.targetname, photmethod = m)
p.plot_lc(extensions = ['.ps', '.png'])
return True, True
def get_color_term_used(self):
'''get dictionary counting use of each color term'''
ct = self.phot_instances.loc[self.wIndex].apply(lambda img: img.color_term)
self.color_terms_used = dict(ct.value_counts())
def generate_lc(self, sub = False):
'''performs all functions to transform image photometry into calibrated light curve of target'''
self.log.info('generating and plotting light curves (sub mode: {})'.format(sub))
# set up file system
if not os.path.isdir(self.lc_dir):
os.makedirs(self.lc_dir)
self.get_color_term_used()
# generate raw light curves
self.log.info('generating raw light curves for the following color terms: {}'.format(', '.join(self.color_terms_used.keys())))
for ct in tqdm(self.color_terms_used.keys()):
self.generate_raw_lcs(ct, photsub_mode = sub)
# generate intermediate and final light curves
self.log.info('generating "standard" light curves')
for m in tqdm(self.photmethod):
all_nat = []
all_std = []
for ct in self.color_terms_used.keys():
group_succ, standard_succ = self.raw2standard_lc(self._lc_fname(ct, m, 'raw', sub = sub))
# only add group and standard if group has been updated
if group_succ is True:
all_nat.append((ct, self._lc_fname(ct, m, 'group', sub = sub)))
if standard_succ is True:
all_std.append(self._lc_fname(ct, m, 'standard', sub = sub))
# make "all" light curves
lc_nat = self._lc_fname('all', m, 'group', sub = sub)
concat_list = []
for row in all_nat:
tmp = pd.read_csv(row[1], delim_whitespace = True)
tmp.insert(3, 'SYSTEM', row[0])
concat_list.append(tmp)
if len(concat_list) > 0:
pd.concat(concat_list, sort = False).to_csv(lc_nat, sep = '\t', na_rep = 'NaN', index = False, float_format = '%6.3f')
p = LPPu.plotLC(lc_file = lc_nat, name = self.targetname, photmethod = m)
p.plot_lc(extensions = ['.ps', '.png'])
lc = self._lc_fname('all', m, 'standard', sub = sub)
concat_list = []
for fl in all_std:
concat_list.append(pd.read_csv(fl, delim_whitespace = True))
if len(concat_list) > 0:
pd.concat(concat_list, sort = False).to_csv(lc, sep = '\t', na_rep = 'NaN', index = False, float_format = '%6.3f')
p = LPPu.plotLC(lc_file = lc, name = self.targetname, photmethod = m)
p.plot_lc(extensions = ['.ps', '.png'])
self.log.info('done with light curves')
self.run_success = True
# use recursion to handle sub if needed
if (self.photsub is True) and (sub is False):
self.generate_lc(sub = True)
def get_errors(self, method = 'sn6', kpix_rad = 20, skip_photsub = False, photsub = 'auto', ps = 0.7965,
host_ra = None, host_dec = None, rseed = None):
'''inject artificial stars of same mag as SN at each epoch and compute mags'''
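        # star placement (see handle_img below): the default lays 30 stars on concentric
        # hexagons spaced by kpix_rad KAIT-scale pixels around the SN; method 'snhost' puts
        # 10 stars on a ring at the SN-host separation (requires host_ra/host_dec); method
        # 'squares' uses a square pattern around the SN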
self.log.info('doing artificial star simulation to determine errors')
# set seed
if rseed is not None:
np.random.seed(rseed)
# make directory for new generated data
if not os.path.exists(self.error_dir):
os.makedirs(self.error_dir)
if (photsub == 'auto') or (type(photsub) != type(True)):
photsub = self.photsub
# hard coded
n_stars = 30
# compute coords of n_stars around host
def handle_img(img, ret_xy = False, method = method):
cs = WCS(header = img.header)
# get pix coords of sn
sn_x, sn_y = cs.all_world2pix(self.targetra, self.targetdec, 0)
# select appropriate method
if method == 'snhost':
# ring of radius equal to distance between sn and nucleus
n_stars = 10
host_x, host_y = cs.all_world2pix(host_ra, host_dec, 0)
                theta_sn = np.arctan2(sn_y - host_y, sn_x - host_x) # angle relative to host
# coordinates of artificial stars
dtheta = np.linspace(2*np.pi/n_stars, 2*np.pi - 2*np.pi/n_stars, n_stars)
x = host_x + np.sqrt((sn_y - host_y)**2 + (sn_x - host_x)**2) * np.cos(theta_sn + dtheta)
y = host_y + np.sqrt((sn_y - host_y)**2 + (sn_x - host_x)**2) * np.sin(theta_sn + dtheta)
elif method == 'squares':
# square distribution as discussed w/ zwk and TdG
x_comp = np.cos(np.linspace(np.pi/4, 2*np.pi - np.pi / 4, 4))
x = sn_x + (kpix_rad * ps / img.pixscale) * np.concatenate([x_comp, 2 * x_comp, 2 * np.cos(np.pi/4) * np.array([1,0,-1,0])])
y_comp = np.sin(np.linspace(np.pi/4, 2*np.pi - np.pi / 4, 4))
y = sn_y + (kpix_rad * ps / img.pixscale) * np.concatenate([y_comp, 2 * y_comp, 2 * np.sin(np.pi/4) * np.array([0,1,0,-1])])
n_stars = len(x)
else:
# preferred method of concentric hexagons with radius increments of 20 KAIT pixels
dtheta = np.linspace(0, 2*np.pi, 7)[:-1]
x = sn_x + (kpix_rad * ps / img.pixscale) * np.concatenate((np.cos(dtheta), 2 * np.cos(dtheta + np.pi/6), 3 * np.cos(dtheta),
4 * np.cos(dtheta + np.pi/6), 5 * np.cos(dtheta)))
y = sn_y + (kpix_rad * ps / img.pixscale) * np.concatenate((np.sin(dtheta), 2 * np.sin(dtheta + np.pi/6), 3 * np.sin(dtheta),
4 * np.sin(dtheta + np.pi/6), 5 * np.sin(dtheta)))
n_stars = len(x)
# if just want pixel coords, return them along with WCS instance
if ret_xy is True:
return cs, x, y
# get magnitude of sn at this epoch
mag = np.nan
try:
if photsub is False:
mag = img.phot_raw.loc[-1, self.calmethod]
emag = img.phot_raw.loc[-1, self.calmethod + '_err']
else:
mag = img.phot_sub_raw.loc[-1, self.calmethod]
emag = img.phot_sub_raw.loc[-1, self.calmethod + '_err']
except AttributeError:
pass
if (np.isnan(mag)) or (np.isinf(mag)):
return False, None
# if random seed given, injected mags drawn from a gaussian of width set by uncertainty
if rseed is None:
inj_mags = [mag]*n_stars
else:
inj_mags = np.random.normal(mag, emag, n_stars).tolist()
assert n_stars == len(x)
# IDL call leads to new images in new directory
idl_cmd = '''idl -e "lpp_sim_fake_star, '{}', {}, {}, {}, OUTFILE='{}', PSFFITARRFILE='{}', /USENATURALMAG"'''.format(img.cimg,
x.tolist(), y.tolist(), inj_mags, os.path.join(self.error_dir, os.path.basename(img.cimg)), img.psffitarr)
stdout, stderr = LPPu.idl(idl_cmd)
self._log_idl(idl_cmd, stdout, stderr)
# do checks on success then return
if os.path.exists(os.path.join(self.error_dir, os.path.basename(img.cimg))):
return True, inj_mags
else:
return False, None
self.log.info('creating images with artificial stars')
succ = []
mags = []
for img in tqdm(self.phot_instances.loc[self.wIndex].tolist()):
s, m = handle_img(img)
succ.append(s)
if m is not None:
mags.append(m)
# drop images with no mag
self.wIndex = self.wIndex[pd.Series(succ)]
# instantiate pipeline instance and inherit many parent attributes
sn = LPP(self.targetname, interactive = False, parallel = self.parallel, cal_diff_tol = self.cal_diff_tol, force_color_term = self.force_color_term,
wdir = self.wdir, cal_use_common_ref_stars = self.cal_use_common_ref_stars, autoloadsave = False, sep_tol = self.sep_tol)
vs = vars(self).copy()
vs.pop('steps')
vs.pop('log')
vs.pop('phot_instances')
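        # copy the remaining parent attributes onto the simulation instance (steps, log, and phot_instances are rebuilt for it)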
for v in vs.keys():
s = 'sn.{} = vs["{}"]'.format(v, v)
exec(s)
sn.interactive = False
self.log.info('running pipeline steps on images with artificial stars')
# change image paths and load instances
sn.image_list = sn.image_list.apply(lambda fl: os.path.join(self.error_dir, os.path.basename(fl)))
sn.phot_instances = sn._im2inst(sn.image_list.loc[sn.wIndex], mode = 'quiet')
# include artificial stars in radec
cs, x, y = handle_img(Phot(self.refname, calmethod = self.calmethod), ret_xy = True)
fake_ra, fake_dec = cs.all_pix2world(x, y, 0)
for img in sn.phot_instances.loc[sn.wIndex]:
img.radec = self.radec.append(pd.DataFrame({'RA': fake_ra, 'DEC': fake_dec}), ignore_index = True)
# run needed pipeline steps on those new images
if (skip_photsub is False) and (photsub is True):
sn.do_galaxy_subtraction_all_image()
sn.do_photometry_all_image()
sn.get_sky_all_image()
        sn.calibrate(final_pass = True) # calibration results are not used directly, but it must run so the results can be read
# gather, organize and write
sn.lc_dir = self.lc_dir + '_sim'
sn.lc_base = os.path.join(sn.lc_dir, 'lightcurve_{}_'.format(self.targetname))
if not os.path.exists(sn.lc_dir):
os.makedirs(sn.lc_dir)
def get_res(idx, ps):
img = sn.phot_instances.loc[idx]
if ps is False:
#tmp = img.phot.iloc[-n_stars:].loc[:, 'Mag_obs']
tmp = img.phot_raw.iloc[-n_stars:].loc[:, sn.calmethod]
else:
#tmp = img.phot_sub.iloc[-n_stars:].loc[:, sn.calmethod]
tmp = img.phot_sub_raw.iloc[-n_stars:].loc[:, sn.calmethod]
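            # the scatter of the recovered artificial-star magnitudes is stored as this image's simulation error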
self.phot_instances.loc[idx].sim_err = tmp.std()
return tmp
res = []
for idx in sn.wIndex:
res.append(get_res(idx, photsub))
res = pd.DataFrame(res, index = sn.wIndex)
res.columns = sn.phot_instances.loc[sn.wIndex[0]].phot.index[-n_stars:]
# put mags into DataFrame
mags = pd.DataFrame(mags, index = self.wIndex)
mags.columns = sn.phot_instances.loc[sn.wIndex[0]].phot.index[-n_stars:]
# write results
with open(os.path.join(sn.lc_dir, 'sim_{}_injmags.dat'.format(sn.calmethod)), 'w') as f:
f.write(mags.to_string())
with open(os.path.join(sn.lc_dir, 'sim_{}_recmags.dat'.format(sn.calmethod)), 'w') as f:
f.write(res.to_string())
# write updated errors to lc
self.write_sim_lc(sn = sn, mags = mags, res = res, photsub = photsub)
# save image with inj stars labeled
sn._display_refstars(x = x, y = y, labels = res.columns, save_fig = os.path.join(sn.lc_dir, 'inj_stars.png'))
sn.savefile = sn.savefile.replace('.sav', '_sim.sav')
sn.save()
self.save()
def write_sim_lc(self, sn = None, mags = None, res = None, photsub = 'auto', drop_inj = []):
'''write sim errs to light curves'''
if (photsub == 'auto') or (type(photsub) != type(True)):
photsub = self.photsub
# instantiate if needed
if sn is None:
sn = LPP(self.targetname, interactive = False)
sn.savefile = sn.savefile.replace('.sav', '_sim.sav')
sn.load()
# read mags and sim results if needed
if mags is None:
mags = pd.read_csv(os.path.join(sn.lc_dir, 'sim_{}_injmags.dat'.format(sn.calmethod)), delim_whitespace = True, index_col = 0)
mags.columns = mags.columns.astype('int')
if res is None:
res = pd.read_csv(os.path.join(sn.lc_dir, 'sim_{}_recmags.dat'.format(sn.calmethod)), delim_whitespace = True, index_col = 0)
res.columns = res.columns.astype('int')
        # drop any specified injected stars
mags = mags.drop(drop_inj, axis = 1)
res = res.drop(drop_inj, axis = 1)
# compute result metrics
residuals = mags.loc[sn.wIndex] - res.loc[sn.wIndex]
r = pd.concat([sn.image_list.loc[sn.wIndex], res.mean(axis = 1), res.median(axis = 1), res.std(axis = 1), residuals.mean(axis = 1)], axis = 1)
r.columns = ('imagename', 'sim_mean_mag', 'sim_med_mag', 'sim_std_mag', 'mean_residual')
with open(os.path.join(sn.lc_dir, 'sim_{}_results.dat'.format(sn.calmethod)), 'w') as f:
f.write(r.to_string(index = False))
with open(os.path.join(sn.lc_dir, 'sim_{}_summary.dat'.format(sn.calmethod)), 'w') as f:
f.write(r.describe().round(3).to_string())
with open(os.path.join(sn.lc_dir, 'sim_{}_rec_mean_mags.dat'.format(sn.calmethod)), 'w') as f:
f.write(res.mean(axis = 0).round(3).to_string())
r['imagename'] = r['imagename'].str.replace(self.error_dir, self.data_dir)
# do all light curves (with full uncertainty as quadrature sum of three sources)
all_nat = []
all_std = []
columns = (';; MJD', 'etburst', 'mag', '-emag', '+emag', 'limmag', 'filter', 'imagename')
ps_choice = photsub
self.log.info('updating LC errors')
for ct in tqdm(self.color_terms_used.keys()):
# generate raw light curves
lc = pd.read_csv(self._lc_fname(ct, sn.calmethod, 'raw', sub = ps_choice), delim_whitespace = True, comment = ';', names = columns)
tmp = pd.merge(lc, r, on = 'imagename', how = 'left')
orig_stat_err = (tmp['+emag'] - tmp['-emag'])/2
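            # combine the original statistical uncertainty with the artificial-star scatter in quadrature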
new_err = np.sqrt(orig_stat_err**2 + tmp['sim_std_mag']**2)
tmp['-emag'] = round(tmp['mag'] - new_err, 5)
tmp['+emag'] = round(tmp['mag'] + new_err, 5)
lc_raw_name = sn._lc_fname(ct, sn.calmethod, 'raw', sub = ps_choice)
tmp.drop(['sim_mean_mag', 'sim_med_mag', 'sim_std_mag', 'mean_residual'], axis = 'columns').to_csv(lc_raw_name, sep = '\t', columns = columns,
index = False, na_rep = 'NaN')
p = LPPu.plotLC(lc_file = lc_raw_name, name = self.targetname, photmethod = self.calmethod)
p.plot_lc(extensions = ['.ps', '.png'])
# generate remaining light curves
group_succ, standard_succ = self.raw2standard_lc(lc_raw_name)
if group_succ is True:
all_nat.append((ct, sn._lc_fname(ct, sn.calmethod, 'group', sub = ps_choice)))
if standard_succ is True:
all_std.append(sn._lc_fname(ct, sn.calmethod, 'standard', sub = ps_choice))
# make "all" light curves
lc_nat = sn._lc_fname('all', sn.calmethod, 'group', sub = ps_choice)
concat_list = []
for row in all_nat:
tmp = pd.read_csv(row[1], delim_whitespace = True)
tmp.insert(3, 'SYSTEM', row[0])
concat_list.append(tmp)
if len(concat_list) > 0:
pd.concat(concat_list, sort = False).to_csv(lc_nat, sep = '\t', na_rep = 'NaN', index = False, float_format = '%6.3f')
p = LPPu.plotLC(lc_file = lc_nat, name = self.targetname, photmethod = self.calmethod)
p.plot_lc(extensions = ['.ps', '.png'])
lc = sn._lc_fname('all', self.calmethod, 'standard', sub = ps_choice)
concat_list = []
for fl in all_std:
concat_list.append(pd.read_csv(fl, delim_whitespace = True))
if len(concat_list) > 0:
pd.concat(concat_list, sort = False).to_csv(lc, sep = '\t', na_rep = 'NaN', index = False, float_format = '%6.3f')
p = LPPu.plotLC(lc_file = lc, name = self.targetname, photmethod = self.calmethod)
p.plot_lc(extensions = ['.ps', '.png'])
def write_summary(self):
'''write summary file'''
# get filters used
self.filters = set(self.phot_instances.loc[self.wIndex].apply(lambda img: img.filter.upper()))
ctu = self.color_terms_used
if ctu is not None:
ctu = ', '.join(ctu.keys())
stars = self.cal_IDs
if stars != 'all':
stars = ', '.join(self.cal_IDs.astype(str))
self.summary_file = self.targetname + '.summary'
with open(self.summary_file, 'w') as f:
f.write('{:<25}{}\n'.format('targetname', self.targetname))
f.write('{:<25}{}\n'.format('photsub', self.photsub))
f.write('{:<25}{}\n'.format('filters', ', '.join(self.filters)))
f.write('{:<25}{}\n'.format('apertures', ', '.join(self.photmethod)))
f.write('{:<25}{}\n'.format('calmethod', self.calmethod))
f.write('{:<25}{}\n'.format('color_terms', ctu))
f.write('{:<25}{}\n'.format('num images', len(self.phot_instances)))
f.write('{:<25}{}\n'.format('num failures', len(self.aIndex) - len(self.wIndex)))
f.write('{:<25}{}\n'.format('num non-sup. filt.', len(self.bfIndex)))
f.write('{:<25}{}\n'.format('num excl. by date', len(self.bdIndex)))
f.write('{:<25}{}\n'.format('num phot failures', len(self.pfIndex)))
f.write('{:<25}{}\n'.format('num cal failures', len(self.cfIndex)))
f.write('{:<25}{}\n'.format('num no obj', len(self.noIndex)))
f.write('{:<25}{}\n'.format('num manually removed', len(self.mrIndex)))
f.write('{:<25}{}\n'.format('cal source', self.cal_source))
f.write('{:<25}{}\n'.format('cal stars', stars))
f.write('{:<25}{}\n'.format('cal tolerance', round(self.cal_diff_tol, 2)))
f.write('{:<25}{}\n'.format('run successful', self.run_success))
self.log.info('pipeline complete, summary file written')
self.save()
def get_host_photometry(self, tel = 'nickel'):
'''do photometry of the host galaxy'''
# instantiate pipeline instance and inherit many parent attributes
sn = LPP(self.targetname, interactive = False, parallel = False, cal_diff_tol = self.cal_diff_tol, force_color_term = self.force_color_term,
wdir = self.wdir, cal_use_common_ref_stars = self.cal_use_common_ref_stars, autoloadsave = False, sep_tol = self.sep_tol)
# setup
sn.radec = self.radec
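        # host photometry is measured on the BVRI template images of the requested telescope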
sn.image_list = pd.Series([self.template_images['{}_{}'.format(filt, tel)] for filt in 'B V R I'.split(' ')])
sn.phot_instances = sn._im2inst(sn.image_list, mode = 'quiet')
sn.wIndex = sn.image_list.index
sn.cal_arrays = self.cal_arrays
sn.cal_IDs = self.cal_IDs
# do photometry
sn.photsub = False
sn.do_photometry_all_image(forcesky = True)
sn.get_sky_all_image()
sn.calibrate(final_pass = True)
sn.get_zeromag_all_image()
sn.get_limmag_all_image()
sn.lc_dir = 'host_photometry'
sn.lc_base = os.path.join(sn.lc_dir, 'lightcurve_{}_host_'.format(sn.targetname))
sn.lc_ext = {'raw': '_natural_raw.dat',
'bin': '_natural_bin.dat',
'group': '_natural_group.dat',
'standard': '_standard.dat',
'ul': '_natural_ul.dat'}
sn.generate_lc()
###################################################################################################
# Utility Methods
###################################################################################################
def manual_remove(self, id, save_img = True):
'''manually remove an index (or list of indices) from consideration'''
if type(id) is int:
id = [id]
id = pd.Index(id)
self.mrIndex = self.mrIndex.append(id)
self.wIndex = self.wIndex.drop(id)
if save_img:
for img_id in id:
self._display_refstars(imname = self.image_list.loc[img_id], imidx = img_id)
def process_new_images(self, new_image_file = None, new_image_list = []):
'''processes images obtained after initial processing'''
self.log.info('processing new images')
# read in new images to list
if (new_image_file is not None) and (new_image_list == []):
new_image_list = pd.read_csv(new_image_file, header = None, delim_whitespace = True,
comment = '#', squeeze = True)
elif new_image_list != []:
new_image_list = pd.Series(new_image_list)
# remove any images from new list that have already been processed
new_image_list = new_image_list[~new_image_list.isin(self.image_list)]
offset = self.aIndex[-1] + 1
tmp = self.wIndex
self.wIndex = pd.RangeIndex(start = offset, stop = offset + len(new_image_list))
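        # temporarily point the working index at the new images only; the original index is restored before run() below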
# only proceed if any images remain
if len(new_image_list) == 0:
            self.log.warning('all images in new image list have already been processed, exiting')
return
# update image list to include everything, and update phot_instances
self.log.info('loading new images')
self.image_list = self.image_list.append(new_image_list, ignore_index = True)
self.phot_instances = self.phot_instances.append(self._im2inst(new_image_list), ignore_index = True)
# perform galaxy_subtraction and photometry on new images
self.do_galaxy_subtraction_all_image()
self.do_photometry_all_image()
self.get_sky_all_image()
# perform calibration
full_cal = False
if self.interactive:
resp = input('\nperform full re-calibration? (y/[n]) > ')
if 'y' in resp.lower():
full_cal = True
if full_cal:
self.current_step = self.steps.index(self.do_calibration)
else:
self.calibrate(final_pass = True)
self.get_zeromag_all_image()
self.get_limmag_all_image()
self.current_step = self.steps.index(self.generate_lc)
# run program after calibration has been completed (on all images)
self.aIndex = self.aIndex.append(self.wIndex)
self.wIndex = tmp.append(self.wIndex)
self.run()
# add to original image file and remove new file
if new_image_file is not None:
ow = True
if self.interactive:
resp = input('\nadd {} to {} and then remove {}? (y/[n]) > '.format(new_image_file, self.photlistfile, new_image_file))
if 'y' not in resp.lower():
ow = False
if ow:
os.system('cat {} >> {}'.format(new_image_file, self.photlistfile))
os.system('rm {}'.format(new_image_file))
self.log.info('new images processed')
def load_templates(self):
'''search templates dir, setup, and convert formats as needed'''
succ = True
self.template_images = {'{}_{}'.format(f, tel): None for f in ['B', 'V', 'R', 'I'] for tel in ['kait', 'nickel']}
self.template_images['CLEAR_kait'] = None # no clear for Nickel
if os.path.exists(self.templates_dir) is False:
succ = False
msg = 'no templates directory, cannot do photsub'
else:
templates = glob.glob('{}/*c.fit'.format(self.templates_dir))
if len(templates) == 0:
msg = 'no templates available'
succ = False
if succ is True:
if len(templates) < 5: # 5 passbands
# warn if not enough templates found (but may be ok if not all needed)
msg = 'warning: did not find templates for every passband'
for templ in templates:
ti = FitsInfo(templ)
filt = ti.filter.upper()
if (ti.telescope.lower() == 'nickel') and (filt != 'CLEAR') and ('n2k_c.fit' not in templ):
self.template_images['{}_nickel'.format(filt)] = ti.cimg
# also rebin for kait
self.template_images['{}_kait'.format(filt)] = ti.cimg.replace('c.fit', 'n2k_c.fit')
idl_cmd = '''idl -e "lpp_rebin_nickel2kait, '{}', SAVEFILE='{}'"'''.format(ti.cimg, self.template_images['{}_kait'.format(filt)])
stdout, stderr = LPPu.idl(idl_cmd)
self._log_idl(idl_cmd, stdout, stderr)
if not os.path.exists(self.template_images['{}_kait'.format(filt)]):
succ = False
msg = 'rebinning of templates from nickel to kait failed, cannot do photsub'
elif (ti.telescope.lower() == 'kait') and (filt == 'CLEAR') and ('n2k_c.fit' not in templ):
self.template_images['CLEAR_kait'] = ti.cimg
elif 'n2k_c.fit' in templ:
pass
else:
succ = False
                    msg = 'either BVRI templates are not from Nickel or CLEAR template is not from KAIT, cannot do photsub'
break
if succ is True:
self.log.info('templates loaded')
return
# otherwise process is not a success, search for candidates but proceed without photsub
self.log.warning(msg)
self.log.warning('switching to non-subtraction mode, but searching for template candidates')
self.template_images = None
self.photsub = False
self._get_template_candidates()
def get_cal_info(self):
'''checks for existence of calibration files and writes them if found'''
calfile = 'cal_{}_PS1.dat'.format(self.targetname)
if os.path.exists(os.path.join(self.calibration_dir, calfile)):
self.calfile = calfile
self.cal_source = 'PS1'
elif os.path.exists(os.path.join(self.calibration_dir, calfile.replace('PS1', 'SDSS'))):
self.calfile = calfile.replace('PS1', 'SDSS')
self.cal_source = 'SDSS'
elif os.path.exists(os.path.join(self.calibration_dir, calfile.replace('PS1', 'APASS'))):
self.calfile = calfile.replace('PS1', 'APASS')
self.cal_source = 'APASS'
self.calfile_use = self.calfile.replace('.dat', '_use.dat')
def cut_lc_points(self, lc_file, regenerate = False):
'''interactively cut points from each band in input lc file'''
if ('_all_' in lc_file) and ('_raw' in lc_file):
self.cut_raw_all_lc_points(lc_file)
return
self.log.info('interactively cutting points from light curve file')
self.log.info('working on {}'.format(lc_file))
p = LPPu.plotLC(lc_file = lc_file)
cut_images = p.plot_lc(icut = True)
if cut_images is not None:
self.manual_remove(self.aIndex[self.image_list.isin(cut_images)])
del p
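        # re-plot the light curve now that any selected points have been removed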
p = LPPu.plotLC(lc_file = lc_file)
p.plot_lc(extensions = ['.ps', '.png'])
if regenerate is True:
return self.raw2standard_lc(lc_file)
def cut_raw_all_lc_points(self, infile):
'''given "all" raw filename (need not exist), do cutting on relevant raw files and regenerate "all" files'''
# assign convenience variables
tmp = infile.split('_')
m = tmp[tmp.index('natural') - 1] # get phot aperture
groupfile = infile.replace('raw', 'group')#.replace('.dat', '_cut.dat')
lc = groupfile.replace('natural_group', 'standard')
all_nat = []
all_std = []
for ct in self.color_terms.keys():
raw = infile.replace('all', ct)
if os.path.exists(raw):
group_succ, std_succ = self.cut_lc_points(raw, regenerate = True)
if group_succ:
all_nat.append((ct, groupfile.replace('all', ct)))
if std_succ:
all_std.append(lc.replace('all', ct))
concat_list = []
for row in all_nat:
tmp = pd.read_csv(row[1], delim_whitespace = True)
tmp.insert(3, 'SYSTEM', row[0])
concat_list.append(tmp)
if len(concat_list) > 0:
pd.concat(concat_list, sort = False).to_csv(groupfile, sep = '\t', na_rep = 'NaN', index = False, float_format = '%6.3f')
p = LPPu.plotLC(lc_file = groupfile, name = self.targetname, photmethod = m)
p.plot_lc(extensions = ['.ps', '.png'])
concat_list = []
for fl in all_std:
if os.path.exists(fl):
concat_list.append(pd.read_csv(fl, delim_whitespace = True))
if len(concat_list) > 0:
pd.concat(concat_list, sort = False).to_csv(lc, sep = '\t', na_rep = 'NaN', index = False, float_format = '%6.3f')
p = LPPu.plotLC(lc_file = lc, name = self.targetname, photmethod = m)
p.plot_lc(extensions = ['.ps', '.png'])
def plot_lc(self, lc_list):
'''plots each light curve from the input list'''
for fl in lc_list:
self.log.info('plotting {}'.format(fl))
p = LPPu.plotLC(lc_file = fl)
p.plot_lc(extensions = ['.ps', '.png'])
def _ct2cf(self, color_term, use = False):
'''return "calfit" filename associated with input color term'''
base = self.calfile.split('.')[0]
if color_term != 'Landolt':
cal_nat_fit = base + '_{}_natural.fit'.format(color_term)
else:
cal_nat_fit = base + '_Landolt_standard.fit'
if use is False:
return cal_nat_fit
else:
return cal_nat_fit.replace('_{}_'.format(self.cal_source), '_{}_use_'.format(self.cal_source))
def _im2inst(self, image_list, mode = 'progress'):
'''create a series of Phot instances from input image list (also a series)'''
# hide astropy warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', AstropyWarning)
if mode != 'quiet':
return image_list.progress_apply(Phot, radec = self.radec, wdir = self.wdir, calmethod = self.calmethod)
else:
return image_list.apply(Phot, radec = self.radec, wdir = self.wdir, calmethod = self.calmethod)
def _lc_fname(self, cterm, pmethod, lc_type, sub = False):
'''return light curve filename'''
if self.lc_base is None:
print('set lc_base first')
return
full_base = self.lc_base + cterm + '_' + pmethod
lc_fname = full_base + self.lc_ext[lc_type]
if sub is True:
lc_fname = lc_fname.replace('.dat', '_sub.dat')
return lc_fname
def _get_template_candidates(self):
'''wrap LPPu function to get template candidates'''
self.log.info('searching for galaxy subtraction template images')
        # fall back on first obs date if the discovery date is unknown
if self.disc_date_mjd is None:
if self.first_obs is None:
self.first_obs = LPPu.get_first_obs_date(self)
dt = self.first_obs
else:
dt = self.disc_date_mjd
result = LPPu.get_template_candidates(self.targetra, self.targetdec, dt, self.templates_dir)
self.log.info(result)
def _reset_cal(self, reusecal_IDs = False):
'''resets calibration to initial state, makes copy to revert'''
self.cal_IDs_bak = self.cal_IDs.copy()
self.mrIndex_bak = self.mrIndex.copy()
self.wIndex_bak = self.wIndex.copy()
if not reusecal_IDs:
self.cal_IDs = 'all'
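            # restore manually removed images so they are reconsidered during recalibration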
self.wIndex = self.wIndex.append(self.mrIndex)
self.mrIndex = pd.Index([])
def _revert_cal(self):
'''undoes effects of _reset_cal'''
self.cal_IDs = self.cal_IDs_bak.copy()
self.wIndex = self.wIndex_bak.copy()
self.mrIndex = self.mrIndex_bak.copy()
def _log_idl(self, idl_cmd, stdout, stderr):
'''log info regarding external idl calls'''
self.log.debug('output of IDL command: {}'.format(idl_cmd))
self.log.debug('STDOUT----\n{}'.format(stdout))
self.log.debug('STDERR----\n{}'.format(stderr))
def _display_refstars(self, imname = None, imidx = None, icut = False, display = False, save_fig = None,
ax = None, x = None, y = None, labels = None):
'''show (reference) image and plot selected reference stars'''
def onpick(event, cut_list, ref, refp, fig):
'''get index, append appropriate index to cut_list and remove star'''
ind = event.ind[0]
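            # the picked index refers to the currently plotted (not yet cut) stars, so map it back through the remaining index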
cut_list.append(ref.index.drop(cut_list)[ind])
refp.set_data(ref.loc[ref.index.drop(cut_list), 'x'], ref.loc[ref.index.drop(cut_list), 'y'])
fig.canvas.draw()
# set calibration IDs if necessary
if self.cal_IDs == 'all':
self.cal_IDs = self.cal_use.index
# read needed information from image
if imname is None:
imname = self.refname
with fits.open(imname) as f:
im = f[0].data
head = f[0].header
# find pixel locations of sn, reference stars, and radec stars
cs = WCS(header = head)
sn_x, sn_y = cs.all_world2pix(self.targetra, self.targetdec, 0)
ref_x, ref_y = cs.all_world2pix(self.cal_use.loc[self.cal_IDs, 'ra'], self.cal_use.loc[self.cal_IDs, 'dec'], 0)
ref = pd.DataFrame({'x': ref_x, 'y': ref_y}, index = self.cal_IDs)
rd_x, rd_y = cs.all_world2pix(self.radec.loc[1:, 'RA'], self.radec.loc[1:, 'DEC'], 0)
# plot (including interactive step if requested)
if ax is None:
fig, ax = plt.subplots(figsize = (8, 8))
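        # display the image with a ZScale stretch as inverted grayscale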
z = ZScaleInterval()
zlim = z.get_limits(im.data)
ax.imshow(-1*im, cmap = 'gray', vmin = -1*zlim[1], vmax = -1*zlim[0])
ax.plot(sn_x, sn_y, 'mD', markersize = 15, mfc = 'none', mew = 2)
if (x is not None) and (y is not None):
ax.plot(x, y, 'bs', markersize = 15, mfc = 'none', mew = 2)
if labels is not None:
for ii in range(len(x)):
ax.annotate(labels[ii], (x[ii] + 20, y[ii]), color = 'b', size = 12)
else:
ax.plot(rd_x, rd_y, 'bs', markersize = 15, mfc = 'none', mew = 2)
refp, = ax.plot(ref['x'], ref['y'], 'ro', markersize = 15, mfc = 'none', picker = 14, mew = 2)
for idx, row in ref.iterrows():
ax.annotate(idx, (row['x'] + 20*head['NAXIS1']/1024, row['y']), color = 'r', size = 12)
ax.set_xticks(())
ax.set_yticks(())
if icut == True:
cut_list = []
plt.ion()
cid = fig.canvas.mpl_connect('pick_event', lambda event: onpick(event, cut_list, ref, refp, fig))
fig.show()
input('click on circled reference stars to be removed [hit "enter" when done]')
fig.canvas.mpl_disconnect(cid)
plt.ioff()
self.cal_IDs = self.cal_IDs.drop(cut_list)
if display is True:
plt.ion()
plt.show()
elif save_fig is not None:
plt.savefig(save_fig)
else:
if imidx is None:
plt.savefig(os.path.join(self.calibration_dir, 'ref_stars.png'))
else:
plt.savefig(os.path.join(self.calibration_dir, 'cut_img_{}.png'.format(imidx)))
plt.close()
def _display_obs_cal_mags(self, filt, id, sig = 3):
'''show all observed mags of a given calibration star in a given passband'''
# select relevant data
selection = self.calibrators.loc[self.calibrators['Filter'] == filt, :].loc[(self.wIndex, id),['Mag_obs', 'system']]
fig, ax = plt.subplots(1, 1, figsize = (7, 3))
def onpick(event):
            '''print the index of the clicked point and summary statistics without it'''
ind = event.ind[0]
ids = event.artist._x
mags = event.artist._y
print('\nIndex of clicked image mag: {}'.format(int(ids[ind])))
sub = mags[ids != ids[ind]]
print('Without this image: {:.2f} pm {:.2f}'.format(np.median(sub), np.std(sub)))
colors = ('b','r','g','k','m')
cnt = 0
for sys, group in selection.groupby('system', sort = False):
group = group.loc[group['Mag_obs'].notnull(), :]
mags = group.loc[:, 'Mag_obs'].values
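            # recover the image indices from the first level of the MultiIndex (older pandas .labels API)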
ids = group.index.levels[0][group.index.labels[0]]
line, = ax.plot(ids, mags, '{}.'.format(colors[cnt]), label = sys, picker = 5)
ax.plot([ids.min(), ids.max()], [np.mean(mags)]*2, '{}--'.format(colors[cnt]),
label = '${:.2f} \pm {:.2f}$'.format(np.mean(mags), np.std(mags)))
ax.plot([ids.min(), ids.max()], [np.mean(mags) + sig * np.std(mags)]*2, '{}:'.format(colors[cnt]),
label = '${}\sigma$ boundary'.format(sig))
ax.plot([ids.min(), ids.max()], [np.mean(mags) - sig * np.std(mags)]*2, '{}:'.format(colors[cnt]))
cnt += 1
ax.set_title('Filter: {} Cal ID: {}'.format(filt, id))
ax.legend(bbox_to_anchor = (1.01, 0.5), loc = 'center left')
plt.tight_layout()
cid = fig.canvas.mpl_connect('pick_event', lambda event: onpick(event))
plt.show()
fig.canvas.mpl_disconnect(cid)
def compare_image2ref(self, idx):
'''plot ref image and selected image side by side'''
fig = plt.figure(figsize = (12, 6))
ref = Phot(self.refname)
wcs1 = WCS(header = ref.header)
ax1 = fig.add_subplot(1, 2, 1, projection = wcs1)
self._display_refstars(ax = ax1)
wcs2 = WCS(header = self.phot_instances.loc[idx].header)
ax2 = fig.add_subplot(1, 2, 2, projection = wcs2)
self.phot_instances.loc[idx].display_image(ax = ax2, display = False)
fig.show()
# provide script functionality via
# python LPP.py name
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('name', type = str, help = 'name of the object')
parser.add_argument('-a', '--add-images(s)', dest = 'new', type = str, default = False,
help = 'new image(s) or photlist to process')
parser.add_argument('-i', '--interactive', dest = 'interactive', action = 'store_const',
const = True, default = False, help = 'run in interactive mode')
parser.add_argument('-ct', '--force-color-term', dest = 'force_color_term', type = str,
default = False, help = 'force to use specified color term')
parser.add_argument('-dd', '--disc-date-mjd', dest = 'disc_date_mjd', type = float,
default = None, help = 'mjd of discovery')
parser.add_argument('-c', '--cut-lc-points', dest = 'lc_file', type = str,
default = None, help = 'light curve file to cut points from')
parser.add_argument('-cr', '--cut-raw-lc-points-and-regenerate', dest = 'raw_lc_file', type = str,
default = None, help = 'light curve file to cut points from')
args = parser.parse_args()
pipeline = LPP(args.name, interactive = args.interactive, force_color_term = args.force_color_term)
pipeline.disc_date_mjd = args.disc_date_mjd
if (args.new is False) and (args.lc_file is None) and (args.raw_lc_file is None):
pipeline.run()
elif (args.new is False) and (args.lc_file is not None):
pipeline.cut_lc_points(args.lc_file)
elif (args.new is False) and (args.raw_lc_file is not None):
pipeline.load()
pipeline.cut_lc_points(args.raw_lc_file, regenerate = True)
pipeline.save()
pipeline.write_summary()
else:
pipeline.load() # load from sav file
if '_c.fit' in args.new:
new_images = [fl.strip() for fl in args.new.replace(',', ' ').split(' ')]
pipeline.process_new_images(new_image_list = new_images)
else: # otherwise it is a photlist
pipeline.process_new_images(new_image_file = args.new)
| [
"logging.getLogger",
"logging.StreamHandler",
"numpy.sqrt",
"pandas.read_csv",
"pandas.Index",
"numpy.array",
"numpy.arctan2",
"astropy.io.fits.open",
"copy.deepcopy",
"numpy.sin",
"astropy.coordinates.match_coordinates_sky",
"LOSSPhotPypeline.utils.astroCatalog",
"LOSSPhotPypeline.utils.idl",
"os.path.exists",
"numpy.mean",
"LOSSPhotPypeline.image.FitsInfo",
"argparse.ArgumentParser",
"subprocess.Popen",
"inspect.getfile",
"matplotlib.pyplot.close",
"numpy.linspace",
"os.path.isdir",
"logging.FileHandler",
"matplotlib.pyplot.subplots",
"LOSSPhotPypeline.utils.genconf",
"numpy.random.seed",
"pandas.DataFrame",
"warnings.simplefilter",
"numpy.isinf",
"LOSSPhotPypeline.utils.get_first_obs_date",
"numpy.random.normal",
"numpy.abs",
"matplotlib.pyplot.savefig",
"pandas.merge",
"pickle.load",
"matplotlib.pyplot.ioff",
"itertools.zip_longest",
"os.path.dirname",
"LOSSPhotPypeline.image.Phot",
"numpy.isnan",
"numpy.cos",
"numpy.std",
"matplotlib.pyplot.ion",
"tqdm.tqdm.pandas",
"astropy.visualization.ZScaleInterval",
"matplotlib.pyplot.show",
"pandas.Series",
"numpy.median",
"pickle.dump",
"os.makedirs",
"logging.Formatter",
"tqdm.tqdm",
"os.path.join",
"astropy.coordinates.SkyCoord",
"warnings.catch_warnings",
"matplotlib.pyplot.figure",
"LOSSPhotPypeline.utils.plotLC",
"os.path.basename",
"LOSSPhotPypeline.utils.get_template_candidates",
"matplotlib.pyplot.tight_layout",
"os.path.abspath",
"pandas.concat",
"astropy.wcs.WCS"
] | [((526, 573), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'AstropyWarning'], {}), "('ignore', AstropyWarning)\n", (547, 573), False, 'import warnings\n'), ((923, 936), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (934, 936), False, 'from tqdm import tqdm\n'), ((94550, 94575), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (94573, 94575), False, 'import argparse\n'), ((1556, 1577), 'os.path.abspath', 'os.path.abspath', (['wdir'], {}), '(wdir)\n', (1571, 1577), False, 'import os\n'), ((4601, 4613), 'pandas.Index', 'pd.Index', (['[]'], {}), '([])\n', (4609, 4613), True, 'import pandas as pd\n'), ((5055, 5121), 'os.path.join', 'os.path.join', (['self.calibration_dir', "(self.targetname + '_radec.txt')"], {}), "(self.calibration_dir, self.targetname + '_radec.txt')\n", (5067, 5121), False, 'import os\n'), ((6855, 6884), 'os.path.dirname', 'os.path.dirname', (['self.refname'], {}), '(self.refname)\n', (6870, 6884), False, 'import os\n'), ((7663, 7692), 'os.path.exists', 'os.path.exists', (['self.savefile'], {}), '(self.savefile)\n', (7677, 7692), False, 'import os\n'), ((11202, 11239), 'logging.getLogger', 'logging.getLogger', (['"""LOSSPhotPypeline"""'], {}), "('LOSSPhotPypeline')\n", (11219, 11239), False, 'import logging\n'), ((11430, 11463), 'logging.FileHandler', 'logging.FileHandler', (['self.logfile'], {}), '(self.logfile)\n', (11449, 11463), False, 'import logging\n'), ((18223, 18321), 'pandas.read_csv', 'pd.read_csv', (['self.photlistfile'], {'header': 'None', 'delim_whitespace': '(True)', 'comment': '"""#"""', 'squeeze': '(True)'}), "(self.photlistfile, header=None, delim_whitespace=True, comment=\n '#', squeeze=True)\n", (18234, 18321), True, 'import pandas as pd\n'), ((21290, 21320), 'os.path.exists', 'os.path.exists', (['self.radecfile'], {}), '(self.radecfile)\n', (21304, 21320), False, 'import os\n'), ((21864, 21908), 'LOSSPhotPypeline.image.Phot', 'Phot', (['self.refname'], {'calmethod': 'self.calmethod'}), '(self.refname, calmethod=self.calmethod)\n', (21868, 21908), False, 'from LOSSPhotPypeline.image import Phot, FitsInfo, FileNames\n'), ((22107, 22137), 'os.path.join', 'os.path.join', (['sxcp', '"""kait.sex"""'], {}), "(sxcp, 'kait.sex')\n", (22119, 22137), False, 'import os\n'), ((22153, 22193), 'os.path.join', 'os.path.join', (['sxcp', '"""gauss_2.0_5x5.conv"""'], {}), "(sxcp, 'gauss_2.0_5x5.conv')\n", (22165, 22193), False, 'import os\n'), ((22208, 22238), 'os.path.join', 'os.path.join', (['sxcp', '"""kait.par"""'], {}), "(sxcp, 'kait.par')\n", (22220, 22238), False, 'import os\n'), ((22254, 22287), 'os.path.join', 'os.path.join', (['sxcp', '"""default.nnw"""'], {}), "(sxcp, 'default.nnw')\n", (22266, 22287), False, 'import os\n'), ((22604, 22707), 'subprocess.Popen', 'subprocess.Popen', (['cmd_list'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'universal_newlines': '(True)'}), '(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n universal_newlines=True)\n', (22620, 22707), False, 'import subprocess\n'), ((23408, 23430), 'astropy.wcs.WCS', 'WCS', ([], {'header': 'ref.header'}), '(header=ref.header)\n', (23411, 23430), False, 'from astropy.wcs import WCS\n'), ((23584, 23632), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['imagera', 'imagedec'], {'unit': '(u.deg, u.deg)'}), '(imagera, imagedec, unit=(u.deg, u.deg))\n', (23592, 23632), False, 'from astropy.coordinates import SkyCoord, match_coordinates_sky\n'), ((23659, 23719), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['self.targetra', 
'self.targetdec'], {'unit': '(u.deg, u.deg)'}), '(self.targetra, self.targetdec, unit=(u.deg, u.deg))\n', (23667, 23719), False, 'from astropy.coordinates import SkyCoord, match_coordinates_sky\n'), ((24411, 24512), 'pandas.read_csv', 'pd.read_csv', (['self.radecfile'], {'delim_whitespace': '(True)', 'skiprows': '(0, 1, 3, 4, 5)', 'names': "['RA', 'DEC']"}), "(self.radecfile, delim_whitespace=True, skiprows=(0, 1, 3, 4, 5),\n names=['RA', 'DEC'])\n", (24422, 24512), True, 'import pandas as pd\n'), ((25353, 25439), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["self.radec.loc[1:, 'RA']", "self.radec.loc[1:, 'DEC']"], {'unit': '(u.deg, u.deg)'}), "(self.radec.loc[1:, 'RA'], self.radec.loc[1:, 'DEC'], unit=(u.deg,\n u.deg))\n", (25361, 25439), False, 'from astropy.coordinates import SkyCoord, match_coordinates_sky\n'), ((25557, 25631), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["cal_cat.loc[:, 'ra']", "cal_cat.loc[:, 'dec']"], {'unit': '(u.deg, u.deg)'}), "(cal_cat.loc[:, 'ra'], cal_cat.loc[:, 'dec'], unit=(u.deg, u.deg))\n", (25565, 25631), False, 'from astropy.coordinates import SkyCoord, match_coordinates_sky\n'), ((25658, 25691), 'astropy.coordinates.match_coordinates_sky', 'match_coordinates_sky', (['cal', 'radec'], {}), '(cal, radec)\n', (25679, 25691), False, 'from astropy.coordinates import SkyCoord, match_coordinates_sky\n'), ((26333, 26438), 'LOSSPhotPypeline.utils.astroCatalog', 'LPPu.astroCatalog', (['self.targetname', 'self.targetra', 'self.targetdec'], {'relative_path': 'self.calibration_dir'}), '(self.targetname, self.targetra, self.targetdec,\n relative_path=self.calibration_dir)\n', (26350, 26438), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((27954, 27999), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'columns': "['success', 'log']"}), "(res, columns=['success', 'log'])\n", (27966, 27999), True, 'import pandas as pd\n'), ((29066, 29116), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'columns': "['unsub', 'sub', 'log']"}), "(res, columns=['unsub', 'sub', 'log'])\n", (29078, 29116), True, 'import pandas as pd\n'), ((32653, 32726), 'pandas.concat', 'pd.concat', (['[df.loc[self.cal_IDs, :] for df in cal_list]'], {'keys': 'self.wIndex'}), '([df.loc[self.cal_IDs, :] for df in cal_list], keys=self.wIndex)\n', (32662, 32726), True, 'import pandas as pd\n'), ((49450, 49555), 'LOSSPhotPypeline.utils.astroCatalog', 'LPPu.astroCatalog', (['self.targetname', 'self.targetra', 'self.targetdec'], {'relative_path': 'self.calibration_dir'}), '(self.targetname, self.targetra, self.targetdec,\n relative_path=self.calibration_dir)\n', (49467, 49555), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((49714, 49724), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (49722, 49724), True, 'import matplotlib.pyplot as plt\n'), ((53999, 54016), 'LOSSPhotPypeline.utils.idl', 'LPPu.idl', (['idl_cmd'], {}), '(idl_cmd)\n', (54007, 54016), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((54303, 54320), 'LOSSPhotPypeline.utils.idl', 'LPPu.idl', (['idl_cmd'], {}), '(idl_cmd)\n', (54311, 54320), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((54802, 54819), 'LOSSPhotPypeline.utils.idl', 'LPPu.idl', (['idl_cmd'], {}), '(idl_cmd)\n', (54810, 54819), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((55953, 56012), 'LOSSPhotPypeline.utils.plotLC', 'LPPu.plotLC', ([], {'lc_file': 'lc', 'name': 'self.targetname', 'photmethod': 'm'}), '(lc_file=lc, name=self.targetname, photmethod=m)\n', (55964, 56012), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((57131, 57152), 'tqdm.tqdm', 
'tqdm', (['self.photmethod'], {}), '(self.photmethod)\n', (57135, 57152), False, 'from tqdm import tqdm\n'), ((66553, 66587), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'index': 'sn.wIndex'}), '(res, index=sn.wIndex)\n', (66565, 66587), True, 'import pandas as pd\n'), ((66720, 66757), 'pandas.DataFrame', 'pd.DataFrame', (['mags'], {'index': 'self.wIndex'}), '(mags, index=self.wIndex)\n', (66732, 66757), True, 'import pandas as pd\n'), ((76236, 76248), 'pandas.Index', 'pd.Index', (['id'], {}), '(id)\n', (76244, 76248), True, 'import pandas as pd\n'), ((83212, 83240), 'LOSSPhotPypeline.utils.plotLC', 'LPPu.plotLC', ([], {'lc_file': 'lc_file'}), '(lc_file=lc_file)\n', (83223, 83240), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((83426, 83454), 'LOSSPhotPypeline.utils.plotLC', 'LPPu.plotLC', ([], {'lc_file': 'lc_file'}), '(lc_file=lc_file)\n', (83437, 83454), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((87663, 87751), 'LOSSPhotPypeline.utils.get_template_candidates', 'LPPu.get_template_candidates', (['self.targetra', 'self.targetdec', 'dt', 'self.templates_dir'], {}), '(self.targetra, self.targetdec, dt, self.\n templates_dir)\n', (87691, 87751), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((88177, 88189), 'pandas.Index', 'pd.Index', (['[]'], {}), '([])\n', (88185, 88189), True, 'import pandas as pd\n'), ((89706, 89722), 'astropy.wcs.WCS', 'WCS', ([], {'header': 'head'}), '(header=head)\n', (89709, 89722), False, 'from astropy.wcs import WCS\n'), ((89931, 89989), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': ref_x, 'y': ref_y}"], {'index': 'self.cal_IDs'}), "({'x': ref_x, 'y': ref_y}, index=self.cal_IDs)\n", (89943, 89989), True, 'import pandas as pd\n'), ((90232, 90248), 'astropy.visualization.ZScaleInterval', 'ZScaleInterval', ([], {}), '()\n', (90246, 90248), False, 'from astropy.visualization import ZScaleInterval\n'), ((92264, 92298), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(7, 3)'}), '(1, 1, figsize=(7, 3))\n', (92276, 92298), True, 'import matplotlib.pyplot as plt\n'), ((93739, 93757), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (93755, 93757), True, 'import matplotlib.pyplot as plt\n'), ((93846, 93856), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (93854, 93856), True, 'import matplotlib.pyplot as plt\n'), ((94011, 94038), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (94021, 94038), True, 'import matplotlib.pyplot as plt\n'), ((94055, 94073), 'LOSSPhotPypeline.image.Phot', 'Phot', (['self.refname'], {}), '(self.refname)\n', (94059, 94073), False, 'from LOSSPhotPypeline.image import Phot, FitsInfo, FileNames\n'), ((94089, 94111), 'astropy.wcs.WCS', 'WCS', ([], {'header': 'ref.header'}), '(header=ref.header)\n', (94092, 94111), False, 'from astropy.wcs import WCS\n'), ((94228, 94275), 'astropy.wcs.WCS', 'WCS', ([], {'header': 'self.phot_instances.loc[idx].header'}), '(header=self.phot_instances.loc[idx].header)\n', (94231, 94275), False, 'from astropy.wcs import WCS\n'), ((3185, 3217), 'os.path.exists', 'os.path.exists', (['self.config_file'], {}), '(self.config_file)\n', (3199, 3217), False, 'import os\n'), ((3373, 3461), 'LOSSPhotPypeline.utils.genconf', 'LPPu.genconf', ([], {'targetname': 'self.targetname', 'config_file': "(self.config_file + '.template')"}), "(targetname=self.targetname, config_file=self.config_file +\n '.template')\n", (3385, 3461), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((4947, 4982), 'os.path.isdir', 
'os.path.isdir', (['self.calibration_dir'], {}), '(self.calibration_dir)\n', (4960, 4982), False, 'import os\n'), ((4996, 5029), 'os.makedirs', 'os.makedirs', (['self.calibration_dir'], {}), '(self.calibration_dir)\n', (5007, 5029), False, 'import os\n'), ((11488, 11582), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s in %(funcName)s with level %(levelname)s ::: %(message)s"""'], {}), "(\n '%(asctime)s in %(funcName)s with level %(levelname)s ::: %(message)s')\n", (11505, 11582), False, 'import logging\n'), ((11730, 11753), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (11751, 11753), False, 'import logging\n'), ((15528, 15543), 'pickle.dump', 'pkl.dump', (['vs', 'f'], {}), '(vs, f)\n', (15536, 15543), True, 'import pickle as pkl\n'), ((15842, 15853), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (15850, 15853), True, 'import pickle as pkl\n'), ((21415, 21516), 'pandas.read_csv', 'pd.read_csv', (['self.radecfile'], {'delim_whitespace': '(True)', 'skiprows': '(0, 1, 3, 4, 5)', 'names': "['RA', 'DEC']"}), "(self.radecfile, delim_whitespace=True, skiprows=(0, 1, 3, 4, 5),\n names=['RA', 'DEC'])\n", (21426, 21516), True, 'import pandas as pd\n'), ((22867, 22891), 'os.path.exists', 'os.path.exists', (['ref.sobj'], {}), '(ref.sobj)\n', (22881, 22891), False, 'import os\n'), ((23094, 23113), 'astropy.io.fits.open', 'fits.open', (['ref.sobj'], {}), '(ref.sobj)\n', (23103, 23113), False, 'from astropy.io import fits\n'), ((24911, 25016), 'LOSSPhotPypeline.utils.astroCatalog', 'LPPu.astroCatalog', (['self.targetname', 'self.targetra', 'self.targetdec'], {'relative_path': 'self.calibration_dir'}), '(self.targetname, self.targetra, self.targetdec,\n relative_path=self.calibration_dir)\n', (24928, 25016), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((25468, 25516), 'os.path.join', 'os.path.join', (['self.calibration_dir', 'self.calfile'], {}), '(self.calibration_dir, self.calfile)\n', (25480, 25516), False, 'import os\n'), ((31915, 31990), 'numpy.abs', 'np.abs', (["(phot.loc[self.cal_IDs, 'RA_obs'] - phot.loc[self.cal_IDs, 'RA_cal'])"], {}), "(phot.loc[self.cal_IDs, 'RA_obs'] - phot.loc[self.cal_IDs, 'RA_cal'])\n", (31921, 31990), True, 'import numpy as np\n'), ((32040, 32117), 'numpy.abs', 'np.abs', (["(phot.loc[self.cal_IDs, 'DEC_obs'] - phot.loc[self.cal_IDs, 'DEC_cal'])"], {}), "(phot.loc[self.cal_IDs, 'DEC_obs'] - phot.loc[self.cal_IDs, 'DEC_cal'])\n", (32046, 32117), True, 'import numpy as np\n'), ((32925, 32947), 'pandas.Index', 'pd.Index', (['self.cfIndex'], {}), '(self.cfIndex)\n', (32933, 32947), True, 'import pandas as pd\n'), ((32976, 32999), 'pandas.Index', 'pd.Index', (['self.csfIndex'], {}), '(self.csfIndex)\n', (32984, 32999), True, 'import pandas as pd\n'), ((50582, 50599), 'copy.deepcopy', 'copy.deepcopy', (['lc'], {}), '(lc)\n', (50595, 50599), False, 'import copy\n'), ((50723, 50740), 'copy.deepcopy', 'copy.deepcopy', (['lm'], {}), '(lm)\n', (50736, 50740), False, 'import copy\n'), ((51722, 51823), 'pandas.read_csv', 'pd.read_csv', (['dat'], {'header': 'None', 'delim_whitespace': '(True)', 'comment': '""";"""', 'usecols': 'cols', 'names': 'col_names'}), "(dat, header=None, delim_whitespace=True, comment=';', usecols=\n cols, names=col_names)\n", (51733, 51823), True, 'import pandas as pd\n'), ((53243, 53263), 'pandas.DataFrame', 'pd.DataFrame', (['lcs[m]'], {}), '(lcs[m])\n', (53255, 53263), True, 'import pandas as pd\n'), ((53468, 53488), 'pandas.DataFrame', 'pd.DataFrame', (['lms[m]'], {}), '(lms[m])\n', (53480, 53488), True, 'import 
pandas as pd\n'), ((53606, 53699), 'LOSSPhotPypeline.utils.plotLC', 'LPPu.plotLC', ([], {'lc_file': 'lc_raw_name', 'lm_file': 'lm_raw_name', 'name': 'self.targetname', 'photmethod': 'm'}), '(lc_file=lc_raw_name, lm_file=lm_raw_name, name=self.targetname,\n photmethod=m)\n', (53617, 53699), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((54882, 54905), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (54896, 54905), False, 'import os\n'), ((56613, 56639), 'os.path.isdir', 'os.path.isdir', (['self.lc_dir'], {}), '(self.lc_dir)\n', (56626, 56639), False, 'import os\n'), ((56653, 56677), 'os.makedirs', 'os.makedirs', (['self.lc_dir'], {}), '(self.lc_dir)\n', (56664, 56677), False, 'import os\n'), ((59460, 59481), 'numpy.random.seed', 'np.random.seed', (['rseed'], {}), '(rseed)\n', (59474, 59481), True, 'import numpy as np\n'), ((59546, 59576), 'os.path.exists', 'os.path.exists', (['self.error_dir'], {}), '(self.error_dir)\n', (59560, 59576), False, 'import os\n'), ((59590, 59617), 'os.makedirs', 'os.makedirs', (['self.error_dir'], {}), '(self.error_dir)\n', (59601, 59617), False, 'import os\n'), ((59890, 59912), 'astropy.wcs.WCS', 'WCS', ([], {'header': 'img.header'}), '(header=img.header)\n', (59893, 59912), False, 'from astropy.wcs import WCS\n'), ((63405, 63422), 'LOSSPhotPypeline.utils.idl', 'LPPu.idl', (['idl_cmd'], {}), '(idl_cmd)\n', (63413, 63422), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((64065, 64080), 'pandas.Series', 'pd.Series', (['succ'], {}), '(succ)\n', (64074, 64080), True, 'import pandas as pd\n'), ((65088, 65132), 'LOSSPhotPypeline.image.Phot', 'Phot', (['self.refname'], {'calmethod': 'self.calmethod'}), '(self.refname, calmethod=self.calmethod)\n', (65092, 65132), False, 'from LOSSPhotPypeline.image import Phot, FitsInfo, FileNames\n'), ((65892, 65917), 'os.path.exists', 'os.path.exists', (['sn.lc_dir'], {}), '(sn.lc_dir)\n', (65906, 65917), False, 'import os\n'), ((65931, 65953), 'os.makedirs', 'os.makedirs', (['sn.lc_dir'], {}), '(sn.lc_dir)\n', (65942, 65953), False, 'import os\n'), ((70017, 70060), 'pandas.merge', 'pd.merge', (['lc', 'r'], {'on': '"""imagename"""', 'how': '"""left"""'}), "(lc, r, on='imagename', how='left')\n", (70025, 70060), True, 'import pandas as pd\n'), ((70147, 70200), 'numpy.sqrt', 'np.sqrt', (["(orig_stat_err ** 2 + tmp['sim_std_mag'] ** 2)"], {}), "(orig_stat_err ** 2 + tmp['sim_std_mag'] ** 2)\n", (70154, 70200), True, 'import numpy as np\n'), ((70702, 70788), 'LOSSPhotPypeline.utils.plotLC', 'LPPu.plotLC', ([], {'lc_file': 'lc_raw_name', 'name': 'self.targetname', 'photmethod': 'self.calmethod'}), '(lc_file=lc_raw_name, name=self.targetname, photmethod=self.\n calmethod)\n', (70713, 70788), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((71405, 71447), 'pandas.read_csv', 'pd.read_csv', (['row[1]'], {'delim_whitespace': '(True)'}), '(row[1], delim_whitespace=True)\n', (71416, 71447), True, 'import pandas as pd\n'), ((71710, 71786), 'LOSSPhotPypeline.utils.plotLC', 'LPPu.plotLC', ([], {'lc_file': 'lc_nat', 'name': 'self.targetname', 'photmethod': 'self.calmethod'}), '(lc_file=lc_nat, name=self.targetname, photmethod=self.calmethod)\n', (71721, 71786), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((72224, 72296), 'LOSSPhotPypeline.utils.plotLC', 'LPPu.plotLC', ([], {'lc_file': 'lc', 'name': 'self.targetname', 'photmethod': 'self.calmethod'}), '(lc_file=lc, name=self.targetname, photmethod=self.calmethod)\n', (72235, 72296), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((76810, 76904), 
'pandas.read_csv', 'pd.read_csv', (['new_image_file'], {'header': 'None', 'delim_whitespace': '(True)', 'comment': '"""#"""', 'squeeze': '(True)'}), "(new_image_file, header=None, delim_whitespace=True, comment='#',\n squeeze=True)\n", (76821, 76904), True, 'import pandas as pd\n'), ((79666, 79700), 'os.path.exists', 'os.path.exists', (['self.templates_dir'], {}), '(self.templates_dir)\n', (79680, 79700), False, 'import os\n'), ((82240, 82283), 'os.path.join', 'os.path.join', (['self.calibration_dir', 'calfile'], {}), '(self.calibration_dir, calfile)\n', (82252, 82283), False, 'import os\n'), ((84168, 84187), 'os.path.exists', 'os.path.exists', (['raw'], {}), '(raw)\n', (84182, 84187), False, 'import os\n'), ((84531, 84573), 'pandas.read_csv', 'pd.read_csv', (['row[1]'], {'delim_whitespace': '(True)'}), '(row[1], delim_whitespace=True)\n', (84542, 84573), True, 'import pandas as pd\n'), ((84839, 84905), 'LOSSPhotPypeline.utils.plotLC', 'LPPu.plotLC', ([], {'lc_file': 'groupfile', 'name': 'self.targetname', 'photmethod': 'm'}), '(lc_file=groupfile, name=self.targetname, photmethod=m)\n', (84850, 84905), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((85031, 85049), 'os.path.exists', 'os.path.exists', (['fl'], {}), '(fl)\n', (85045, 85049), False, 'import os\n'), ((85304, 85363), 'LOSSPhotPypeline.utils.plotLC', 'LPPu.plotLC', ([], {'lc_file': 'lc', 'name': 'self.targetname', 'photmethod': 'm'}), '(lc_file=lc, name=self.targetname, photmethod=m)\n', (85315, 85363), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((85608, 85631), 'LOSSPhotPypeline.utils.plotLC', 'LPPu.plotLC', ([], {'lc_file': 'fl'}), '(lc_file=fl)\n', (85619, 85631), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((86391, 86416), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (86414, 86416), False, 'import warnings\n'), ((86430, 86477), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'AstropyWarning'], {}), "('ignore', AstropyWarning)\n", (86451, 86477), False, 'import warnings\n'), ((89539, 89556), 'astropy.io.fits.open', 'fits.open', (['imname'], {}), '(imname)\n', (89548, 89556), False, 'from astropy.io import fits\n'), ((90189, 90217), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (90201, 90217), True, 'import matplotlib.pyplot as plt\n'), ((91185, 91194), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (91192, 91194), True, 'import matplotlib.pyplot as plt\n'), ((91475, 91485), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (91483, 91485), True, 'import matplotlib.pyplot as plt\n'), ((91581, 91590), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (91588, 91590), True, 'import matplotlib.pyplot as plt\n'), ((91603, 91613), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (91611, 91613), True, 'import matplotlib.pyplot as plt\n'), ((8867, 8977), 'pandas.read_csv', 'pd.read_csv', (['self.config_file'], {'header': 'None', 'delim_whitespace': '(True)', 'comment': '"""#"""', 'index_col': '(0)', 'squeeze': '(True)'}), "(self.config_file, header=None, delim_whitespace=True, comment=\n '#', index_col=0, squeeze=True)\n", (8878, 8977), True, 'import pandas as pd\n'), ((11822, 11887), 'logging.Formatter', 'logging.Formatter', (["('\\n' + '*' * 60 + '\\n%(message)s\\n' + '*' * 60)"], {}), "('\\n' + '*' * 60 + '\\n%(message)s\\n' + '*' * 60)\n", (11839, 11887), False, 'import logging\n'), ((22025, 22058), 'inspect.getfile', 'inspect.getfile', (['LOSSPhotPypeline'], {}), '(LOSSPhotPypeline)\n', (22040, 
22058), False, 'import inspect\n'), ((24790, 24838), 'os.path.join', 'os.path.join', (['self.calibration_dir', 'self.calfile'], {}), '(self.calibration_dir, self.calfile)\n', (24802, 24838), False, 'import os\n'), ((26179, 26231), 'os.path.join', 'os.path.join', (['self.calibration_dir', 'self.calfile_use'], {}), '(self.calibration_dir, self.calfile_use)\n', (26191, 26231), False, 'import os\n'), ((38524, 38553), 'numpy.abs', 'np.abs', (["df.loc[:, 'Mag_diff']"], {}), "(df.loc[:, 'Mag_diff'])\n", (38530, 38553), True, 'import numpy as np\n'), ((40952, 40987), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 6)'}), '(1, 2, figsize=(12, 6))\n', (40964, 40987), True, 'import matplotlib.pyplot as plt\n'), ((41752, 41779), 'LOSSPhotPypeline.utils.plotLC', 'LPPu.plotLC', ([], {'offset_scale': '(2)'}), '(offset_scale=2)\n', (41763, 41779), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((42905, 42923), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (42921, 42923), True, 'import matplotlib.pyplot as plt\n'), ((46827, 46837), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (46835, 46837), True, 'import matplotlib.pyplot as plt\n'), ((46854, 46865), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (46863, 46865), True, 'import matplotlib.pyplot as plt\n'), ((48915, 48972), 'os.path.join', 'os.path.join', (['self.calibration_dir', '"""final_ref_stars.dat"""'], {}), "(self.calibration_dir, 'final_ref_stars.dat')\n", (48927, 48972), False, 'import os\n'), ((49275, 49327), 'os.path.join', 'os.path.join', (['self.calibration_dir', 'self.calfile_use'], {}), '(self.calibration_dir, self.calfile_use)\n', (49287, 49327), False, 'import os\n'), ((52463, 52476), 'numpy.isnan', 'np.isnan', (['mag'], {}), '(mag)\n', (52471, 52476), True, 'import numpy as np\n'), ((54415, 54438), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (54429, 54438), False, 'import os\n'), ((57867, 57909), 'pandas.read_csv', 'pd.read_csv', (['row[1]'], {'delim_whitespace': '(True)'}), '(row[1], delim_whitespace=True)\n', (57878, 57909), True, 'import pandas as pd\n'), ((58192, 58255), 'LOSSPhotPypeline.utils.plotLC', 'LPPu.plotLC', ([], {'lc_file': 'lc_nat', 'name': 'self.targetname', 'photmethod': 'm'}), '(lc_file=lc_nat, name=self.targetname, photmethod=m)\n', (58203, 58255), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((58708, 58767), 'LOSSPhotPypeline.utils.plotLC', 'LPPu.plotLC', ([], {'lc_file': 'lc', 'name': 'self.targetname', 'photmethod': 'm'}), '(lc_file=lc, name=self.targetname, photmethod=m)\n', (58719, 58767), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((60305, 60345), 'numpy.arctan2', 'np.arctan2', (['(sn_y - host_y)', '(sn_x - host_x)'], {}), '(sn_y - host_y, sn_x - host_x)\n', (60315, 60345), True, 'import numpy as np\n'), ((60446, 60520), 'numpy.linspace', 'np.linspace', (['(2 * np.pi / n_stars)', '(2 * np.pi - 2 * np.pi / n_stars)', 'n_stars'], {}), '(2 * np.pi / n_stars, 2 * np.pi - 2 * np.pi / n_stars, n_stars)\n', (60457, 60520), True, 'import numpy as np\n'), ((62674, 62687), 'numpy.isnan', 'np.isnan', (['mag'], {}), '(mag)\n', (62682, 62687), True, 'import numpy as np\n'), ((62693, 62706), 'numpy.isinf', 'np.isinf', (['mag'], {}), '(mag)\n', (62701, 62706), True, 'import numpy as np\n'), ((65300, 65346), 'pandas.DataFrame', 'pd.DataFrame', (["{'RA': fake_ra, 'DEC': fake_dec}"], {}), "({'RA': fake_ra, 'DEC': fake_dec})\n", (65312, 65346), True, 'import pandas as pd\n'), ((67372, 67412), 'os.path.join', 
'os.path.join', (['sn.lc_dir', '"""inj_stars.png"""'], {}), "(sn.lc_dir, 'inj_stars.png')\n", (67384, 67412), False, 'import os\n'), ((72006, 72044), 'pandas.read_csv', 'pd.read_csv', (['fl'], {'delim_whitespace': '(True)'}), '(fl, delim_whitespace=True)\n', (72017, 72044), True, 'import pandas as pd\n'), ((77015, 77040), 'pandas.Series', 'pd.Series', (['new_image_list'], {}), '(new_image_list)\n', (77024, 77040), True, 'import pandas as pd\n'), ((80293, 80308), 'LOSSPhotPypeline.image.FitsInfo', 'FitsInfo', (['templ'], {}), '(templ)\n', (80301, 80308), False, 'from LOSSPhotPypeline.image import Phot, FitsInfo, FileNames\n'), ((87533, 87562), 'LOSSPhotPypeline.utils.get_first_obs_date', 'LPPu.get_first_obs_date', (['self'], {}), '(self)\n', (87556, 87562), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((91661, 91682), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_fig'], {}), '(save_fig)\n', (91672, 91682), True, 'import matplotlib.pyplot as plt\n'), ((91934, 91945), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (91943, 91945), True, 'import matplotlib.pyplot as plt\n'), ((5830, 5918), 'LOSSPhotPypeline.utils.genconf', 'LPPu.genconf', ([], {'targetname': 'self.targetname', 'config_file': "(self.config_file + '.template')"}), "(targetname=self.targetname, config_file=self.config_file +\n '.template')\n", (5842, 5918), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((19338, 19357), 'pandas.Series', 'pd.Series', (['bool_idx'], {}), '(bool_idx)\n', (19347, 19357), True, 'import pandas as pd\n'), ((19889, 19908), 'pandas.Series', 'pd.Series', (['bool_idx'], {}), '(bool_idx)\n', (19898, 19908), True, 'import pandas as pd\n'), ((20609, 20628), 'pandas.Series', 'pd.Series', (['bool_idx'], {}), '(bool_idx)\n', (20618, 20628), True, 'import pandas as pd\n'), ((32349, 32375), 'os.path.exists', 'os.path.exists', (['img.psfdat'], {}), '(img.psfdat)\n', (32363, 32375), False, 'import os\n'), ((39813, 39935), 'pandas.Series', 'pd.Series', (['[2, 4, 4, 3, 3, 3, 3]'], {'index': "['pct_im', 'RA_diff', 'DEC_diff', 'Mag_cal', 'Mag_obs', 'std_obs', 'Mag_diff']"}), "([2, 4, 4, 3, 3, 3, 3], index=['pct_im', 'RA_diff', 'DEC_diff',\n 'Mag_cal', 'Mag_obs', 'std_obs', 'Mag_diff'])\n", (39822, 39935), True, 'import pandas as pd\n'), ((42864, 42893), 'numpy.abs', 'np.abs', (['((x1 - x0) / (y1 - y0))'], {}), '((x1 - x0) / (y1 - y0))\n', (42870, 42893), True, 'import numpy as np\n'), ((44422, 44501), 'itertools.zip_longest', 'itertools.zip_longest', (['cal_succ.iloc[:nshow].index', 'img_succ.iloc[:nshow].index'], {}), '(cal_succ.iloc[:nshow].index, img_succ.iloc[:nshow].index)\n', (44443, 44501), False, 'import itertools\n'), ((58478, 58516), 'pandas.read_csv', 'pd.read_csv', (['fl'], {'delim_whitespace': '(True)'}), '(fl, delim_whitespace=True)\n', (58489, 58516), True, 'import pandas as pd\n'), ((63332, 63358), 'os.path.basename', 'os.path.basename', (['img.cimg'], {}), '(img.cimg)\n', (63348, 63358), False, 'import os\n'), ((63581, 63607), 'os.path.basename', 'os.path.basename', (['img.cimg'], {}), '(img.cimg)\n', (63597, 63607), False, 'import os\n'), ((64904, 64924), 'os.path.basename', 'os.path.basename', (['fl'], {}), '(fl)\n', (64920, 64924), False, 'import os\n'), ((71575, 71609), 'pandas.concat', 'pd.concat', (['concat_list'], {'sort': '(False)'}), '(concat_list, sort=False)\n', (71584, 71609), True, 'import pandas as pd\n'), ((72093, 72127), 'pandas.concat', 'pd.concat', (['concat_list'], {'sort': '(False)'}), '(concat_list, sort=False)\n', (72102, 72127), True, 'import pandas as 
pd\n'), ((80869, 80886), 'LOSSPhotPypeline.utils.idl', 'LPPu.idl', (['idl_cmd'], {}), '(idl_cmd)\n', (80877, 80886), True, 'import LOSSPhotPypeline.utils as LPPu\n'), ((84701, 84735), 'pandas.concat', 'pd.concat', (['concat_list'], {'sort': '(False)'}), '(concat_list, sort=False)\n', (84710, 84735), True, 'import pandas as pd\n'), ((85086, 85124), 'pandas.read_csv', 'pd.read_csv', (['fl'], {'delim_whitespace': '(True)'}), '(fl, delim_whitespace=True)\n', (85097, 85124), True, 'import pandas as pd\n'), ((85173, 85207), 'pandas.concat', 'pd.concat', (['concat_list'], {'sort': '(False)'}), '(concat_list, sort=False)\n', (85182, 85207), True, 'import pandas as pd\n'), ((92691, 92705), 'numpy.median', 'np.median', (['sub'], {}), '(sub)\n', (92700, 92705), True, 'import numpy as np\n'), ((92707, 92718), 'numpy.std', 'np.std', (['sub'], {}), '(sub)\n', (92713, 92718), True, 'import numpy as np\n'), ((32479, 32508), 'os.path.exists', 'os.path.exists', (['img.psfsubdat'], {}), '(img.psfsubdat)\n', (32493, 32508), False, 'import os\n'), ((49017, 49083), 'pandas.concat', 'pd.concat', (['[df.loc[self.cal_IDs, :] for df in df_list]'], {'sort': '(False)'}), '([df.loc[self.cal_IDs, :] for df in df_list], sort=False)\n', (49026, 49083), True, 'import pandas as pd\n'), ((58053, 58087), 'pandas.concat', 'pd.concat', (['concat_list'], {'sort': '(False)'}), '(concat_list, sort=False)\n', (58062, 58087), True, 'import pandas as pd\n'), ((58573, 58607), 'pandas.concat', 'pd.concat', (['concat_list'], {'sort': '(False)'}), '(concat_list, sort=False)\n', (58582, 58607), True, 'import pandas as pd\n'), ((60540, 60592), 'numpy.sqrt', 'np.sqrt', (['((sn_y - host_y) ** 2 + (sn_x - host_x) ** 2)'], {}), '((sn_y - host_y) ** 2 + (sn_x - host_x) ** 2)\n', (60547, 60592), True, 'import numpy as np\n'), ((60591, 60616), 'numpy.cos', 'np.cos', (['(theta_sn + dtheta)'], {}), '(theta_sn + dtheta)\n', (60597, 60616), True, 'import numpy as np\n'), ((60646, 60698), 'numpy.sqrt', 'np.sqrt', (['((sn_y - host_y) ** 2 + (sn_x - host_x) ** 2)'], {}), '((sn_y - host_y) ** 2 + (sn_x - host_x) ** 2)\n', (60653, 60698), True, 'import numpy as np\n'), ((60697, 60722), 'numpy.sin', 'np.sin', (['(theta_sn + dtheta)'], {}), '(theta_sn + dtheta)\n', (60703, 60722), True, 'import numpy as np\n'), ((60859, 60907), 'numpy.linspace', 'np.linspace', (['(np.pi / 4)', '(2 * np.pi - np.pi / 4)', '(4)'], {}), '(np.pi / 4, 2 * np.pi - np.pi / 4, 4)\n', (60870, 60907), True, 'import numpy as np\n'), ((61078, 61126), 'numpy.linspace', 'np.linspace', (['(np.pi / 4)', '(2 * np.pi - np.pi / 4)', '(4)'], {}), '(np.pi / 4, 2 * np.pi - np.pi / 4, 4)\n', (61089, 61126), True, 'import numpy as np\n'), ((61440, 61468), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(7)'], {}), '(0, 2 * np.pi, 7)\n', (61451, 61468), True, 'import numpy as np\n'), ((62961, 62997), 'numpy.random.normal', 'np.random.normal', (['mag', 'emag', 'n_stars'], {}), '(mag, emag, n_stars)\n', (62977, 62997), True, 'import numpy as np\n'), ((91755, 91806), 'os.path.join', 'os.path.join', (['self.calibration_dir', '"""ref_stars.png"""'], {}), "(self.calibration_dir, 'ref_stars.png')\n", (91767, 91806), False, 'import os\n'), ((93156, 93169), 'numpy.mean', 'np.mean', (['mags'], {}), '(mags)\n', (93163, 93169), True, 'import numpy as np\n'), ((93262, 93275), 'numpy.mean', 'np.mean', (['mags'], {}), '(mags)\n', (93269, 93275), True, 'import numpy as np\n'), ((93277, 93289), 'numpy.std', 'np.std', (['mags'], {}), '(mags)\n', (93283, 93289), True, 'import numpy as np\n'), ((41169, 
41307), 'pandas.Series', 'pd.Series', (["[img.mjd, img.filter, img.phot_sub.loc[-1, self.calmethod], img.phot_sub.\n loc[-1, self.calmethod + '_err'], img.color_term]"], {}), "([img.mjd, img.filter, img.phot_sub.loc[-1, self.calmethod], img.\n phot_sub.loc[-1, self.calmethod + '_err'], img.color_term])\n", (41178, 41307), True, 'import pandas as pd\n'), ((41472, 41597), 'pandas.Series', 'pd.Series', (["[img.mjd, img.filter, img.phot.loc[-1, 'Mag_obs'], img.phot.loc[-1, self.\n calmethod + '_err'], img.color_term]"], {}), "([img.mjd, img.filter, img.phot.loc[-1, 'Mag_obs'], img.phot.loc[-\n 1, self.calmethod + '_err'], img.color_term])\n", (41481, 41597), True, 'import pandas as pd\n'), ((93337, 93350), 'numpy.mean', 'np.mean', (['mags'], {}), '(mags)\n', (93344, 93350), True, 'import numpy as np\n'), ((93511, 93524), 'numpy.mean', 'np.mean', (['mags'], {}), '(mags)\n', (93518, 93524), True, 'import numpy as np\n'), ((93359, 93371), 'numpy.std', 'np.std', (['mags'], {}), '(mags)\n', (93365, 93371), True, 'import numpy as np\n'), ((93533, 93545), 'numpy.std', 'np.std', (['mags'], {}), '(mags)\n', (93539, 93545), True, 'import numpy as np\n'), ((61548, 61562), 'numpy.cos', 'np.cos', (['dtheta'], {}), '(dtheta)\n', (61554, 61562), True, 'import numpy as np\n'), ((61811, 61825), 'numpy.sin', 'np.sin', (['dtheta'], {}), '(dtheta)\n', (61817, 61825), True, 'import numpy as np\n'), ((61023, 61046), 'numpy.array', 'np.array', (['[1, 0, -1, 0]'], {}), '([1, 0, -1, 0])\n', (61031, 61046), True, 'import numpy as np\n'), ((61242, 61265), 'numpy.array', 'np.array', (['[0, 1, 0, -1]'], {}), '([0, 1, 0, -1])\n', (61250, 61265), True, 'import numpy as np\n'), ((61568, 61594), 'numpy.cos', 'np.cos', (['(dtheta + np.pi / 6)'], {}), '(dtheta + np.pi / 6)\n', (61574, 61594), True, 'import numpy as np\n'), ((61598, 61612), 'numpy.cos', 'np.cos', (['dtheta'], {}), '(dtheta)\n', (61604, 61612), True, 'import numpy as np\n'), ((61688, 61714), 'numpy.cos', 'np.cos', (['(dtheta + np.pi / 6)'], {}), '(dtheta + np.pi / 6)\n', (61694, 61714), True, 'import numpy as np\n'), ((61718, 61732), 'numpy.cos', 'np.cos', (['dtheta'], {}), '(dtheta)\n', (61724, 61732), True, 'import numpy as np\n'), ((61831, 61857), 'numpy.sin', 'np.sin', (['(dtheta + np.pi / 6)'], {}), '(dtheta + np.pi / 6)\n', (61837, 61857), True, 'import numpy as np\n'), ((61861, 61875), 'numpy.sin', 'np.sin', (['dtheta'], {}), '(dtheta)\n', (61867, 61875), True, 'import numpy as np\n'), ((61951, 61977), 'numpy.sin', 'np.sin', (['(dtheta + np.pi / 6)'], {}), '(dtheta + np.pi / 6)\n', (61957, 61977), True, 'import numpy as np\n'), ((61981, 61995), 'numpy.sin', 'np.sin', (['dtheta'], {}), '(dtheta)\n', (61987, 61995), True, 'import numpy as np\n'), ((61005, 61022), 'numpy.cos', 'np.cos', (['(np.pi / 4)'], {}), '(np.pi / 4)\n', (61011, 61022), True, 'import numpy as np\n'), ((61224, 61241), 'numpy.sin', 'np.sin', (['(np.pi / 4)'], {}), '(np.pi / 4)\n', (61230, 61241), True, 'import numpy as np\n')] |
from argparse import ArgumentParser
import os
import sys
from . import solver, utils
from pprint import pprint
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-f", "--filename", nargs="*", default=[],
help="puzzle filenames. See data/puzzles for examples")
parser.add_argument("-d", "--dirname",
help="Solve all .txt puzzle files in directory")
parser.add_argument("-s", "--strategy",
choices=[strat.name for strat in solver.Strategies],
default=solver.Strategies.MIN_HEAP.name,
help="Strategy for state expansion")
parser.add_argument("-v", "--verbose",
action="store_true",
help="If set will also print stats about search")
args = parser.parse_args()
print(f"Using strategy {args.strategy}")
filenames = list(args.filename)
if args.dirname:
try:
for fname in sorted(os.listdir(args.dirname)):
if fname.endswith(".txt"):
filenames.append(os.path.join(args.dirname, fname))
except FileNotFoundError:
print(f"error: no such directory: {args.dirname}", file=sys.stderr)
sys.exit(1)
for filename in filenames:
if not os.path.exists(filename):
print(f"file {filename} does not exist", file=sys.stderr)
sys.exit(1)
for filename in filenames:
print(filename)
board = utils.read_board_from_file(filename)
print("unsolved board:")
utils.print_unsolved_board(board)
print("solving...")
stats = {}
solver.solve_search_naive(
board,
strategy=solver.Strategies[args.strategy],
stats=stats
)
print("solved board:")
utils.print_solved_board(board)
if args.verbose:
pprint(stats)
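# Hypothetical invocations (package/module names are placeholders, not from the source; the relative
# imports above mean this file has to be run as a module with "python -m"):
#   python -m <package>.<module> -d data/puzzles -s MIN_HEAP -v
#   python -m <package>.<module> -f data/puzzles/example.txt --strategy MIN_HEAP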
| [
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"sys.exit",
"pprint.pprint"
] | [((153, 169), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (167, 169), False, 'from argparse import ArgumentParser\n'), ((1349, 1373), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1363, 1373), False, 'import os\n'), ((1457, 1468), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1465, 1468), False, 'import sys\n'), ((1951, 1964), 'pprint.pprint', 'pprint', (['stats'], {}), '(stats)\n', (1957, 1964), False, 'from pprint import pprint\n'), ((1022, 1046), 'os.listdir', 'os.listdir', (['args.dirname'], {}), '(args.dirname)\n', (1032, 1046), False, 'import os\n'), ((1290, 1301), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1298, 1301), False, 'import sys\n'), ((1129, 1162), 'os.path.join', 'os.path.join', (['args.dirname', 'fname'], {}), '(args.dirname, fname)\n', (1141, 1162), False, 'import os\n')] |
import math
import snap
from src.preprocessing import data_manager
def degree_frac(graph, budget, seed):
"""Selects each vertex fractionally proportional to its degree."""
# Store incentive assignments in a dictionary indexed on nodes id
incentives = dict()
# Compute the fractional budget based on the number of edges and keep track of the amount spent
budget_fraction = budget / graph.GetEdges()
spent = 0
# Set initial incentives to be proportional to the out-degree of each node
for node in graph.Nodes():
node_budget = math.floor(budget_fraction * node.GetOutDeg())
incentives[node.GetId()] = node_budget
spent += node_budget
    # Create a seeded snap random generator so results can be reproduced
    rnd = snap.TRnd(seed)
    # Get the remaining unassigned budget and distribute it randomly
    remainder = budget - spent
    for i in range(0, remainder):
        incentives[graph.GetRndNId(rnd)] += 1
return incentives
def discount_frac(graph, thresholds, budget):
"""Selects the vertex having the highest degree at each step and assigns to it a budged equal to the minimum
amount that allows to activate it.
"""
# Initialize the set of unexplored nodes in a dictionary with their current number of unexplored nodes pointed
unexplored = {node.GetId(): node.GetOutDeg() for node in graph.Nodes()}
# Store the current number of explored nodes pointing to each node
neighbors_explored = {node.GetId(): 0 for node in graph.Nodes()}
# Store incentive assignments in a dictionary indexed on nodes id
incentives = {node.GetId(): 0 for node in graph.Nodes()}
while budget > 0 and len(unexplored) > 0:
# Get the identifier of the node with most unexplored neighbors
max_id = max(unexplored, key=unexplored.get)
candidate = graph.GetNI(max_id)
# Compute node index
threshold = thresholds[candidate.GetId()]
index = max(0, threshold - neighbors_explored[max_id])
# Compute node incentive and update the budget
incentive = min(budget, index)
incentives[candidate.GetId()] = incentive
budget -= incentive
# Lower the number of unexplored neighbors for each node that points to the candidate
for node_id in set(candidate.GetInEdges()).intersection(unexplored.keys()):
unexplored[node_id] -= 1
# Increase the number of explored neighbors for each node pointed by the candidate
for node_id in candidate.GetOutEdges():
neighbors_explored[node_id] += 1
# Add the node to the target set
unexplored.pop(candidate.GetId())
return incentives
def tpi(graph, thresholds):
# Make a temporary copy of the graph to make direct changes
temp_graph = data_manager.copy_graph(graph)
temp_thresholds = thresholds.copy()
# Store incentive assignments in a dictionary indexed on nodes id
incentives = {node.GetId(): 0 for node in graph.Nodes()}
# Keep track of nodes not examined yet
unexplored = {}
for node in temp_graph.Nodes():
# Get current threshold and in-degree
threshold = temp_thresholds[node.GetId()]
in_degree = node.GetInDeg()
if in_degree == 0:
index = threshold
else:
index = (threshold * (threshold + 1)) / (in_degree * (in_degree + 1))
unexplored[node.GetId()] = index
# Perform operations until all nodes have been examined
while len(unexplored) > 0:
explored = set()
for node_id in unexplored:
# Get the node iterator from its identifier
node = temp_graph.GetNI(node_id)
# Get current threshold and in-degree to see if condition holds
threshold = temp_thresholds[node.GetId()]
in_degree = node.GetInDeg()
if threshold > in_degree:
# Get the current incentive for the node
incentive = incentives[node.GetId()]
# Update both incentive and threshold for the node
incentives[node.GetId()] = incentive + threshold - in_degree
temp_thresholds[node.GetId()] = in_degree
# Remove the node if it has no more in-edges
if in_degree == 0:
explored.add(node_id)
# Remove all nodes with 0 in-degree
for node_id in explored:
unexplored.pop(node_id)
if len(unexplored) == 0:
# Exit the loop if all nodes have been explored
break
else:
# Choose a vertex to remove from the graph
max_id = max(unexplored, key=unexplored.get)
candidate = graph.GetNI(max_id)
# Mark the edges going out from the candidate to be removed
for destination in candidate.GetOutEdges():
temp_graph.DelEdge(candidate.GetId(), destination)
# Remove the candidate node from the set of those to be examined
unexplored.pop(candidate.GetId())
return incentives
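# Minimal usage sketch (added illustration with hypothetical example data, not part of the original
# module): builds a tiny snap directed graph and a {node_id: threshold} dict, which is the input
# shape the heuristics above expect.
if __name__ == "__main__":
    example_graph = snap.TNGraph.New()
    for node_id in range(4):
        example_graph.AddNode(node_id)
    for src, dst in [(0, 1), (0, 2), (1, 2), (2, 3)]:
        example_graph.AddEdge(src, dst)
    # Arbitrary example thresholds: every node needs one active in-neighbor
    example_thresholds = {node.GetId(): 1 for node in example_graph.Nodes()}
    print(discount_frac(example_graph, example_thresholds, budget=2))
    print(tpi(example_graph, example_thresholds))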
| [
"src.preprocessing.data_manager.copy_graph",
"snap.TRnd"
] | [((758, 773), 'snap.TRnd', 'snap.TRnd', (['seed'], {}), '(seed)\n', (767, 773), False, 'import snap\n'), ((2794, 2824), 'src.preprocessing.data_manager.copy_graph', 'data_manager.copy_graph', (['graph'], {}), '(graph)\n', (2817, 2824), False, 'from src.preprocessing import data_manager\n')] |
import time
import traceback
import cv2 as cv
from picamera.array import PiRGBArray
from picamera import PiCamera
# Min matches to look for homography
#MIN_MATCH_COUNT = 10
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
model = ResNet50(weights='imagenet')
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (224, 224)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(224, 224))
time.sleep(0.1)
def main():
#img1 = cv.imread('banana.jpg')
#img1 = cv.resize(img1, (0,0), fx=0.5, fy=0.5)
#orb = cv.ORB_create(
# nfeatures=5000, edgeThreshold=20, patchSize=20, scaleFactor=1.3, nlevels=20)
#kp1, des1 = orb.detectAndCompute(img1, None)
frame_count = 0
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image, then initialize the timestamp
# and occupied/unoccupied text
image = frame.array
#cv.imshow("Frame", image)
key = cv.waitKey(1) & 0xFF
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
get_match_image(image)
    # When everything is done, release the camera resources
    camera.close()
cv.destroyAllWindows()
def get_match_image(img):
#img_path = 'banana.jpg'
#img = image.load_img(img_path, target_size=(224, 224))
#x = image.img_to_array(img)
x = np.expand_dims(img, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
print('Predicted:', decode_predictions(preds, top=3)[0])
if __name__ == "__main__":
main()
| [
"keras.applications.resnet50.decode_predictions",
"keras.applications.resnet50.preprocess_input",
"picamera.PiCamera",
"time.sleep",
"cv2.destroyAllWindows",
"keras.applications.resnet50.ResNet50",
"picamera.array.PiRGBArray",
"numpy.expand_dims",
"cv2.waitKey"
] | [((368, 396), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (376, 396), False, 'from keras.applications.resnet50 import ResNet50\n'), ((478, 488), 'picamera.PiCamera', 'PiCamera', ([], {}), '()\n', (486, 488), False, 'from picamera import PiCamera\n'), ((555, 590), 'picamera.array.PiRGBArray', 'PiRGBArray', (['camera'], {'size': '(224, 224)'}), '(camera, size=(224, 224))\n', (565, 590), False, 'from picamera.array import PiRGBArray\n'), ((591, 606), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (601, 606), False, 'import time\n'), ((1466, 1488), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1486, 1488), True, 'import cv2 as cv\n'), ((1646, 1673), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1660, 1673), True, 'import numpy as np\n'), ((1682, 1701), 'keras.applications.resnet50.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (1698, 1701), False, 'from keras.applications.resnet50 import preprocess_input, decode_predictions\n'), ((1162, 1175), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (1172, 1175), True, 'import cv2 as cv\n'), ((1888, 1920), 'keras.applications.resnet50.decode_predictions', 'decode_predictions', (['preds'], {'top': '(3)'}), '(preds, top=3)\n', (1906, 1920), False, 'from keras.applications.resnet50 import preprocess_input, decode_predictions\n')] |
# --------------------------
# UFSC - CTC - INE - INE5603
# Exercise: calculations
# --------------------------
# Class responsible for determining whether two integers are amicable ("friendly") numbers.
from view.paineis.painel_abstrato import PainelAbstrato
from model.calculos import amigos
class PainelAmigos(PainelAbstrato):
def __init__(self):
super().__init__('Números Amigos')
def interaja(self):
(n1, n2) = self._leia2int()
if amigos(n1,n2):
msg = 'Os números {} e {} são amigos.'.format(n1,n2)
else:
msg = 'Os números {} e {} não são amigos.'.format(n1,n2)
print(msg)
| [
"model.calculos.amigos"
] | [((448, 462), 'model.calculos.amigos', 'amigos', (['n1', 'n2'], {}), '(n1, n2)\n', (454, 462), False, 'from model.calculos import amigos\n')] |
from pyecharts import options as opts
from pyecharts.charts import Grid, Line, Scatter
from pyecharts.faker import Faker
scatter = (
Scatter()
.add_xaxis(Faker.choose())
.add_yaxis("商家A", Faker.values())
.add_yaxis("商家B", Faker.values())
.set_global_opts(
title_opts=opts.TitleOpts(title="Grid-Scatter"),
legend_opts=opts.LegendOpts(pos_left="20%"),
)
)
line = (
Line()
.add_xaxis(Faker.choose())
.add_yaxis("商家A", Faker.values())
.add_yaxis("商家B", Faker.values())
.set_global_opts(
title_opts=opts.TitleOpts(title="Grid-Line", pos_right="5%"),
legend_opts=opts.LegendOpts(pos_right="20%"),
)
)
grid = (
Grid()
.add(scatter, grid_opts=opts.GridOpts(pos_left="55%"))
.add(line, grid_opts=opts.GridOpts(pos_right="55%"))
.render("grid_horizontal.html")
)
| [
"pyecharts.faker.Faker.values",
"pyecharts.options.TitleOpts",
"pyecharts.options.LegendOpts",
"pyecharts.charts.Scatter",
"pyecharts.charts.Line",
"pyecharts.charts.Grid",
"pyecharts.options.GridOpts",
"pyecharts.faker.Faker.choose"
] | [((296, 332), 'pyecharts.options.TitleOpts', 'opts.TitleOpts', ([], {'title': '"""Grid-Scatter"""'}), "(title='Grid-Scatter')\n", (310, 332), True, 'from pyecharts import options as opts\n'), ((354, 385), 'pyecharts.options.LegendOpts', 'opts.LegendOpts', ([], {'pos_left': '"""20%"""'}), "(pos_left='20%')\n", (369, 385), True, 'from pyecharts import options as opts\n'), ((563, 612), 'pyecharts.options.TitleOpts', 'opts.TitleOpts', ([], {'title': '"""Grid-Line"""', 'pos_right': '"""5%"""'}), "(title='Grid-Line', pos_right='5%')\n", (577, 612), True, 'from pyecharts import options as opts\n'), ((634, 666), 'pyecharts.options.LegendOpts', 'opts.LegendOpts', ([], {'pos_right': '"""20%"""'}), "(pos_right='20%')\n", (649, 666), True, 'from pyecharts import options as opts\n'), ((243, 257), 'pyecharts.faker.Faker.values', 'Faker.values', ([], {}), '()\n', (255, 257), False, 'from pyecharts.faker import Faker\n'), ((510, 524), 'pyecharts.faker.Faker.values', 'Faker.values', ([], {}), '()\n', (522, 524), False, 'from pyecharts.faker import Faker\n'), ((781, 811), 'pyecharts.options.GridOpts', 'opts.GridOpts', ([], {'pos_right': '"""55%"""'}), "(pos_right='55%')\n", (794, 811), True, 'from pyecharts import options as opts\n'), ((205, 219), 'pyecharts.faker.Faker.values', 'Faker.values', ([], {}), '()\n', (217, 219), False, 'from pyecharts.faker import Faker\n'), ((472, 486), 'pyecharts.faker.Faker.values', 'Faker.values', ([], {}), '()\n', (484, 486), False, 'from pyecharts.faker import Faker\n'), ((690, 696), 'pyecharts.charts.Grid', 'Grid', ([], {}), '()\n', (694, 696), False, 'from pyecharts.charts import Grid, Line, Scatter\n'), ((725, 754), 'pyecharts.options.GridOpts', 'opts.GridOpts', ([], {'pos_left': '"""55%"""'}), "(pos_left='55%')\n", (738, 754), True, 'from pyecharts import options as opts\n'), ((163, 177), 'pyecharts.faker.Faker.choose', 'Faker.choose', ([], {}), '()\n', (175, 177), False, 'from pyecharts.faker import Faker\n'), ((430, 444), 'pyecharts.faker.Faker.choose', 'Faker.choose', ([], {}), '()\n', (442, 444), False, 'from pyecharts.faker import Faker\n'), ((138, 147), 'pyecharts.charts.Scatter', 'Scatter', ([], {}), '()\n', (145, 147), False, 'from pyecharts.charts import Grid, Line, Scatter\n'), ((408, 414), 'pyecharts.charts.Line', 'Line', ([], {}), '()\n', (412, 414), False, 'from pyecharts.charts import Grid, Line, Scatter\n')] |
# Generated by Django 3.0.5 on 2020-04-23 22:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('basta', '0010_auto_20200423_2310'),
]
operations = [
migrations.AlterField(
model_name='round',
name='created_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='round',
name='modified_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='session',
name='created_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='session',
name='modified_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL),
),
]
| [
"django.db.migrations.swappable_dependency",
"django.db.models.ForeignKey"
] | [((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((470, 595), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': 'settings.AUTH_USER_MODEL'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='+', to=settings.AUTH_USER_MODEL)\n", (487, 595), False, 'from django.db import migrations, models\n'), ((717, 842), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': 'settings.AUTH_USER_MODEL'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='+', to=settings.AUTH_USER_MODEL)\n", (734, 842), False, 'from django.db import migrations, models\n'), ((965, 1090), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': 'settings.AUTH_USER_MODEL'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='+', to=settings.AUTH_USER_MODEL)\n", (982, 1090), False, 'from django.db import migrations, models\n'), ((1214, 1339), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': 'settings.AUTH_USER_MODEL'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='+', to=settings.AUTH_USER_MODEL)\n", (1231, 1339), False, 'from django.db import migrations, models\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import datetime as dt
fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(15,5))
# Get time axes set up
x_major_lct = mpl.dates.AutoDateLocator(minticks=2,maxticks=10, interval_multiples=True)
x_fmt = mpl.dates.AutoDateFormatter(x_major_lct)
plt.xlabel("Timestamp")
ax[0,0].set_title("Average Degree")
ax[0,1].set_title('Maximum Degree')
ax[0,2].set_title('Average Clustering Coefficient')
ax[0,3].set_title('Mean squared degree')
ax[1,0].set_title('Degree Assortativity')
ax[1,1].set_title('Singleton Nodes')
ax[1,2].set_title('Doubleton Nodes')
ax[1,3].set_title('Number of Triangles')
# File with all the time series of measurements
files = ["tutorial/CitationsTS.dat", "tutorial/CitationsTSArtificial.dat"]
labels = ["Real Data", "Model"]
for i in range(2):
# Read stats from datafile
with open(files[i],'r') as f:
f.readline()
rawdata = f.read().splitlines()
times = [dt.datetime.fromtimestamp(int(l.split()[0])) for l in rawdata]
times = times[:39]
matrix = np.array([[int(row.split()[0])]+[float(num) for num in row.split()[1:]] for row in rawdata])
matrix = matrix[:39]
df = pd.DataFrame(matrix)
f.close()
df.columns = ['timestamp', 'nodes', 'links', 'avgdeg', 'maxdeg', 'singletons', 'doubletons', 'meandegsq',
'assortativity', 'clustercoeff', 'triangles']
if i == 1:
start, end = df.iloc[0]['timestamp'], df.iloc[-1]['timestamp']
ax[0,0].plot(times, df['avgdeg'])
ax[0,1].plot(times, df['maxdeg'], label = labels[i])
ax[0,2].plot(times, df['clustercoeff'], label = labels[i])
ax[0,3].plot(times, df['meandegsq'], label = labels[i])
ax[1,0].plot(times, df['assortativity'], label = labels[i])
ax[1,1].plot(times, df['singletons'], label = labels[i])
ax[1,2].plot(times, df['doubletons'], label = labels[i])
ax[1,3].plot(times, df['triangles'], label = labels[i])
for row in range(2):
for col in range(4):
ax[row,col].xaxis.set_major_locator(x_major_lct)
ax[row,col].xaxis.set_major_formatter(x_fmt)
for label in ax[row,col].get_xmajorticklabels():
label.set_rotation(30)
for label in ax[row,col].get_ymajorticklabels():
label.set_rotation(30)
plt.legend(loc = 'upper left')
plt.tight_layout()
fig.savefig("tutorial/plots/CitationsVsArtificial.png")
plt.show() | [
"matplotlib.dates.AutoDateFormatter",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.dates.AutoDateLocator",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((129, 176), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(4)', 'figsize': '(15, 5)'}), '(nrows=2, ncols=4, figsize=(15, 5))\n', (141, 176), True, 'import matplotlib.pyplot as plt\n'), ((214, 289), 'matplotlib.dates.AutoDateLocator', 'mpl.dates.AutoDateLocator', ([], {'minticks': '(2)', 'maxticks': '(10)', 'interval_multiples': '(True)'}), '(minticks=2, maxticks=10, interval_multiples=True)\n', (239, 289), True, 'import matplotlib as mpl\n'), ((297, 337), 'matplotlib.dates.AutoDateFormatter', 'mpl.dates.AutoDateFormatter', (['x_major_lct'], {}), '(x_major_lct)\n', (324, 337), True, 'import matplotlib as mpl\n'), ((339, 362), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Timestamp"""'], {}), "('Timestamp')\n", (349, 362), True, 'import matplotlib.pyplot as plt\n'), ((2354, 2382), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (2364, 2382), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2403), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2401, 2403), True, 'import matplotlib.pyplot as plt\n'), ((2460, 2470), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2468, 2470), True, 'import matplotlib.pyplot as plt\n'), ((1246, 1266), 'pandas.DataFrame', 'pd.DataFrame', (['matrix'], {}), '(matrix)\n', (1258, 1266), True, 'import pandas as pd\n')] |
# -*- coding: iso-8859-1 -*-
from __future__ import print_function, division
import sys
if( sys.version_info[0] == 2 ):
range = xrange
import os
sys.path.insert( 0, os.getenv( "QM3_OPENMM" ) )
try:
import simtk.openmm
import simtk.openmm.app
import simtk.unit
class py_openmm( object ):
def __simulation( self ):
self.sim = simtk.openmm.app.Simulation( self.top, self.sys,
simtk.openmm.CustomIntegrator( 0.001 ),
simtk.openmm.Platform.getPlatformByName( self.knd ) )
def __init__( self, omm_system, topology, qm_excl = [], platform = "CPU" ):
self.sys = omm_system
self.top = topology
self.knd = platform
self.nbn = None
self.sim = None
for i in range( self.sys.getNumForces() ):
if( type( self.sys.getForce( i ) ) == simtk.openmm.NonbondedForce ):
self.nbn = self.sys.getForce( i )
try:
n = len( qm_excl )
for i in range( 0, n - 1 ):
for j in range( i + 1, n ):
self.nbn.addException( qm_excl[i], qm_excl[j], 0.0, 0.0, 0.0 )
except:
pass
self.__simulation()
def update_chrg( self, mol ):
for i in range( mol.natm ):
t = self.nbn.getParticleParameters( i )
self.nbn.setParticleParameters( i, mol.chrg[i], t[1], t[2] )
self.__simulation()
self.update_coor( mol )
def update_coor( self, mol ):
tmp = []
for i in range( mol.natm ):
i3 = i * 3
tmp.append( simtk.openmm.Vec3( mol.coor[i3], mol.coor[i3+1], mol.coor[i3+2] ) * simtk.unit.angstrom )
self.sim.context.setPositions( tmp )
def get_func( self, mol ):
self.update_coor( mol )
stt = self.sim.context.getState( getEnergy = True, getForces = False )
mol.func += stt.getPotentialEnergy().value_in_unit( simtk.unit.kilojoule/simtk.unit.mole )
def get_grad( self, mol ):
self.update_coor( mol )
stt = self.sim.context.getState( getEnergy = True, getForces = True )
mol.func += stt.getPotentialEnergy().value_in_unit( simtk.unit.kilojoule/simtk.unit.mole )
frc = stt.getForces()
for i in range( mol.natm ):
i3 = i * 3
for j in [0, 1, 2]:
mol.grad[i3+j] -= frc[i][j].value_in_unit( simtk.unit.kilojoule/(simtk.unit.angstrom*simtk.unit.mole) )
except:
pass
| [
"os.getenv"
] | [((172, 195), 'os.getenv', 'os.getenv', (['"""QM3_OPENMM"""'], {}), "('QM3_OPENMM')\n", (181, 195), False, 'import os\n')] |
#!/usr/bin/env python3
import pandas as pd
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import accuracy_score
from sklearn.metrics import pairwise_distances
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
import scipy.spatial as sp
import scipy.cluster.hierarchy as hc
def toint(x):
d=dict(zip("ACGT", range(4)))
return d[x]
def get_features_and_labels(filename):
df = pd.read_csv(filename, sep='\t')
y = df.y
A = np.array(df.X.map(list).values.tolist())
toint2 = np.vectorize(toint)
A = toint2(A)
return A, y
def plot(distances, method='average', affinity='euclidean'):
mylinkage = hc.linkage(sp.distance.squareform(distances), method=method)
g=sns.clustermap(distances, row_linkage=mylinkage, col_linkage=mylinkage )
g.fig.suptitle(f"Hierarchical clustering using {method} linkage and {affinity} affinity")
plt.show()
def cluster_euclidean(filename):
A, y = get_features_and_labels(filename)
model = AgglomerativeClustering(2, linkage="average", affinity='euclidean')
yfitted = model.fit_predict(A)
acc = accuracy_score(y, yfitted)
return acc
def cluster_hamming(filename):
A, y = get_features_and_labels(filename)
distances = pairwise_distances(A, metric="hamming")
model = AgglomerativeClustering(2, linkage="average", affinity='precomputed')
yfitted = 1 - model.fit_predict(distances)
acc = accuracy_score(y, yfitted)
# plot commented out from model solution, due to tests returning MemoryError sometimes
# plot(distances, "average", "hamming")
return acc
def main():
print("Accuracy score with Euclidean affinity is", cluster_euclidean("src/data.seq"))
print("Accuracy score with Hamming affinity is", cluster_hamming("src/data.seq"))
if __name__ == "__main__":
main()
| [
"seaborn.set",
"sklearn.cluster.AgglomerativeClustering",
"scipy.spatial.distance.squareform",
"pandas.read_csv",
"seaborn.clustermap",
"sklearn.metrics.pairwise_distances",
"numpy.vectorize",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.show"
] | [((269, 294), 'seaborn.set', 'sns.set', ([], {'color_codes': '(True)'}), '(color_codes=True)\n', (276, 294), True, 'import seaborn as sns\n'), ((475, 506), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '"""\t"""'}), "(filename, sep='\\t')\n", (486, 506), True, 'import pandas as pd\n'), ((582, 601), 'numpy.vectorize', 'np.vectorize', (['toint'], {}), '(toint)\n', (594, 601), True, 'import numpy as np\n'), ((782, 853), 'seaborn.clustermap', 'sns.clustermap', (['distances'], {'row_linkage': 'mylinkage', 'col_linkage': 'mylinkage'}), '(distances, row_linkage=mylinkage, col_linkage=mylinkage)\n', (796, 853), True, 'import seaborn as sns\n'), ((953, 963), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (961, 963), True, 'from matplotlib import pyplot as plt\n'), ((1056, 1123), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', (['(2)'], {'linkage': '"""average"""', 'affinity': '"""euclidean"""'}), "(2, linkage='average', affinity='euclidean')\n", (1079, 1123), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((1169, 1195), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'yfitted'], {}), '(y, yfitted)\n', (1183, 1195), False, 'from sklearn.metrics import accuracy_score\n'), ((1305, 1344), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['A'], {'metric': '"""hamming"""'}), "(A, metric='hamming')\n", (1323, 1344), False, 'from sklearn.metrics import pairwise_distances\n'), ((1359, 1428), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', (['(2)'], {'linkage': '"""average"""', 'affinity': '"""precomputed"""'}), "(2, linkage='average', affinity='precomputed')\n", (1382, 1428), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((1486, 1512), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'yfitted'], {}), '(y, yfitted)\n', (1500, 1512), False, 'from sklearn.metrics import accuracy_score\n'), ((726, 759), 'scipy.spatial.distance.squareform', 'sp.distance.squareform', (['distances'], {}), '(distances)\n', (748, 759), True, 'import scipy.spatial as sp\n')] |
# coding:utf8
# run: python fout.py './file_pattern*' cmds
# --------------------------------------------------------------------------------------- #
# limit:cmd,offset:int,size:int|a(all)
# --------------------------------------------------------------------------------------- #
# grep:cmd,extract_ex:bool,regexp:string[,regexp]...
# --------------------------------------------------------------------------------------- #
# sort:cmd[,a|n:cmd[,sep:string[,col:int]]]
# --------------------------------------------------------------------------------------- #
# unique:cmd[,show_count:bool[,sep:string[,col:int]]]
# --------------------------------------------------------------------------------------- #
# cut:cmd,sep:string,joiner:string,field:int[,field]...
# --------------------------------------------------------------------------------------- #
# cmd_sep[*]:cmd
# --------------------------------------------------------------------------------------- #
# reverse:cmd
# --------------------------------------------------------------------------------------- #
# count:cmd
# --------------------------------------------------------------------------------------- #
# copy:cmd
# --------------------------------------------------------------------------------------- #
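# Illustrative invocations (the log file pattern below is hypothetical, not from the source):
#   python fout.py './server*.log' grep,f,timeout limit,0,20    # first 20 lines matching "timeout"
#   python fout.py './server*.log' grep,f,error unique count    # number of distinct "error" lines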
import sys
import glob
import re
import pyperclip
if len(sys.argv) < 2:
print('missing file pattern')
exit()
file_pattern = sys.argv[1]
cmd_sep = ','
def parse_by_limit(args: list, lines: list):
"""
支持倒着取行数
"""
offset = int(args[0])
size = args[1]
if size == 'a' or size == 'all':
if offset < 0:
return lines[:len(lines)+offset+1]
return lines[offset:]
if offset < 0:
end = len(lines)+offset+1
return lines[end-int(size):end]
return lines[offset:offset+int(size)]
def parse_by_regexp(args: list, lines: list):
extract_ex = 't' == args[0] or 'true' == args[0]
regexp = '.*'.join(args[1:])
match_lines = []
can_extract = False
for line in lines:
if can_extract:
if re.match(r'\d{4}(-\d{2}){2}.*', line):
can_extract = False
else:
                # Extract the exception (stack trace) data lines
match_lines.append(line)
if re.search(regexp, line, re.I):
match_lines.append(line)
can_extract = extract_ex
return match_lines
def parse_by_sort(args: list, lines: list):
args = parse_unique_and_sort_args(args)
sort_by_number = 'n' == args[0]
col = int(args[2])
def get_sort_token(line: str):
if len(args[1]) > 0:
fields = str.split(line, args[1])
line = ''
if len(fields) > col:
line = fields[col]
if sort_by_number and len(line) < 1:
line = '0'
return int(line) if sort_by_number else line
lines.sort(key=get_sort_token)
return lines
def parse_by_unique(args: list, lines: list):
args = parse_unique_and_sort_args(args)
show_count = 't' == args[0] or 'true' == args[0]
col = int(args[2])
unique_lines = []
line_col_map = {}
unique_map = {}
    # Deduplicate
    for line in lines:
        fields = [line] if len(args[1]) < 1 else str.split(line, args[1])
        key = fields[col]
        count = unique_map.get(key, 0)
        if count == 0:
            unique_lines.append(line)
            line_col_map[line] = key
        unique_map[key] = count+1
    if not show_count:
        return unique_lines
    # Prepend the occurrence count to each unique line
    unique_lines = [
        ' '.join([str(unique_map[line_col_map[line]]), line]) for line in unique_lines]
    return unique_lines
def parse_unique_and_sort_args(args: list):
parsed_args = ['a', '', '0']
if len(args) > 2:
parsed_args[2] = args[2]
if len(args) > 1:
parsed_args[1] = args[1]
if len(args) > 0:
parsed_args[0] = args[0]
return parsed_args
def parse_by_cut(args: list, lines: list):
"""
    Reassemble each line by joining selected fields.
"""
if len(args) < 1:
return lines
sep = args[0]
cols = [int(arg) for arg in args[2:]]
joiner = args[1] if len(args) > 1 else ''
    # Join the specified columns
    joined_lines = []
    for line in lines:
        fields = str.split(line, sep)
        if len(cols) > 0:
            fields = [fields[col] for col in cols]
        joined_lines.append(joiner.join(fields))
    return joined_lines
def parse_by_cmd(cmd: str, lines: list):
"""
    Parse lines according to a single command.
"""
args = str.split(cmd, cmd_sep)[1:]
if str.startswith(cmd, 'limit'):
return parse_by_limit(args, lines)
if str.startswith(cmd, 'grep'):
return parse_by_regexp(args, lines)
if str.startswith(cmd, 'cut'):
return parse_by_cut(args, lines)
if str.startswith(cmd, 'unique'):
return parse_by_unique(args, lines)
if str.startswith(cmd, 'sort'):
return parse_by_sort(args, lines)
if str.startswith(cmd, 'count'):
return [str(len(lines))]
if str.startswith(cmd, 'reverse'):
lines.reverse()
if str.startswith(cmd, 'copy'):
pyperclip.copy('\n'.join(lines))
return []
return lines
# Read the contents of the matching files
lines = []
for filename in glob.glob(file_pattern):
with open(filename, 'r', encoding='utf8') as fr:
lines.extend(str.strip(line, '\n')
for line in fr.readlines() if len(line) > 0)
# Parse the commands in order
# print(sys.argv)
# lines = [line for line in lines if len(line) > 0]
for arg in sys.argv[2:]:
if str.startswith(arg, 'cmd_sep'):
cmd_sep = ''.join(arg[7:])
continue
lines = parse_by_cmd(str.lower(arg), lines)
print('\n'.join(lines))
| [
"re.match",
"glob.glob",
"re.search"
] | [((5230, 5253), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (5239, 5253), False, 'import glob\n'), ((2249, 2278), 're.search', 're.search', (['regexp', 'line', 're.I'], {}), '(regexp, line, re.I)\n', (2258, 2278), False, 'import re\n'), ((2079, 2117), 're.match', 're.match', (['"""\\\\d{4}(-\\\\d{2}){2}.*"""', 'line'], {}), "('\\\\d{4}(-\\\\d{2}){2}.*', line)\n", (2087, 2117), False, 'import re\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
from amaasutils.random_utils import random_string
import datetime
import random
from amaascore.monitor.item import Item
def generate_item(client_id=None, asset_manager_id=None, item_id=None, item_class=None, item_type=None,
item_level=None, item_source=None, item_date=None, message=None):
item = Item(
client_id=client_id or random.randint(1, 2**31-1),
asset_manager_id=asset_manager_id or random.randint(1, 1000),
item_id=item_id or random_string(10),
item_class=item_class or random.choice(['Exception', 'Notification']),
item_type=item_type or random_string(15),
item_level=item_level or random.choice(['Info', 'Warning', 'Error', 'Critical']),
item_source=item_source or random.choice(['Transactions', 'Assets', random_string(20)]),
item_date=item_date or datetime.date.today(),
message=message or random_string(200)
)
return item
def generate_items(asset_manager_ids=[], number=5):
items = []
for i in range(number):
item = generate_item(asset_manager_id=random.choice(asset_manager_ids))
items.append(item)
return items
| [
"amaasutils.random_utils.random_string",
"datetime.date.today",
"random.choice",
"random.randint"
] | [((444, 474), 'random.randint', 'random.randint', (['(1)', '(2 ** 31 - 1)'], {}), '(1, 2 ** 31 - 1)\n', (458, 474), False, 'import random\n'), ((517, 540), 'random.randint', 'random.randint', (['(1)', '(1000)'], {}), '(1, 1000)\n', (531, 540), False, 'import random\n'), ((569, 586), 'amaasutils.random_utils.random_string', 'random_string', (['(10)'], {}), '(10)\n', (582, 586), False, 'from amaasutils.random_utils import random_string\n'), ((621, 665), 'random.choice', 'random.choice', (["['Exception', 'Notification']"], {}), "(['Exception', 'Notification'])\n", (634, 665), False, 'import random\n'), ((698, 715), 'amaasutils.random_utils.random_string', 'random_string', (['(15)'], {}), '(15)\n', (711, 715), False, 'from amaasutils.random_utils import random_string\n'), ((750, 805), 'random.choice', 'random.choice', (["['Info', 'Warning', 'Error', 'Critical']"], {}), "(['Info', 'Warning', 'Error', 'Critical'])\n", (763, 805), False, 'import random\n'), ((935, 956), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (954, 956), False, 'import datetime\n'), ((985, 1003), 'amaasutils.random_utils.random_string', 'random_string', (['(200)'], {}), '(200)\n', (998, 1003), False, 'from amaasutils.random_utils import random_string\n'), ((1169, 1201), 'random.choice', 'random.choice', (['asset_manager_ids'], {}), '(asset_manager_ids)\n', (1182, 1201), False, 'import random\n'), ((883, 900), 'amaasutils.random_utils.random_string', 'random_string', (['(20)'], {}), '(20)\n', (896, 900), False, 'from amaasutils.random_utils import random_string\n')] |
import sys
x, y = map(int, sys.stdin.readline().split())
def main(x):
cnt = 0
while x <= y:
cnt += 1
x *= 2
print(cnt)
if __name__ == "__main__":
main(x)
| [
"sys.stdin.readline"
] | [((30, 50), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (48, 50), False, 'import sys\n')] |
# Tests for the natural order sorting package.
#
# Author: <NAME> <<EMAIL>>
# Last Change: November 2, 2015
# URL: https://github.com/xolox/python-naturalsort
"""Tests for the natural order sorting package."""
# Standard library modules.
import random
import unittest
# The module we're testing.
from natsort import NaturalOrderKey, natsort
class NaturalSortTestCase(unittest.TestCase):
"""Container for the `naturalsort` tests."""
def test_plain_old_sorting(self):
"""Test plain old sorting (what we don't want :-)."""
assert sorted(['1', '5', '10', '50']) == ['1', '10', '5', '50']
def test_version_sorting(self):
"""Test version sorting (what we're after)."""
assert natsort(['1', '5', '10', '50']) == ['1', '5', '10', '50']
def test_reversed_version_sorting(self):
"""Test reversed version sorting."""
assert natsort(['1', '5', '10', '50'], reverse=True) == ['50', '10', '5', '1']
def test_zero_padding(self):
"""Test that zero padding semantics are respected."""
assert natsort(['1.5.1', '1.5']) == ['1.5', '1.5.1']
def test_dotted_sorting(self):
"""
Test a previously fixed bug to prevent regressions.
I've purposefully shuffled the order on the left side to avoid false
positives caused by stable sorting.
"""
assert natsort(['1.5', '1.0']) == ['1.0', '1.5']
def test_python_3_compatibility(self):
"""
Test the Python 3 incompatibility reported in `issue 2`_.
.. _issue 2: https://github.com/xolox/python-naturalsort/issues/2
"""
assert natsort(['1', 'a']) == ['1', 'a']
def test_more_complex_versions(self):
"""
Test the implementation of the ``NaturalOrderKey`` class.
This test uses some more complex version strings that were sorted
incorrectly by the initial (way too naive) implementation in 1.4.
"""
sorted_versions = ['1532-44349', '1534-44658', '1536-44582', '1536-44935', '1538-44874', '1538-44920']
random_versions = ['1534-44658', '1536-44935', '1532-44349', '1538-44920', '1536-44582', '1538-44874']
assert sorted_versions == natsort(random_versions)
def test_input_order_irrelevant(self):
"""
Test that order of input does not adversely affect order of output.
Works by shuffling the input and checking that all 10.000 iterations
result in the same output.
"""
sorted_strings = ['1532-44349', '1534-44658', '1536-44582', '1536-44935', '1538-44874', '1538-44920']
mutable_copy = list(sorted_strings)
for i in range(10000):
random.shuffle(mutable_copy)
assert natsort(mutable_copy) == sorted_strings
def test_eq(self):
"""Test :func:`.NaturalOrderKey.__eq__()`."""
# Equality comparison between objects of same type.
assert NaturalOrderKey('1.0') == NaturalOrderKey('1.0')
# Equality comparison between objects of different types.
assert NaturalOrderKey('1.0').__eq__(object) is NotImplemented
def test_ne(self):
"""Test :func:`.NaturalOrderKey.__ne__()`."""
# Non-equality comparison between objects of same type.
assert NaturalOrderKey('1.0') != NaturalOrderKey('1.1')
# Non-equality comparison between objects of different types.
assert NaturalOrderKey('1.0').__ne__(object()) is NotImplemented
def test_lt(self):
"""Test :func:`.NaturalOrderKey.__lt__()`."""
# Less than comparison between objects of same type.
assert NaturalOrderKey('1') < NaturalOrderKey('1.1')
# Less than comparison between objects of different types.
assert NaturalOrderKey('1').__lt__(object()) is NotImplemented
def test_le(self):
"""Test :func:`.NaturalOrderKey.__le__()`."""
# Less than or equal comparison between objects of same type.
assert NaturalOrderKey('1') <= NaturalOrderKey('1.1')
assert NaturalOrderKey('1') <= NaturalOrderKey('1')
assert not (NaturalOrderKey('1.1') <= NaturalOrderKey('1'))
# Less than or equal comparison between objects of different types.
assert NaturalOrderKey('1').__le__(object()) is NotImplemented
def test_gt(self):
"""Test :func:`.NaturalOrderKey.__gt__()`."""
# Greater than comparison between objects of same type.
assert NaturalOrderKey('1.1') > NaturalOrderKey('1')
# Greater than comparison between objects of different types.
assert NaturalOrderKey('1').__gt__(object()) is NotImplemented
def test_ge(self):
"""Test :func:`.NaturalOrderKey.__ge__()`."""
# Greater than or equal comparison between objects of same type.
assert NaturalOrderKey('1.1') >= NaturalOrderKey('1')
assert NaturalOrderKey('1') >= NaturalOrderKey('1')
assert not (NaturalOrderKey('1') >= NaturalOrderKey('1.1'))
# Greater than or equal comparison between objects of different types.
assert NaturalOrderKey('1').__ge__(object()) is NotImplemented
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"natsort.natsort",
"random.shuffle",
"natsort.NaturalOrderKey"
] | [((5156, 5171), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5169, 5171), False, 'import unittest\n'), ((722, 753), 'natsort.natsort', 'natsort', (["['1', '5', '10', '50']"], {}), "(['1', '5', '10', '50'])\n", (729, 753), False, 'from natsort import NaturalOrderKey, natsort\n'), ((886, 931), 'natsort.natsort', 'natsort', (["['1', '5', '10', '50']"], {'reverse': '(True)'}), "(['1', '5', '10', '50'], reverse=True)\n", (893, 931), False, 'from natsort import NaturalOrderKey, natsort\n'), ((1069, 1094), 'natsort.natsort', 'natsort', (["['1.5.1', '1.5']"], {}), "(['1.5.1', '1.5'])\n", (1076, 1094), False, 'from natsort import NaturalOrderKey, natsort\n'), ((1372, 1395), 'natsort.natsort', 'natsort', (["['1.5', '1.0']"], {}), "(['1.5', '1.0'])\n", (1379, 1395), False, 'from natsort import NaturalOrderKey, natsort\n'), ((1638, 1657), 'natsort.natsort', 'natsort', (["['1', 'a']"], {}), "(['1', 'a'])\n", (1645, 1657), False, 'from natsort import NaturalOrderKey, natsort\n'), ((2210, 2234), 'natsort.natsort', 'natsort', (['random_versions'], {}), '(random_versions)\n', (2217, 2234), False, 'from natsort import NaturalOrderKey, natsort\n'), ((2689, 2717), 'random.shuffle', 'random.shuffle', (['mutable_copy'], {}), '(mutable_copy)\n', (2703, 2717), False, 'import random\n'), ((2930, 2952), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1.0"""'], {}), "('1.0')\n", (2945, 2952), False, 'from natsort import NaturalOrderKey, natsort\n'), ((2956, 2978), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1.0"""'], {}), "('1.0')\n", (2971, 2978), False, 'from natsort import NaturalOrderKey, natsort\n'), ((3273, 3295), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1.0"""'], {}), "('1.0')\n", (3288, 3295), False, 'from natsort import NaturalOrderKey, natsort\n'), ((3299, 3321), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1.1"""'], {}), "('1.1')\n", (3314, 3321), False, 'from natsort import NaturalOrderKey, natsort\n'), ((3619, 3639), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (3634, 3639), False, 'from natsort import NaturalOrderKey, natsort\n'), ((3642, 3664), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1.1"""'], {}), "('1.1')\n", (3657, 3664), False, 'from natsort import NaturalOrderKey, natsort\n'), ((3966, 3986), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (3981, 3986), False, 'from natsort import NaturalOrderKey, natsort\n'), ((3990, 4012), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1.1"""'], {}), "('1.1')\n", (4005, 4012), False, 'from natsort import NaturalOrderKey, natsort\n'), ((4028, 4048), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (4043, 4048), False, 'from natsort import NaturalOrderKey, natsort\n'), ((4052, 4072), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (4067, 4072), False, 'from natsort import NaturalOrderKey, natsort\n'), ((4445, 4467), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1.1"""'], {}), "('1.1')\n", (4460, 4467), False, 'from natsort import NaturalOrderKey, natsort\n'), ((4470, 4490), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (4485, 4490), False, 'from natsort import NaturalOrderKey, natsort\n'), ((4798, 4820), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1.1"""'], {}), "('1.1')\n", (4813, 4820), False, 'from natsort import NaturalOrderKey, natsort\n'), ((4824, 4844), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (4839, 4844), 
False, 'from natsort import NaturalOrderKey, natsort\n'), ((4860, 4880), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (4875, 4880), False, 'from natsort import NaturalOrderKey, natsort\n'), ((4884, 4904), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (4899, 4904), False, 'from natsort import NaturalOrderKey, natsort\n'), ((2737, 2758), 'natsort.natsort', 'natsort', (['mutable_copy'], {}), '(mutable_copy)\n', (2744, 2758), False, 'from natsort import NaturalOrderKey, natsort\n'), ((4093, 4115), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1.1"""'], {}), "('1.1')\n", (4108, 4115), False, 'from natsort import NaturalOrderKey, natsort\n'), ((4119, 4139), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (4134, 4139), False, 'from natsort import NaturalOrderKey, natsort\n'), ((4925, 4945), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (4940, 4945), False, 'from natsort import NaturalOrderKey, natsort\n'), ((4949, 4971), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1.1"""'], {}), "('1.1')\n", (4964, 4971), False, 'from natsort import NaturalOrderKey, natsort\n'), ((3060, 3082), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1.0"""'], {}), "('1.0')\n", (3075, 3082), False, 'from natsort import NaturalOrderKey, natsort\n'), ((3407, 3429), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1.0"""'], {}), "('1.0')\n", (3422, 3429), False, 'from natsort import NaturalOrderKey, natsort\n'), ((3747, 3767), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (3762, 3767), False, 'from natsort import NaturalOrderKey, natsort\n'), ((4232, 4252), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (4247, 4252), False, 'from natsort import NaturalOrderKey, natsort\n'), ((4576, 4596), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (4591, 4596), False, 'from natsort import NaturalOrderKey, natsort\n'), ((5067, 5087), 'natsort.NaturalOrderKey', 'NaturalOrderKey', (['"""1"""'], {}), "('1')\n", (5082, 5087), False, 'from natsort import NaturalOrderKey, natsort\n')] |
# --------------- AND Perceptron ---------------
import pandas as pd
# TODO: Set weight1, weight2, and bias
weight1 = 0.2
weight2 = 0.8
bias = -1.0
# DON'T CHANGE ANYTHING BELOW
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [False, False, False, True]
outputs = []
# Generate and check output
for test_input, correct_output in zip(test_inputs, correct_outputs):
linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias
output = int(linear_combination >= 0)
is_correct_string = 'Yes' if output == correct_output else 'No'
outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string])
# Print output
num_wrong = len([output[4] for output in outputs if output[4] == 'No'])
output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if not num_wrong:
print('Nice! You got it all correct.\n')
else:
print('You got {} wrong. Keep trying!\n'.format(num_wrong))
print(output_frame.to_string(index=False))
# --------------- NOT Perceptron --------------------
import pandas as pd
# TODO: Set weight1, weight2, and bias
weight1 = 0.0
weight2 = -1.0
bias = -0.0
# DON'T CHANGE ANYTHING BELOW
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [True, False, True, False]
outputs = []
# Generate and check output
for test_input, correct_output in zip(test_inputs, correct_outputs):
linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias
output = int(linear_combination >= 0)
is_correct_string = 'Yes' if output == correct_output else 'No'
outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string])
# Print output
num_wrong = len([output[4] for output in outputs if output[4] == 'No'])
output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if not num_wrong:
print('Nice! You got it all correct.\n')
else:
print('You got {} wrong. Keep trying!\n'.format(num_wrong))
print(output_frame.to_string(index=False))
# --------------- Perceptron Step --------------------
import numpy as np
# Setting the random seed, feel free to change it and see different solutions.
np.random.seed(42)
def stepFunction(t):
if t >= 0:
return 1
return 0
def prediction(X, W, b):
return stepFunction((np.matmul(X,W)+b)[0])
# TODO: Fill in the code below to implement the perceptron trick.
# The function should receive as inputs the data X, the labels y,
# the weights W (as an array), and the bias b,
# update the weights and bias W, b, according to the perceptron algorithm,
# and return W and b.
def perceptronStep(X, y, W, b, learn_rate = 0.01):
for i in range(len(X)):
y_hat = prediction(X[i],W,b)
if y[i]-y_hat == 1:
W[0] += X[i][0]*learn_rate
W[1] += X[i][1]*learn_rate
b += learn_rate
elif y[i]-y_hat == -1:
W[0] -= X[i][0]*learn_rate
W[1] -= X[i][1]*learn_rate
b -= learn_rate
return W, b
# This function runs the perceptron algorithm repeatedly on the dataset,
# and returns a few of the boundary lines obtained in the iterations,
# for plotting purposes.
# Feel free to play with the learning rate and the num_epochs,
# and see your results plotted below.
def trainPerceptronAlgorithm(X, y, learn_rate = 0.01, num_epochs = 25):
x_min, x_max = min(X.T[0]), max(X.T[0])
y_min, y_max = min(X.T[1]), max(X.T[1])
W = np.array(np.random.rand(2,1))
b = np.random.rand(1)[0] + x_max
# These are the solution lines that get plotted below.
boundary_lines = []
for i in range(num_epochs):
# In each epoch, we apply the perceptron step.
W, b = perceptronStep(X, y, W, b, learn_rate)
boundary_lines.append((-W[0]/W[1], -b/W[1]))
return boundary_lines
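# Minimal usage sketch (added illustration; the random data and names below are assumptions, not part
# of the original exercise): fit boundary lines to linearly separable 2D points.
_X = np.random.rand(100, 2)
_y = (_X[:, 0] + _X[:, 1] > 1.0).astype(int)
_boundary_lines = trainPerceptronAlgorithm(_X, _y)
print('final boundary (slope, intercept):', _boundary_lines[-1])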
# --------------- Softmax --------------------
import numpy as np
def softmax(L):
expL = np.exp(L)
sumExpL = sum(expL)
result = []
for i in expL:
result.append(i*1.0/sumExpL)
return result
# Note: The function np.divide can also be used here, as follows:
# def softmax(L):
# expL = np.exp(L)
# return np.divide (expL, expL.sum())
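# A quick numeric check of the formula: with L = [2.0, 1.0, 0.1] the exponentials are roughly
# 7.389, 2.718 and 1.105, summing to about 11.213, so each probability is its exponential over that sum.
print(softmax([2.0, 1.0, 0.1]))  # ≈ [0.659, 0.242, 0.099]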
# --------------- Cross Entropy --------------------
import numpy as np
def cross_entropy(Y, P):
Y = np.float_(Y)
P = np.float_(P)
return -np.sum(Y * np.log(P) + (1 - Y) * np.log(1 - P))
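# A quick numeric check of the formula: for Y = [1, 0] and P = [0.8, 0.2] both terms contribute
# -ln(0.8), so the total cross-entropy is -2*ln(0.8) ≈ 0.446.
print(cross_entropy([1, 0], [0.8, 0.2]))  # ≈ 0.446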
# -------------------- Gradient Descent --------------------
# Sigmoid Activation Function ( Integral of log(e^x + 1) + C )
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# Output (prediction) formula
def output_formula(features, weights, bias):
linear_combination=np.dot(features, weights)
return sigmoid(linear_combination + bias)
# Error Formula (Binary Cross-Entropy / Log Loss)
# y is the target label, output is the predicted probability
def error_formula(y, output):
    probability_of_1 = - y * np.log(output)
    probability_of_0 = - (1 - y) * np.log(1 - output)
    binary_cross_entropy = probability_of_1 + probability_of_0
return binary_cross_entropy
# Gradient descent step
def update_weights(x, y, weights, bias, learnrate):
output = output_formula(x, weights, bias)
d_error = -(y - output)
weights -= learnrate * d_error * x
bias -= learnrate * d_error
return weights, bias
# -------------------- Gradient Descent 2 --------------------
import numpy as np
def sigmoid(x):
"""
Calculate sigmoid
"""
return 1/(1+np.exp(-x))
def sigmoid_prime(x):
"""
    Derivative of the sigmoid function
"""
return sigmoid(x) * (1 - sigmoid(x))
learnrate = 0.5
x = np.array([1, 2, 3, 4])
y = np.array(0.5)
# Initial weights
w = np.array([0.5, -0.5, 0.3, 0.1])
### Calculate one gradient descent step for each weight
### Note: Some steps have been consolidated, so there are
### fewer variable names than in the above sample code
# TODO: Calculate the node's linear combination of inputs and weights
h = np.dot(x, w)
# TODO: Calculate output of neural network
nn_output = sigmoid(h)
# TODO: Calculate error of neural network
error = y - nn_output
# TODO: Calculate the error term
# Remember, this requires the output gradient, which we haven't
# specifically added a variable for.
error_term = error * sigmoid_prime(h)
# Note: The sigmoid_prime function calculates sigmoid(h) twice,
# but you've already calculated it once. You can make this
# code more efficient by calculating the derivative directly
# rather than calling sigmoid_prime, like this:
# error_term = error * nn_output * (1 - nn_output)
# TODO: Calculate change in weights
del_w = learnrate * error_term * x
print('Neural Network output:')
print(nn_output)
print('Amount of Error:')
print(error)
print('Change in Weights:')
print(del_w)
# --------------- Gradient Descent 3 --------------------
import numpy as np
from data_prep import features, targets, features_test, targets_test
def sigmoid(x):
"""
Calculate sigmoid
"""
return 1 / (1 + np.exp(-x))
# TODO: We haven't provided the sigmoid_prime function like we did in
# the previous lesson to encourage you to come up with a more
# efficient solution. If you need a hint, check out the comments
# in solution.py from the previous lecture.
# Use to same seed to make debugging easier
np.random.seed(42)
n_records, n_features = features.shape
last_loss = None
# Initialize weights
weights = np.random.normal(scale=1 / n_features**.5, size=n_features)
# Neural Network hyperparameters
epochs = 1000
learnrate = 0.5
for e in range(epochs):
del_w = np.zeros(weights.shape)
for x, y in zip(features.values, targets):
# Loop through all records, x is the input, y is the target
# Activation of the output unit
# Notice we multiply the inputs and the weights here
# rather than storing h as a separate variable
output = sigmoid(np.dot(x, weights))
# The error, the target minus the network output
error = y - output
# The error term
        # Notice we calculate f'(h) here instead of defining a separate
# sigmoid_prime function. This just makes it faster because we
# can re-use the result of the sigmoid function stored in
# the output variable
error_term = error * output * (1 - output)
# The gradient descent step, the error times the gradient times the inputs
del_w += error_term * x
# Update the weights here. The learning rate times the
# change in weights, divided by the number of records to average
weights += learnrate * del_w / n_records
# Printing out the mean square error on the training set
if e % (epochs / 10) == 0:
out = sigmoid(np.dot(features, weights))
loss = np.mean((out - targets) ** 2)
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss
# Calculate accuracy on test data
test_out = sigmoid(np.dot(features_test, weights))
predictions = test_out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
# --------------- Multilayer Perceptrons (Hidden Layers) --------------------
import numpy as np
def sigmoid(x):
"""
Calculate sigmoid
"""
return 1/(1+np.exp(-x))
# Network size
N_input = 4
N_hidden = 3
N_output = 2
np.random.seed(42)
# Make some fake data
X = np.random.randn(4)
weights_input_to_hidden = np.random.normal(0, scale=0.1, size=(N_input, N_hidden))
weights_hidden_to_output = np.random.normal(0, scale=0.1, size=(N_hidden, N_output))
# TODO: Make a forward pass through the network
hidden_layer_in = np.dot(X, weights_input_to_hidden)
hidden_layer_out = sigmoid(hidden_layer_in)
print('Hidden-layer Output:')
print(hidden_layer_out)
output_layer_in = np.dot(hidden_layer_out, weights_hidden_to_output)
output_layer_out = sigmoid(output_layer_in)
print('Output-layer Output:')
print(output_layer_out)
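# Shape check: X is (4,), weights_input_to_hidden is (4, 3) and weights_hidden_to_output is (3, 2),
# so hidden_layer_out has shape (3,) and output_layer_out has shape (2,).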
# -------------------- Backpropagation --------------------
import numpy as np
def sigmoid(x):
"""
Calculate sigmoid
"""
return 1 / (1 + np.exp(-x))
x = np.array([0.5, 0.1, -0.2])
target = 0.6
learnrate = 0.5
weights_input_hidden = np.array([[0.5, -0.6],
[0.1, -0.2],
[0.1, 0.7]])
weights_hidden_output = np.array([0.1, -0.3])
## Forward pass
hidden_layer_input = np.dot(x, weights_input_hidden)
hidden_layer_output = sigmoid(hidden_layer_input)
output_layer_in = np.dot(hidden_layer_output, weights_hidden_output)
output = sigmoid(output_layer_in)
## Backwards pass
## TODO: Calculate output error
error = target - output
# TODO: Calculate error term for output layer
output_error_term = error * output * (1 - output)
# TODO: Calculate error term for hidden layer
hidden_error_term = np.dot(output_error_term, weights_hidden_output) * \
hidden_layer_output * (1 - hidden_layer_output)
# TODO: Calculate change in weights for hidden layer to output layer
delta_w_h_o = learnrate * output_error_term * hidden_layer_output
# TODO: Calculate change in weights for input layer to hidden layer
delta_w_i_h = learnrate * hidden_error_term * x[:, None]
print('Change in weights for hidden layer to output layer:')
print(delta_w_h_o)
print('Change in weights for input layer to hidden layer:')
print(delta_w_i_h)
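# Expected results for the inputs above (approximate, rounded):
#   delta_w_h_o ≈ [0.00804, 0.00556]
#   delta_w_i_h ≈ [[ 1.77e-04, -5.11e-04],
#                  [ 3.54e-05, -1.02e-04],
#                  [-7.08e-05,  2.04e-04]]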
# -------------------- Backpropagation 2 --------------------
import numpy as np
from data_prep import features, targets, features_test, targets_test
np.random.seed(21)
def sigmoid(x):
"""
Calculate sigmoid
"""
return 1 / (1 + np.exp(-x))
# Hyperparameters
n_hidden = 2 # number of hidden units
epochs = 900
learnrate = 0.005
n_records, n_features = features.shape
last_loss = None
# Initialize weights
weights_input_hidden = np.random.normal(scale=1 / n_features ** .5,
size=(n_features, n_hidden))
weights_hidden_output = np.random.normal(scale=1 / n_features ** .5,
size=n_hidden)
for e in range(epochs):
del_w_input_hidden = np.zeros(weights_input_hidden.shape)
del_w_hidden_output = np.zeros(weights_hidden_output.shape)
for x, y in zip(features.values, targets):
## Forward pass ##
# TODO: Calculate the output
hidden_input = np.dot(x, weights_input_hidden)
hidden_output = sigmoid(hidden_input)
output = sigmoid(np.dot(hidden_output,
weights_hidden_output))
## Backward pass ##
# TODO: Calculate the network's prediction error
error = y - output
# TODO: Calculate error term for the output unit
output_error_term = error * output * (1 - output)
## propagate errors to hidden layer
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = np.dot(output_error_term, weights_hidden_output)
# TODO: Calculate the error term for the hidden layer
hidden_error_term = hidden_error * hidden_output * (1 - hidden_output)
# TODO: Update the change in weights
del_w_hidden_output += output_error_term * hidden_output
del_w_input_hidden += hidden_error_term * x[:, None]
# TODO: Update weights
weights_input_hidden += learnrate * del_w_input_hidden / n_records
weights_hidden_output += learnrate * del_w_hidden_output / n_records
# Printing out the mean square error on the training set
if e % (epochs / 10) == 0:
        hidden_output = sigmoid(np.dot(features, weights_input_hidden))  # use all records, not just the last x
out = sigmoid(np.dot(hidden_output,
weights_hidden_output))
loss = np.mean((out - targets) ** 2)
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss
# Calculate accuracy on test data
hidden = sigmoid(np.dot(features_test, weights_input_hidden))
out = sigmoid(np.dot(hidden, weights_hidden_output))
predictions = out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
| [
"numpy.random.normal",
"numpy.mean",
"numpy.random.rand",
"numpy.float_",
"numpy.log",
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.matmul",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.random.randn"
] | [((799, 921), 'pandas.DataFrame', 'pd.DataFrame', (['outputs'], {'columns': "['Input 1', ' Input 2', ' Linear Combination', ' Activation Output',\n ' Is Correct']"}), "(outputs, columns=['Input 1', ' Input 2',\n ' Linear Combination', ' Activation Output', ' Is Correct'])\n", (811, 921), True, 'import pandas as pd\n'), ((1903, 2025), 'pandas.DataFrame', 'pd.DataFrame', (['outputs'], {'columns': "['Input 1', ' Input 2', ' Linear Combination', ' Activation Output',\n ' Is Correct']"}), "(outputs, columns=['Input 1', ' Input 2',\n ' Linear Combination', ' Activation Output', ' Is Correct'])\n", (1915, 2025), True, 'import pandas as pd\n'), ((2355, 2373), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2369, 2373), True, 'import numpy as np\n'), ((5802, 5824), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (5810, 5824), True, 'import numpy as np\n'), ((5829, 5842), 'numpy.array', 'np.array', (['(0.5)'], {}), '(0.5)\n', (5837, 5842), True, 'import numpy as np\n'), ((5866, 5897), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.3, 0.1]'], {}), '([0.5, -0.5, 0.3, 0.1])\n', (5874, 5897), True, 'import numpy as np\n'), ((6149, 6161), 'numpy.dot', 'np.dot', (['x', 'w'], {}), '(x, w)\n', (6155, 6161), True, 'import numpy as np\n'), ((7528, 7546), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (7542, 7546), True, 'import numpy as np\n'), ((7636, 7698), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(1 / n_features ** 0.5)', 'size': 'n_features'}), '(scale=1 / n_features ** 0.5, size=n_features)\n', (7652, 7698), True, 'import numpy as np\n'), ((9345, 9381), 'numpy.mean', 'np.mean', (['(predictions == targets_test)'], {}), '(predictions == targets_test)\n', (9352, 9381), True, 'import numpy as np\n'), ((9675, 9693), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (9689, 9693), True, 'import numpy as np\n'), ((9720, 9738), 'numpy.random.randn', 'np.random.randn', (['(4)'], {}), '(4)\n', (9735, 9738), True, 'import numpy as np\n'), ((9766, 9822), 'numpy.random.normal', 'np.random.normal', (['(0)'], {'scale': '(0.1)', 'size': '(N_input, N_hidden)'}), '(0, scale=0.1, size=(N_input, N_hidden))\n', (9782, 9822), True, 'import numpy as np\n'), ((9850, 9907), 'numpy.random.normal', 'np.random.normal', (['(0)'], {'scale': '(0.1)', 'size': '(N_hidden, N_output)'}), '(0, scale=0.1, size=(N_hidden, N_output))\n', (9866, 9907), True, 'import numpy as np\n'), ((9977, 10011), 'numpy.dot', 'np.dot', (['X', 'weights_input_to_hidden'], {}), '(X, weights_input_to_hidden)\n', (9983, 10011), True, 'import numpy as np\n'), ((10130, 10180), 'numpy.dot', 'np.dot', (['hidden_layer_out', 'weights_hidden_to_output'], {}), '(hidden_layer_out, weights_hidden_to_output)\n', (10136, 10180), True, 'import numpy as np\n'), ((10455, 10481), 'numpy.array', 'np.array', (['[0.5, 0.1, -0.2]'], {}), '([0.5, 0.1, -0.2])\n', (10463, 10481), True, 'import numpy as np\n'), ((10535, 10583), 'numpy.array', 'np.array', (['[[0.5, -0.6], [0.1, -0.2], [0.1, 0.7]]'], {}), '([[0.5, -0.6], [0.1, -0.2], [0.1, 0.7]])\n', (10543, 10583), True, 'import numpy as np\n'), ((10675, 10696), 'numpy.array', 'np.array', (['[0.1, -0.3]'], {}), '([0.1, -0.3])\n', (10683, 10696), True, 'import numpy as np\n'), ((10735, 10766), 'numpy.dot', 'np.dot', (['x', 'weights_input_hidden'], {}), '(x, weights_input_hidden)\n', (10741, 10766), True, 'import numpy as np\n'), ((10836, 10886), 'numpy.dot', 'np.dot', (['hidden_layer_output', 'weights_hidden_output'], {}), '(hidden_layer_output, 
weights_hidden_output)\n', (10842, 10886), True, 'import numpy as np\n'), ((11856, 11874), 'numpy.random.seed', 'np.random.seed', (['(21)'], {}), '(21)\n', (11870, 11874), True, 'import numpy as np\n'), ((12153, 12227), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(1 / n_features ** 0.5)', 'size': '(n_features, n_hidden)'}), '(scale=1 / n_features ** 0.5, size=(n_features, n_hidden))\n', (12169, 12227), True, 'import numpy as np\n'), ((12291, 12351), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(1 / n_features ** 0.5)', 'size': 'n_hidden'}), '(scale=1 / n_features ** 0.5, size=n_hidden)\n', (12307, 12351), True, 'import numpy as np\n'), ((14442, 14478), 'numpy.mean', 'np.mean', (['(predictions == targets_test)'], {}), '(predictions == targets_test)\n', (14449, 14478), True, 'import numpy as np\n'), ((4102, 4111), 'numpy.exp', 'np.exp', (['L'], {}), '(L)\n', (4108, 4111), True, 'import numpy as np\n'), ((4504, 4516), 'numpy.float_', 'np.float_', (['Y'], {}), '(Y)\n', (4513, 4516), True, 'import numpy as np\n'), ((4525, 4537), 'numpy.float_', 'np.float_', (['P'], {}), '(P)\n', (4534, 4537), True, 'import numpy as np\n'), ((4880, 4905), 'numpy.dot', 'np.dot', (['features', 'weights'], {}), '(features, weights)\n', (4886, 4905), True, 'import numpy as np\n'), ((7797, 7820), 'numpy.zeros', 'np.zeros', (['weights.shape'], {}), '(weights.shape)\n', (7805, 7820), True, 'import numpy as np\n'), ((9274, 9304), 'numpy.dot', 'np.dot', (['features_test', 'weights'], {}), '(features_test, weights)\n', (9280, 9304), True, 'import numpy as np\n'), ((12442, 12478), 'numpy.zeros', 'np.zeros', (['weights_input_hidden.shape'], {}), '(weights_input_hidden.shape)\n', (12450, 12478), True, 'import numpy as np\n'), ((12505, 12542), 'numpy.zeros', 'np.zeros', (['weights_hidden_output.shape'], {}), '(weights_hidden_output.shape)\n', (12513, 12542), True, 'import numpy as np\n'), ((14309, 14352), 'numpy.dot', 'np.dot', (['features_test', 'weights_input_hidden'], {}), '(features_test, weights_input_hidden)\n', (14315, 14352), True, 'import numpy as np\n'), ((14368, 14405), 'numpy.dot', 'np.dot', (['hidden', 'weights_hidden_output'], {}), '(hidden, weights_hidden_output)\n', (14374, 14405), True, 'import numpy as np\n'), ((3645, 3665), 'numpy.random.rand', 'np.random.rand', (['(2)', '(1)'], {}), '(2, 1)\n', (3659, 3665), True, 'import numpy as np\n'), ((5080, 5094), 'numpy.log', 'np.log', (['output'], {}), '(output)\n', (5086, 5094), True, 'import numpy as np\n'), ((5130, 5148), 'numpy.log', 'np.log', (['(1 - output)'], {}), '(1 - output)\n', (5136, 5148), True, 'import numpy as np\n'), ((8997, 9026), 'numpy.mean', 'np.mean', (['((out - targets) ** 2)'], {}), '((out - targets) ** 2)\n', (9004, 9026), True, 'import numpy as np\n'), ((11160, 11208), 'numpy.dot', 'np.dot', (['output_error_term', 'weights_hidden_output'], {}), '(output_error_term, weights_hidden_output)\n', (11166, 11208), True, 'import numpy as np\n'), ((12677, 12708), 'numpy.dot', 'np.dot', (['x', 'weights_input_hidden'], {}), '(x, weights_input_hidden)\n', (12683, 12708), True, 'import numpy as np\n'), ((13228, 13276), 'numpy.dot', 'np.dot', (['output_error_term', 'weights_hidden_output'], {}), '(output_error_term, weights_hidden_output)\n', (13234, 13276), True, 'import numpy as np\n'), ((14033, 14062), 'numpy.mean', 'np.mean', (['((out - targets) ** 2)'], {}), '((out - targets) ** 2)\n', (14040, 14062), True, 'import numpy as np\n'), ((3674, 3691), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (3688, 
3691), True, 'import numpy as np\n'), ((4769, 4779), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (4775, 4779), True, 'import numpy as np\n'), ((5648, 5658), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (5654, 5658), True, 'import numpy as np\n'), ((7211, 7221), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (7217, 7221), True, 'import numpy as np\n'), ((8122, 8140), 'numpy.dot', 'np.dot', (['x', 'weights'], {}), '(x, weights)\n', (8128, 8140), True, 'import numpy as np\n'), ((8955, 8980), 'numpy.dot', 'np.dot', (['features', 'weights'], {}), '(features, weights)\n', (8961, 8980), True, 'import numpy as np\n'), ((9608, 9618), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (9614, 9618), True, 'import numpy as np\n'), ((10437, 10447), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (10443, 10447), True, 'import numpy as np\n'), ((11950, 11960), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (11956, 11960), True, 'import numpy as np\n'), ((12781, 12825), 'numpy.dot', 'np.dot', (['hidden_output', 'weights_hidden_output'], {}), '(hidden_output, weights_hidden_output)\n', (12787, 12825), True, 'import numpy as np\n'), ((13888, 13919), 'numpy.dot', 'np.dot', (['x', 'weights_input_hidden'], {}), '(x, weights_input_hidden)\n', (13894, 13919), True, 'import numpy as np\n'), ((13943, 13987), 'numpy.dot', 'np.dot', (['hidden_output', 'weights_hidden_output'], {}), '(hidden_output, weights_hidden_output)\n', (13949, 13987), True, 'import numpy as np\n'), ((2492, 2507), 'numpy.matmul', 'np.matmul', (['X', 'W'], {}), '(X, W)\n', (2501, 2507), True, 'import numpy as np\n'), ((4561, 4570), 'numpy.log', 'np.log', (['P'], {}), '(P)\n', (4567, 4570), True, 'import numpy as np\n'), ((4583, 4596), 'numpy.log', 'np.log', (['(1 - P)'], {}), '(1 - P)\n', (4589, 4596), True, 'import numpy as np\n')] |
from twisted.internet import abstract, fdesc
class TwistedRawSocket(abstract.FileDescriptor):
def __init__(self, reactor, protocol, fd):
super().__init__(reactor)
self.__protocol = protocol
self.__protocol.makeConnection(self)
self.__fd = fd
self.startReading()
def fileno(self):
return self.__fd
def doRead(self):
return fdesc.readFromFD(self.fileno(), self.__protocol.dataReceived)
def writeSomeData(self, data):
return fdesc.writeToFD(self.fileno(), data)
def connectionLost(self, reason):
abstract.FileDescriptor.connectionLost(self, reason)
self.__protocol.connectionLost(reason)
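# Hypothetical usage sketch (names below are illustrative, not part of this module):
#   from twisted.internet import reactor
#   proto = SomeProtocol()          # any Twisted Protocol with a dataReceived handler
#   fd = connected_socket.fileno()  # an already-connected, non-blocking file descriptor
#   TwistedRawSocket(reactor, proto, fd)
#   reactor.run()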
| [
"twisted.internet.abstract.FileDescriptor.connectionLost"
] | [((590, 642), 'twisted.internet.abstract.FileDescriptor.connectionLost', 'abstract.FileDescriptor.connectionLost', (['self', 'reason'], {}), '(self, reason)\n', (628, 642), False, 'from twisted.internet import abstract, fdesc\n')] |
import pwd
import os
from django.core.checks import register, Warning
from django.conf import settings
#
# Internal checks
#
W001 = Warning(
'No DEPLOY_TARGET_USER set, define it to check write permissions on storages.',
id='preflight.W001'
)
W002 = Warning(
'You are not running as DEPLOY_TARGET_USER, writeable-checks can not be trusted.',
id='preflight.W002'
)
# noinspection PyUnusedLocal
@register('preflight', deploy=True)
def check_user(app_configs, **kwargs):
errors = []
current_user = pwd.getpwuid(os.getuid()).pw_name
target_user = getattr(settings, 'DEPLOY_TARGET_USER', None)
if target_user is None:
errors.append(W001)
if target_user and target_user != current_user:
errors.append(W002)
return errors
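# Example (hypothetical user name): with `DEPLOY_TARGET_USER = "www-data"` in settings.py,
# running `python manage.py check --deploy` as a different user emits W002; leaving the
# setting undefined emits W001 instead.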
| [
"django.core.checks.register",
"os.getuid",
"django.core.checks.Warning"
] | [((135, 253), 'django.core.checks.Warning', 'Warning', (['"""No DEPLOY_TARGET_USER set, define it to check write permissions on storages."""'], {'id': '"""preflight.W001"""'}), "(\n 'No DEPLOY_TARGET_USER set, define it to check write permissions on storages.'\n , id='preflight.W001')\n", (142, 253), False, 'from django.core.checks import register, Warning\n'), ((262, 383), 'django.core.checks.Warning', 'Warning', (['"""You are not running as DEPLOY_TARGET_USER, writeable-checks can not be trusted."""'], {'id': '"""preflight.W002"""'}), "(\n 'You are not running as DEPLOY_TARGET_USER, writeable-checks can not be trusted.'\n , id='preflight.W002')\n", (269, 383), False, 'from django.core.checks import register, Warning\n'), ((416, 450), 'django.core.checks.register', 'register', (['"""preflight"""'], {'deploy': '(True)'}), "('preflight', deploy=True)\n", (424, 450), False, 'from django.core.checks import register, Warning\n'), ((539, 550), 'os.getuid', 'os.getuid', ([], {}), '()\n', (548, 550), False, 'import os\n')] |
import authorize, requests, config
def send_trade_list(
pair,
side,
first_trade_size,
size_increase,
first_trade_price,
price_increase,
trade_count,
auth=None):
"""function takes in intial info trading pair (BTC-USD), side (buy or
sell), first trade price, minimum trade value, increase in price per trade,
increase in value per trade, the number of trades and an authorization
token and lists a corrosponding sequence of trades on GDAX through there
API
To do's : write function to accept class object instead of each item
"""
auth = authorize.run_GdaxAuth()
# Initiate trading index, trade dictionary to be sent in while-loop, and
# neg_pos variable to help manage buy vs sell sequence direction in loop.
n = 0
listed_trades = []
trade = {
"size": "",
"price": "",
"side": side,
"product_id": pair
}
api_url = config.url
if side == "buy":
neg_pos = -1
else:
neg_pos = 1
print("\n-- Listing New {}s --".format(trade["side"].title()))
# While loop to list each trade in sequence
while n < trade_count:
trade["size"] = str(
round(first_trade_size + size_increase * n, 10)
)
trade["price"] = str(
round(
first_trade_price + neg_pos * price_increase * n, 10
)
)
t = requests.post(api_url + 'orders', json=trade, auth=auth)
#Check for error
if t.status_code != 200:
print(("Response: {}, Message {}, Price: {}, Size: {}").format(
str(t.status_code),
t.json()["message"],
trade["price"],
trade["size"],
)
)
        # Otherwise try to record the successfully listed trade
else:
try:
print(("{}, Size: {}, Price: {}").format(
t.json()["product_id"],
t.json()["size"],
t.json()["price"],
)
)
listed_trades.append( t.json() )
except:
print( t.json() )
n += 1
# Return trades
return listed_trades
def cancel_id(trade):
auth = authorize.run_GdaxAuth()
api_url = 'https://api.gdax.com/'
response = requests.delete(api_url + 'orders/' + trade["id"], auth=auth)
print(
"response: {}, id: {}, side: {}, size: {}, price: {}".format(
response.status_code,
trade["id"],
trade["side"],
trade["size"],
trade["price"]
)
)
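# Example usage (hypothetical numbers; assumes config.url and GDAX API credentials
# are set up for authorize.run_GdaxAuth()):
#   trades = send_trade_list("BTC-USD", "buy", first_trade_size=0.01, size_increase=0.005,
#                            first_trade_price=30000, price_increase=50, trade_count=5)
#   for trade in trades:
#       cancel_id(trade)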
| [
"authorize.run_GdaxAuth",
"requests.post",
"requests.delete"
] | [((581, 605), 'authorize.run_GdaxAuth', 'authorize.run_GdaxAuth', ([], {}), '()\n', (603, 605), False, 'import authorize, requests, config\n'), ((2023, 2047), 'authorize.run_GdaxAuth', 'authorize.run_GdaxAuth', ([], {}), '()\n', (2045, 2047), False, 'import authorize, requests, config\n'), ((2097, 2158), 'requests.delete', 'requests.delete', (["(api_url + 'orders/' + trade['id'])"], {'auth': 'auth'}), "(api_url + 'orders/' + trade['id'], auth=auth)\n", (2112, 2158), False, 'import authorize, requests, config\n'), ((1319, 1375), 'requests.post', 'requests.post', (["(api_url + 'orders')"], {'json': 'trade', 'auth': 'auth'}), "(api_url + 'orders', json=trade, auth=auth)\n", (1332, 1375), False, 'import authorize, requests, config\n')] |
from socket import socket
from zlib import decompress
import cv2
import numpy
from PIL import Image
WIDTH = int(1366 / 1)
HEIGHT = int(768 / 1)
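# Wire format (as implied by the reads in main() below): each frame arrives as
#   1 byte        -> N, the length of the size field
#   N bytes       -> payload size, big-endian
#   <size> bytes  -> zlib-compressed raw BGRX pixels for one WIDTH x HEIGHT frame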
def recvall(conn, length):
buf = b''
while len(buf) < length:
data = conn.recv(length - len(buf))
if not data:
return data
buf += data
return buf
def main(host='127.0.0.1', port=5000):
watching = True
sock = socket()
sock.connect((host, port))
try:
while watching:
size_len = int.from_bytes(sock.recv(1), byteorder='big')
size = int.from_bytes(sock.recv(size_len), byteorder='big')
bgra = decompress(recvall(sock, size))
img = Image.frombytes("RGB", (WIDTH, HEIGHT), bgra, "raw", "BGRX")
np_ar = numpy.array(img, dtype=numpy.uint8)
np_ar = numpy.flip(np_ar[:, :, :3], 2)
cv2.imshow("OpenCV show", np_ar)
            if cv2.waitKey(25) & 0xFF == ord("q"):  # press 'q' to quit
                cv2.destroyAllWindows()  # close the display window before exiting
break
finally:
sock.close()
if __name__ == '__main__':
main()
| [
"numpy.flip",
"socket.socket",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"PIL.Image.frombytes",
"cv2.waitKey"
] | [((416, 424), 'socket.socket', 'socket', ([], {}), '()\n', (422, 424), False, 'from socket import socket\n'), ((711, 771), 'PIL.Image.frombytes', 'Image.frombytes', (['"""RGB"""', '(WIDTH, HEIGHT)', 'bgra', '"""raw"""', '"""BGRX"""'], {}), "('RGB', (WIDTH, HEIGHT), bgra, 'raw', 'BGRX')\n", (726, 771), False, 'from PIL import Image\n'), ((792, 827), 'numpy.array', 'numpy.array', (['img'], {'dtype': 'numpy.uint8'}), '(img, dtype=numpy.uint8)\n', (803, 827), False, 'import numpy\n'), ((861, 891), 'numpy.flip', 'numpy.flip', (['np_ar[:, :, :3]', '(2)'], {}), '(np_ar[:, :, :3], 2)\n', (871, 891), False, 'import numpy\n'), ((904, 936), 'cv2.imshow', 'cv2.imshow', (['"""OpenCV show"""', 'np_ar'], {}), "('OpenCV show', np_ar)\n", (914, 936), False, 'import cv2\n'), ((1018, 1041), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1039, 1041), False, 'import cv2\n'), ((953, 968), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (964, 968), False, 'import cv2\n')] |
from typing import Tuple
from time import localtime
def to_one_decimal(value: float) -> str:
return "%.1f" % value
def get_bytes_per_sample(bits: int) -> int:
if bits <= 8:
return 1
if bits <= 16:
return 2
if bits <= 24:
return 3
return 4
def get_frequency_readable(frequency: int) -> Tuple[int, str]:
frequency_unit = "Hz"
if frequency > 1000:
frequency /= 1000
frequency_unit = "kHz"
if frequency > 1000:
frequency /= 1000
frequency_unit = "MHz"
if frequency > 1000:
frequency /= 1000
frequency_unit = "GHz"
return frequency, frequency_unit
def get_timestamp_readable(time: float) -> str:
local_time = localtime(time)
return str(local_time.tm_hour) + ":" + str(local_time.tm_min) + ":" + str(local_time.tm_sec)
def get_timesteps_readable(time: float) -> Tuple[float, str]:
time_unit = "sec"
if time < 1.0:
time *= 1000
time_unit = "ms"
if time < 1.0:
time *= 1000
time_unit = "µs"
if time < 1.0:
time *= 1000
time_unit = "ns"
return time, time_unit
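# Quick sanity checks for the helpers above (values rounded):
#   get_bytes_per_sample(24)            -> 3
#   get_frequency_readable(2_400_000)   -> (2.4, 'MHz')
#   get_timesteps_readable(0.000002)    -> ≈ (2.0, 'µs')
#   to_one_decimal(2.449)               -> '2.4'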
| [
"time.localtime"
] | [((728, 743), 'time.localtime', 'localtime', (['time'], {}), '(time)\n', (737, 743), False, 'from time import localtime\n')] |
from datetime import date, datetime, timedelta
from django.conf import settings
from django.utils.timezone import localdate
from dateutil import rrule
from django.utils.dateparse import parse_date
from collections import defaultdict
def get_date_interval_from_get(request):
"""
Parses a request and returns the start and end date from it.
Parameters
----------
request: The GET Request Object
Returns
-------
Return a tuple in the form of (start_date, end_date). If either the start date or the end date is not present in the request None is returned in the tuple
"""
start_identifier = getattr(settings, "BRIDGER_START_IDENTIFIERS", ["start", "start_date", "from", "date_gte"],)
end_identifier = getattr(settings, "BRIDGER_END_IDENTIFIERS", ["end", "end_date", "to", "date_lte"])
date_format = getattr(settings, "BRIDGER_DATE_FORMAT", "%Y-%m-%d")
assert isinstance(start_identifier, list)
assert isinstance(end_identifier, list)
assert isinstance(date_format, str)
start = next((identifier for identifier in start_identifier if identifier in request.GET), None,)
end = next((identifier for identifier in end_identifier if identifier in request.GET), None)
if start:
try:
start = datetime.strptime(request.GET.get(start), date_format).date()
except ValueError:
start = None
if end:
try:
end = datetime.strptime(request.GET.get(end), date_format).date()
except ValueError:
end = None
return start, end
def get_quarter_from_date(d):
return ((d.month - 1) // 3) + 1
def get_start_date_from_date(d):
quarter = get_quarter_from_date(d)
return date(d.year, quarter * 3 - 2, 1)
def get_end_date_from_date(d):
quarter = get_quarter_from_date(d)
return date(d.year + ((quarter * 3 + 1) // 12), (quarter * 3 + 1) % 12, 1) - timedelta(days=1)
def get_start_and_end_date_from_date(d):
return get_start_date_from_date(d), get_end_date_from_date(d)
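# Worked example for the quarter helpers above:
#   get_quarter_from_date(date(2021, 5, 15))            -> 2
#   get_start_and_end_date_from_date(date(2021, 5, 15)) -> (date(2021, 4, 1), date(2021, 6, 30))
#   get_end_date_from_date(date(2021, 11, 3))           -> date(2021, 12, 31)  (Q4 rolls into the next year)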
def current_quarter_date_start(field=None, request=None, view=None):
return get_start_date_from_date(localdate())
def current_quarter_date_end(field=None, request=None, view=None):
return get_end_date_from_date(localdate())
def current_quarter_date_interval(field, request, view):
return (
current_quarter_date_start(field, request, view),
current_quarter_date_end(field, request, view),
)
def current_year_date_start(field, request, view):
d = localdate()
return date(d.year,1,1)
def current_year_date_end(field, request, view):
d = localdate()
return date(d.year + 1,1,1) - timedelta(days=1)
def current_year_date_interval(field, request, view):
return (
current_year_date_start(field, request, view),
current_year_date_end(field, request, view),
)
def current_month_date_start(field, request, view):
d = localdate()
return date(d.year,d.month,1)
def current_month_date_end(field=None, request=None, view=None):
d = localdate()
if d.month == 12:
return date(d.year, 12, 31)
return date(d.year, d.month + 1, 1) - timedelta(days=1)
def current_month_date_interval(field, request, view):
return (
current_month_date_start(field, request, view),
current_month_date_end(field, request, view),
)
def get_date_interval_from_request(request, request_type="GET"):
"""
Parses a request and returns the start and end date from it.
Parameters
----------
request: The GET Request Object
Returns
-------
Return a tuple in the form of (start_date, end_date). If either the start date or the end date is not present in the request None is returned in the tuple
"""
start_identifier = ["start", "start_date", "from", "date_gte"]
end_identifier = ["end", "end_date", "to", "date_lte"]
params = request.GET if request_type == "GET" else request.POST
start = None
end = None
if "date" in params:
if len(params.get("date").split(",")) == 2:
start, end = params.get("date").split(",")
else:
start = next(
(params.get(identifier) for identifier in start_identifier if identifier in params),
None,
)
end = next(
(params.get(identifier) for identifier in end_identifier if identifier in params), None
)
if start:
start = parse_date(start)
if end:
end = parse_date(end)
return start, end
def get_number_of_hours_between_dates(
d1, d2, skip_weekends=True, list_public_holidays=False, hours_range=range(0,23), granularity=12
):
def convert_days_from_hours(hours, granularity, hours_per_day):
return int(hours/granularity)*granularity/hours_per_day
rules = rrule.rruleset()
byweekday_list = [rrule.MO, rrule.TU, rrule.WE, rrule.TH, rrule.FR]
if not skip_weekends:
byweekday_list.extend([rrule.SA, rrule.SU])
rules.rrule(
rrule.rrule(
freq=rrule.HOURLY,
byweekday=byweekday_list,
byhour=hours_range,
dtstart=d1,
until=d2,
)
)
if list_public_holidays:
for holiday in list_public_holidays:
s1 = datetime(holiday.year, holiday.month, holiday.day, 0, 0, 0)
s2 = datetime(holiday.year, holiday.month, holiday.day, 23, 59, 59)
rules.exrule(
rrule.rrule(
rrule.HOURLY,
dtstart=s1,
until=s2
)
)
dates = defaultdict(int)
for r in list(rules):
dates[r.date()] += 1
return {k: convert_days_from_hours(v, granularity, len(hours_range)) for k, v in dates.items()}
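# Usage sketch (hedged; the holiday date is hypothetical):
#   from datetime import datetime, date
#   per_day = get_number_of_hours_between_dates(
#       datetime(2021, 4, 28), datetime(2021, 5, 5),
#       skip_weekends=True, list_public_holidays=[date(2021, 5, 3)])
#   # -> {date: value} where each value is that day's counted hours,
#   #    rounded down to a multiple of `granularity` and divided by len(hours_range)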
| [
"datetime.datetime",
"dateutil.rrule.rruleset",
"dateutil.rrule.rrule",
"collections.defaultdict",
"datetime.date",
"django.utils.dateparse.parse_date",
"django.utils.timezone.localdate",
"datetime.timedelta"
] | [((1721, 1753), 'datetime.date', 'date', (['d.year', '(quarter * 3 - 2)', '(1)'], {}), '(d.year, quarter * 3 - 2, 1)\n', (1725, 1753), False, 'from datetime import date, datetime, timedelta\n'), ((2522, 2533), 'django.utils.timezone.localdate', 'localdate', ([], {}), '()\n', (2531, 2533), False, 'from django.utils.timezone import localdate\n'), ((2545, 2563), 'datetime.date', 'date', (['d.year', '(1)', '(1)'], {}), '(d.year, 1, 1)\n', (2549, 2563), False, 'from datetime import date, datetime, timedelta\n'), ((2620, 2631), 'django.utils.timezone.localdate', 'localdate', ([], {}), '()\n', (2629, 2631), False, 'from django.utils.timezone import localdate\n'), ((2927, 2938), 'django.utils.timezone.localdate', 'localdate', ([], {}), '()\n', (2936, 2938), False, 'from django.utils.timezone import localdate\n'), ((2950, 2974), 'datetime.date', 'date', (['d.year', 'd.month', '(1)'], {}), '(d.year, d.month, 1)\n', (2954, 2974), False, 'from datetime import date, datetime, timedelta\n'), ((3047, 3058), 'django.utils.timezone.localdate', 'localdate', ([], {}), '()\n', (3056, 3058), False, 'from django.utils.timezone import localdate\n'), ((4815, 4831), 'dateutil.rrule.rruleset', 'rrule.rruleset', ([], {}), '()\n', (4829, 4831), False, 'from dateutil import rrule\n'), ((5618, 5634), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (5629, 5634), False, 'from collections import defaultdict\n'), ((1837, 1902), 'datetime.date', 'date', (['(d.year + (quarter * 3 + 1) // 12)', '((quarter * 3 + 1) % 12)', '(1)'], {}), '(d.year + (quarter * 3 + 1) // 12, (quarter * 3 + 1) % 12, 1)\n', (1841, 1902), False, 'from datetime import date, datetime, timedelta\n'), ((1907, 1924), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1916, 1924), False, 'from datetime import date, datetime, timedelta\n'), ((2142, 2153), 'django.utils.timezone.localdate', 'localdate', ([], {}), '()\n', (2151, 2153), False, 'from django.utils.timezone import localdate\n'), ((2258, 2269), 'django.utils.timezone.localdate', 'localdate', ([], {}), '()\n', (2267, 2269), False, 'from django.utils.timezone import localdate\n'), ((2643, 2665), 'datetime.date', 'date', (['(d.year + 1)', '(1)', '(1)'], {}), '(d.year + 1, 1, 1)\n', (2647, 2665), False, 'from datetime import date, datetime, timedelta\n'), ((2666, 2683), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2675, 2683), False, 'from datetime import date, datetime, timedelta\n'), ((3096, 3116), 'datetime.date', 'date', (['d.year', '(12)', '(31)'], {}), '(d.year, 12, 31)\n', (3100, 3116), False, 'from datetime import date, datetime, timedelta\n'), ((3128, 3156), 'datetime.date', 'date', (['d.year', '(d.month + 1)', '(1)'], {}), '(d.year, d.month + 1, 1)\n', (3132, 3156), False, 'from datetime import date, datetime, timedelta\n'), ((3159, 3176), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3168, 3176), False, 'from datetime import date, datetime, timedelta\n'), ((4442, 4459), 'django.utils.dateparse.parse_date', 'parse_date', (['start'], {}), '(start)\n', (4452, 4459), False, 'from django.utils.dateparse import parse_date\n'), ((4486, 4501), 'django.utils.dateparse.parse_date', 'parse_date', (['end'], {}), '(end)\n', (4496, 4501), False, 'from django.utils.dateparse import parse_date\n'), ((5017, 5119), 'dateutil.rrule.rrule', 'rrule.rrule', ([], {'freq': 'rrule.HOURLY', 'byweekday': 'byweekday_list', 'byhour': 'hours_range', 'dtstart': 'd1', 'until': 'd2'}), '(freq=rrule.HOURLY, 
byweekday=byweekday_list, byhour=hours_range,\n dtstart=d1, until=d2)\n', (5028, 5119), False, 'from dateutil import rrule\n'), ((5284, 5343), 'datetime.datetime', 'datetime', (['holiday.year', 'holiday.month', 'holiday.day', '(0)', '(0)', '(0)'], {}), '(holiday.year, holiday.month, holiday.day, 0, 0, 0)\n', (5292, 5343), False, 'from datetime import date, datetime, timedelta\n'), ((5361, 5423), 'datetime.datetime', 'datetime', (['holiday.year', 'holiday.month', 'holiday.day', '(23)', '(59)', '(59)'], {}), '(holiday.year, holiday.month, holiday.day, 23, 59, 59)\n', (5369, 5423), False, 'from datetime import date, datetime, timedelta\n'), ((5466, 5513), 'dateutil.rrule.rrule', 'rrule.rrule', (['rrule.HOURLY'], {'dtstart': 's1', 'until': 's2'}), '(rrule.HOURLY, dtstart=s1, until=s2)\n', (5477, 5513), False, 'from dateutil import rrule\n')] |
# -*-coding:utf-8-*-
# An anagram is a kind of wordplay:
# rearranging the letters of a word to produce another word with a different meaning
def solution(strs):
d = {}
for s in strs:
_s = ''.join(sorted(s))
#print(_s)
if _s not in d:
d[_s] = [s]
else:
d[_s].append(s)
return list(d.values())
import collections
def solution_2(strs):
a = collections.defaultdict(list)
for word in strs:
# sort and added in dictionary
a[''.join(sorted(word))].append(word)
return a.values()
strs = ["eat","tea","tan","ate","nat","bat"]
print(solution(strs))
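# Expected output (dict insertion order, Python 3.7+):
#   solution(strs)   -> [['eat', 'tea', 'ate'], ['tan', 'nat'], ['bat']]
#   solution_2(strs) -> the same groups, returned as a dict_values view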
print(solution_2(strs)) | [
"collections.defaultdict"
] | [((342, 371), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (365, 371), False, 'import collections\n')] |
# -*- coding: utf-8 -*-
import os
from honeybee_radiance_folder import ModelFolder as Folder
from honeybee_radiance_folder.folderutil import add_output_spec_to_receiver
from honeybee_radiance_folder.folderutil import _nukedir
def test_static_aperture():
radiance_folder = r'./tests/assets/project_folder'
folder = Folder(radiance_folder)
files = folder.aperture_files(black_out=False, rel_path=True)
assert 'model/aperture/aperture.mat' in files
assert 'model/aperture/aperture.rad' in files
def test_aperture_group():
radiance_folder = r'./tests/assets/project_folder'
folder = Folder(radiance_folder)
apertures = folder.aperture_groups(interior=False)
assert len(apertures) == 1
ap = apertures[0]
assert ap.states[0].identifier == '0_clear'
assert ap.states[0].default == 'south_window..default..000.rad'
assert ap.states[1].identifier == '1_diffuse'
assert ap.states[1].default == 'south_window..default..001.rad'
def test_add_output_spec():
re_file = r'./tests/assets/project_folder/model/aperture_group/south_window..mtx.rad'
out_file = r'./tests/assets/temp/south_window..mtx.rad'
output_folder = r'./tests/assets/temp'
if not os.path.isdir(output_folder):
os.mkdir(output_folder)
_nukedir(output_folder, False)
add_output_spec_to_receiver(re_file, 'cubical.vmx', out_file)
assert os.path.isfile(out_file)
with open(out_file) as outf:
content = outf.read()
assert '#@rfluxmtx h=kf u=0,0,1.0 o=cubical.vmx' in content
_nukedir(output_folder, False)
| [
"honeybee_radiance_folder.ModelFolder",
"honeybee_radiance_folder.folderutil.add_output_spec_to_receiver",
"os.path.isfile",
"os.path.isdir",
"os.mkdir",
"honeybee_radiance_folder.folderutil._nukedir"
] | [((325, 348), 'honeybee_radiance_folder.ModelFolder', 'Folder', (['radiance_folder'], {}), '(radiance_folder)\n', (331, 348), True, 'from honeybee_radiance_folder import ModelFolder as Folder\n'), ((612, 635), 'honeybee_radiance_folder.ModelFolder', 'Folder', (['radiance_folder'], {}), '(radiance_folder)\n', (618, 635), True, 'from honeybee_radiance_folder import ModelFolder as Folder\n'), ((1278, 1308), 'honeybee_radiance_folder.folderutil._nukedir', '_nukedir', (['output_folder', '(False)'], {}), '(output_folder, False)\n', (1286, 1308), False, 'from honeybee_radiance_folder.folderutil import _nukedir\n'), ((1313, 1374), 'honeybee_radiance_folder.folderutil.add_output_spec_to_receiver', 'add_output_spec_to_receiver', (['re_file', '"""cubical.vmx"""', 'out_file'], {}), "(re_file, 'cubical.vmx', out_file)\n", (1340, 1374), False, 'from honeybee_radiance_folder.folderutil import add_output_spec_to_receiver\n'), ((1386, 1410), 'os.path.isfile', 'os.path.isfile', (['out_file'], {}), '(out_file)\n', (1400, 1410), False, 'import os\n'), ((1542, 1572), 'honeybee_radiance_folder.folderutil._nukedir', '_nukedir', (['output_folder', '(False)'], {}), '(output_folder, False)\n', (1550, 1572), False, 'from honeybee_radiance_folder.folderutil import _nukedir\n'), ((1212, 1240), 'os.path.isdir', 'os.path.isdir', (['output_folder'], {}), '(output_folder)\n', (1225, 1240), False, 'import os\n'), ((1250, 1273), 'os.mkdir', 'os.mkdir', (['output_folder'], {}), '(output_folder)\n', (1258, 1273), False, 'import os\n')] |
# Released under the MIT License. See LICENSE for details.
#
"""UI functionality related to accounts."""
from __future__ import annotations
import _ba
import ba
def show_sign_in_prompt(account_type: str = None) -> None:
"""Bring up a prompt telling the user they must sign in."""
from bastd.ui import confirm
from bastd.ui.account import settings
if account_type == 'Google Play':
confirm.ConfirmWindow(
ba.Lstr(resource='notSignedInGooglePlayErrorText'),
lambda: _ba.sign_in('Google Play'),
ok_text=ba.Lstr(resource='accountSettingsWindow.signInText'),
width=460,
height=130)
else:
confirm.ConfirmWindow(
ba.Lstr(resource='notSignedInErrorText'),
lambda: settings.AccountSettingsWindow(modal=True,
close_once_signed_in=True),
ok_text=ba.Lstr(resource='accountSettingsWindow.signInText'),
width=460,
height=130)
| [
"_ba.sign_in",
"ba.Lstr",
"bastd.ui.account.settings.AccountSettingsWindow"
] | [((444, 494), 'ba.Lstr', 'ba.Lstr', ([], {'resource': '"""notSignedInGooglePlayErrorText"""'}), "(resource='notSignedInGooglePlayErrorText')\n", (451, 494), False, 'import ba\n'), ((718, 758), 'ba.Lstr', 'ba.Lstr', ([], {'resource': '"""notSignedInErrorText"""'}), "(resource='notSignedInErrorText')\n", (725, 758), False, 'import ba\n'), ((516, 542), '_ba.sign_in', '_ba.sign_in', (['"""Google Play"""'], {}), "('Google Play')\n", (527, 542), False, 'import _ba\n'), ((564, 616), 'ba.Lstr', 'ba.Lstr', ([], {'resource': '"""accountSettingsWindow.signInText"""'}), "(resource='accountSettingsWindow.signInText')\n", (571, 616), False, 'import ba\n'), ((780, 849), 'bastd.ui.account.settings.AccountSettingsWindow', 'settings.AccountSettingsWindow', ([], {'modal': '(True)', 'close_once_signed_in': '(True)'}), '(modal=True, close_once_signed_in=True)\n', (810, 849), False, 'from bastd.ui.account import settings\n'), ((922, 974), 'ba.Lstr', 'ba.Lstr', ([], {'resource': '"""accountSettingsWindow.signInText"""'}), "(resource='accountSettingsWindow.signInText')\n", (929, 974), False, 'import ba\n')] |
import os
opj = os.path.join
import numpy as np
import pandas as pd
import glob
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
color_cycle = ['dimgrey', 'firebrick', 'darkorange', 'olivedrab',
'dodgerblue', 'magenta']
plt.ioff()
plt.rcParams.update({'font.family': 'serif',
'font.size': 16, 'axes.labelsize': 20,
'mathtext.fontset': 'stix',
'axes.prop_cycle': plt.cycler('color', color_cycle)})
plt.rcParams["font.serif"] = ["Times New Roman"] + plt.rcParams["font.serif"]
import pffit
fit = pffit.phase_function_models.inversion()
m = pffit.phase_function_models.models()
dir = pffit.__path__[0]
dirdata = opj(dir, 'data')
trunc = False
if trunc:
dirfig = opj(dir, 'fig', 'truncated')
angular_range_t = [3, 150]
else:
dirfig = opj(dir, 'fig','article' )
angular_range = [3, 173]
# -------------------
# fitting section
# -------------------
theta_ = np.linspace(0, 180, 1000)
# remove 0deg to comply with FF model (not defined at 0)
theta_ = theta_[1:]
theta_ = np.logspace(-2, np.log10(180), 1000)
# load petzold
file = opj(dirdata, 'petzold_data.txt')
df_petzold = pd.read_csv(file, skiprows=3, sep=r'\s+', index_col=0, skipinitialspace=True, na_values='inf')
models = (fit.FF_fit, fit.RM_fit, fit.TTFF_fit, fit.TTRM_fit)
def process(wl, data, model, angular_range=[0, 180], x_only=False, theta_=theta_):
    '''
    Execute non-linear fitting for the given model and phase function data
    :param wl: wavelength in nm (used only to label the output tables)
    :param data: pandas Series of phase function values indexed by scattering angle (deg)
    :param model: fitting method to apply (e.g., fit.FF_fit, fit.RM_fit, fit.TTFF_fit, fit.TTRM_fit)
    :param angular_range: [min, max] scattering angles (deg) kept for the fit
    :param x_only: if True, return only the fitted parameter vector x
    :param theta_: angles (deg) on which the fitted phase function is evaluated
    :return: (res_, df_) DataFrames of fit results and fitted/raw phase functions, or x if x_only
    '''
model_ = model.__name__
N_ang = len(theta_)
back_ang = theta_[theta_ > 90]
group_ = data.dropna()
group_ = group_[
(group_.index >= angular_range[0]) & (group_.index <= angular_range[1])] # [group_.index<140]
theta, vsf = group_.index.values, group_.values
min1, func = model(theta, vsf)
out1 = min1.least_squares() # max_nfev=30, xtol=1e-7, ftol=1e-4)
x = out1.x
if x_only:
return x
res_ = pd.DataFrame(data={'model': [model_],'sample': [sample], 'name': [names[irow]], 'wavelength': [wl]})
df_ = pd.DataFrame(data={'model': model_, 'sample': [sample] * N_ang, 'name': [names[irow]] * N_ang,
'wavelength': [wl] * N_ang, 'theta': theta_})
res_['cost'] = out1.residual.__abs__().mean()
for c in ('redchi', 'bic', 'aic'):
res_[c] = out1.__getattribute__(c)
for name, param in out1.params.items():
res_[name] = param.value
res_[name + '_std'] = param.stderr
pf = func(theta_, *x)
raw = np.interp(theta_, theta, vsf, left=np.nan, right=np.nan)
df_['pf_raw'] = raw
df_['pf_fit'] = pf
norm = np.trapz(func(theta_[1:], *x) * np.sin(np.radians(theta_[1:])), np.radians(theta_[1:])) * np.pi * 2
bb_tilde = np.trapz(func(back_ang, *x) * np.sin(np.radians(back_ang)),
np.radians(back_ang)) * np.pi * 2 / norm
cos_ave = np.trapz(func(theta_[1:], *x) * np.sin(np.radians(theta_[1:]) * np.cos(np.radians(theta_[1:]))),
np.radians(theta_[1:])) * np.pi * 2
res_['norm'] = norm
res_['bb_ratio'] = bb_tilde
res_['asymmetry_factor'] = cos_ave
return res_, df_
files = glob.glob(opj(dirdata, 'normalized_vsf*txt'))
samples = ['PF_clear', 'PF_coast', 'PF_turbid', 'PF_avg-part',
'Arizona', 'Chlorella', 'Cylindrotheca', 'Dunaliella', 'Karenia', 'Skeletonema']
names = ['Petzold clear', 'Petzold coast', 'Petzold turbid', 'Petzold average',
'Arizona dust', r'$\it{C. autotrophica}$', r'$\it{C. closterium}$', r'$\it{D. salina}$',
r'$\it{K. mikimotoi}$', r'$\it{S. cf. costatum}$']
file_pattern = opj(dirdata, 'normalized_vsf_lov_experiment2015_xxx.txt')
fitdf = []
res = []
for icol, model in enumerate(models):
model_ = model.__name__
for irow, sample in enumerate(samples):
if 'PF' in sample:
# ===============
# Petzold data
# ===============
print(model_, sample)
group = df_petzold[sample]
wl = 514
            # set range to remove extrapolated values and uncertain forward-scattering data
angular_range = [10, 170]
res_, df_ = process(wl, group, model, angular_range=angular_range)
res.append(res_)
fitdf.append(df_)
else:
# ===============
# Harmel et al 2016 data
# ===============
file = file_pattern.replace('xxx', sample)
df = pd.read_csv(file, skiprows=8, sep='\t', index_col=0, skipinitialspace=True, na_values='inf')
angular_range = [3, 173]
if trunc:
angular_range = angular_range_t
# if trunc: # to truncate phase function and verify consistency over different scatt. angle range
# dirfig = opj(dir, 'fig', 'truncated')
# angular_range = [3, 150]
            for i, (label, group) in enumerate(df.items()):  # .items() replaces the deprecated .iteritems()
print(model_, sample, label)
wl = int(label.split('.')[-1])
res_, df_ = process(wl, group, model, angular_range=angular_range)
res.append(res_)
fitdf.append(df_)
res = pd.concat(res)
res.to_csv(opj(dirdata, 'fit_res_all.csv'), index=False)
fitdf = pd.concat(fitdf)
fitdf.to_csv(opj(dirdata, 'fitted_data_all.csv'), index=False)
# -------------------
# plotting section
# -------------------
# ===============
# Performances
# ===============
for param in ('redchi', 'bic', 'aic', 'bb_ratio', 'asymmetry_factor'):
fig, axs = plt.subplots(2, 2, figsize=(10, 9), sharex=True)
fig.subplots_adjust(bottom=0.175, top=0.96, left=0.1, right=0.98,
hspace=0.25, wspace=0.27)
axs = axs.ravel()
for icol, model in enumerate(models):
model_ = model.__name__
#res = pd.read_csv(opj(dirdata, 'fit_res_' + model_ + '.csv')).sort_values(['sample', 'wavelength'])
res_=res[res['model']==model_]
ax = axs[icol]
ax.set_title(model_)
icolor=0
for sample, group in res_.groupby('sample'):
name = group.name.values[0]
print(name)
if 'Petzold' in name:
continue
if icol == 3:
ax.plot(group.wavelength, group[param], label=name, linestyle='dashed', lw=2, marker='o',
c=color_cycle[icolor], mec='grey', ms=12, alpha=0.6)
else:
ax.plot(group.wavelength, group[param], linestyle='dashed', lw=2, marker='o',
c=color_cycle[icolor], mec='grey', ms=12, alpha=0.6)
if param == "redchi":
ax.set_ylabel(r'${\chi_\nu^2}$')
else:
ax.set_ylabel(param)
icolor+=1
axs[-1].set_xlabel('Wavelength (nm)')
axs[-2].set_xlabel('Wavelength (nm)')
fig.legend(loc='upper center', bbox_to_anchor=(0.535, .115),
fancybox=True, shadow=True, ncol=3, handletextpad=0.5, fontsize=20)
# fig.tight_layout()
plt.savefig(opj(dirfig, param + '_fitting_performances.png'), dpi=300)
# ===============
# TTRM parameters
# ===============
fig, axs = plt.subplots(4, 2, figsize=(10, 12), sharex=True)
res_TTRM = res[res['model']=='TTRM_fit']
axs = axs.ravel()
labels = ['$\gamma$', '$g_1$', '$g_2$', r'$\alpha _1$', r'$\alpha_2$', '$\~b_b$', r'$<cos\theta >$']
for i, param in enumerate(['gamma', 'g1', 'g2', 'alpha1', 'alpha2', 'bb_ratio', 'asymmetry_factor']):
ax = axs[i]
ax.set_ylabel(labels[i])
icolor=0
for sample, group in res_TTRM.groupby('sample'):
name = group.name.values[0]
if 'Petzold' in name:
continue
print(name,icolor,color_cycle[icolor])
# ax.errorbar(group.wavelength,group[param],yerr=group[param+'_std'],label=name,linestyle='dashed',lw=2, marker='o',mec='grey',ms=12,alpha=0.6)
ax.errorbar(group.wavelength, group[param], linestyle='dashed', lw=2, marker='o',c=color_cycle[icolor], mec='grey', ms=12, alpha=0.6)
icolor+=1
icolor=0
for sample, group in res_TTRM.groupby('sample'):
name = group.name.values[0]
if 'Petzold' in name:
continue
print(name,icolor,color_cycle[icolor])
axs[-1].errorbar(group.wavelength, group[param], label=name, linestyle='dashed', lw=2, marker='o', c=color_cycle[icolor], mec='grey',
ms=12, alpha=0.6)
icolor+=1
axs[-1].set_visible(False)
axs[-2].set_xlabel('Wavelength (nm)')
axs[-3].set_xlabel('Wavelength (nm)')
axs[-3].tick_params(axis='x', labelbottom='on')
fig.legend(loc='lower left', bbox_to_anchor=(0.57, 0.04),
fancybox=True, shadow=True, ncol=1, handletextpad=0.5, fontsize=17)
plt.tight_layout()
fig.subplots_adjust(hspace=0.065) # , wspace=0.065)
plt.savefig(opj(dirfig, 'TTRM_fitting_parameters.png'), dpi=300)
# ===============
# PF Fitting per sample
# ===============
def semilog(ax, size=4):
ax.set_xlim((0.01, 10))
divider = make_axes_locatable(ax)
axlin = divider.append_axes("right", size=size, pad=0, sharey=ax)
ax.spines['right'].set_visible(False)
axlin.spines['left'].set_linestyle('--')
# axlin.spines['left'].set_linewidth(1.8)
axlin.spines['left'].set_color('grey')
axlin.yaxis.set_ticks_position('right')
axlin.yaxis.set_visible(False)
axlin.xaxis.set_visible(False)
axlin.set_xscale('linear')
axlin.set_xlim((10, 190))
ax.semilogy()
ax.xaxis.set_major_locator(mpl.ticker.LogLocator(base=10.0, numticks=4))
ax.yaxis.set_major_locator(mpl.ticker.LogLocator(base=10.0, numticks=10))
ax.xaxis.set_minor_locator(mpl.ticker.LogLocator(base=10.0, numticks=10, subs=np.arange(10) * 0.1))
ax.yaxis.set_minor_locator(mpl.ticker.LogLocator(base=10.0, numticks=10, subs=np.arange(10) * 0.1))
return ax, axlin
color = ['black', 'blue', 'green', 'red']
samples = ['PF',
'Arizona', 'Chlorella', 'Cylindrotheca', 'Dunaliella', 'Karenia', 'Skeletonema']
for sample in samples:
print(sample)
basename = sample
df = fitdf[fitdf['sample'].str.contains(sample)]
by_ = 'wavelength'
title = df.name.values[0]
if sample == 'PF':
by_ = 'name'
title = "Petzold measurements"
# if not '3µm' in basename:
# continue
fig, axs_ = plt.subplots(2, 2, figsize=(15, 12), sharex=True, sharey=True)
axslin = [0 for x in range(4)]
axs = axs_.ravel()
for im, (model, group) in enumerate(df.groupby('model')):
print(model)
ax = axs[im]
ax.loglog()
ax, axlin = semilog(ax)
axslin[im] = axlin
for i, (label, g_) in enumerate(group.groupby(by_)):
res_ = res[res['sample'].str.contains(sample) & (res['model'] == model) & (res[by_] == label)]
if by_ == 'wavelength':
label = str(label) + ' nm'
print(label)
for ax_ in (ax, axlin):
ax_.plot(g_.theta, g_.pf_raw, color=color[i], label=label)
ax_.plot(g_.theta, g_.pf_fit, '--', color=color[i])
bp_tilde = res_.bb_ratio.values[0]
asym = res_.asymmetry_factor.values[0]
axlin.text(0.95, 0.95-(i*0.08), r'$\~b_b=${:6.4f}, $<cos \theta > =${:6.3f}'.format(bp_tilde,asym),
size=20, color=color[i],transform=axlin.transAxes, ha="right", va="top", )
ax.set_title(model)
ax.set_ylim(ymin=0.0003, ymax=30 ** 2)
plt.legend(loc='upper center', bbox_to_anchor=(-0.5, -0.14),
fancybox=True, shadow=True, ncol=4, handletextpad=0.5, fontsize=17)
for irow in range(2):
axs_[irow, 0].set_ylabel(r'Phase function $(sr^{-1})$')
for icol in range(-2, 0):
axslin[icol].xaxis.set_visible(True)
axslin[icol].set_xlabel('Scattering angle (deg)')
fig.subplots_adjust(hspace=0.085, wspace=0.085)
plt.suptitle(title, fontsize=24)
plt.savefig(opj(dirfig, basename + '.png'), dpi=300)
# ===============
# PF Fitting summary
# ===============
color_cycle = ['white','dimgrey', 'firebrick', 'olivedrab',
]
samples = ['PF',
'Arizona', 'Chlorella', 'Dunaliella']
rows, cols = 4, 4
axslin = [[0 for x in range(cols)] for x in range(rows)]
names=[]
fig, axs = plt.subplots(rows, cols, figsize=(22, 17), sharex=True, sharey=True)
for irow, sample in enumerate(samples):
print(sample)
basename = sample
df = fitdf[fitdf['sample'].str.contains(sample)]
by_ = 'wavelength'
name = df.name.values[0]
if sample == 'PF':
by_ = 'name'
name = "Petzold meas."
names.append(name)
for im, (model, group) in enumerate(df.groupby('model')):
print(model)
axs[0, im].set_title(model)
ax = axs[irow,im]
ax.loglog()
ax, axlin = semilog(ax,size=3.1)
axslin[irow][im] = axlin
for i, (label, g_) in enumerate(group.groupby(by_)):
res_ = res[res['sample'].str.contains(sample) & (res['model'] == model) & (res[by_] == label)]
if by_ == 'wavelength':
label = str(label) + ' nm'
print(label)
for ax_ in (ax, axlin):
ax_.plot(g_.theta, g_.pf_raw, color=color[i], label=label)
ax_.plot(g_.theta, g_.pf_fit, '--', color=color[i])
ax.set_ylim(ymin=0.0003, ymax=30 ** 2)
plt.legend(loc='upper right', bbox_to_anchor=(0.975, 0.97),
fancybox=True, shadow=True, ncol=1, handletextpad=0.5, fontsize=16)
for irow, sample in enumerate(samples):
axslin[irow][0].text(0.95, 0.95, names[irow], size=20,
transform=axslin[irow][0].transAxes, ha="right", va="top",
bbox=dict(boxstyle="round",
ec=(0.1, 0.1, 0.1),
fc=plt.matplotlib.colors.to_rgba(color_cycle[irow], 0.3),
))
axs[irow, 0].set_ylabel(r'Phase function $(sr^{-1})$')
for icol, model in enumerate(models):
axslin[-1][icol].xaxis.set_visible(True)
axslin[-1][icol].set_xlabel('Scattering angle (deg)')
plt.tight_layout()
fig.subplots_adjust(hspace=0.065, wspace=0.065)
plt.suptitle('')
plt.savefig(opj(dirfig, 'Figure_1.png'), dpi=300)
| [
"numpy.radians",
"numpy.log10",
"matplotlib.ticker.LogLocator",
"pandas.read_csv",
"numpy.arange",
"numpy.linspace",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"pandas.DataFrame",
"pffit.phase_function_models.inversion",
"pffit.phase_function_models.models",
"matplotlib.pyplot.ioff",
"numpy.interp",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.cycler",
"matplotlib.pyplot.tight_layout",
"pandas.concat",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.matplotlib.colors.to_rgba"
] | [((302, 312), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (310, 312), True, 'import matplotlib.pyplot as plt\n'), ((641, 680), 'pffit.phase_function_models.inversion', 'pffit.phase_function_models.inversion', ([], {}), '()\n', (678, 680), False, 'import pffit\n'), ((685, 721), 'pffit.phase_function_models.models', 'pffit.phase_function_models.models', ([], {}), '()\n', (719, 721), False, 'import pffit\n'), ((1020, 1045), 'numpy.linspace', 'np.linspace', (['(0)', '(180)', '(1000)'], {}), '(0, 180, 1000)\n', (1031, 1045), True, 'import numpy as np\n'), ((1238, 1337), 'pandas.read_csv', 'pd.read_csv', (['file'], {'skiprows': '(3)', 'sep': '"""\\\\s+"""', 'index_col': '(0)', 'skipinitialspace': '(True)', 'na_values': '"""inf"""'}), "(file, skiprows=3, sep='\\\\s+', index_col=0, skipinitialspace=\n True, na_values='inf')\n", (1249, 1337), True, 'import pandas as pd\n'), ((5400, 5414), 'pandas.concat', 'pd.concat', (['res'], {}), '(res)\n', (5409, 5414), True, 'import pandas as pd\n'), ((5481, 5497), 'pandas.concat', 'pd.concat', (['fitdf'], {}), '(fitdf)\n', (5490, 5497), True, 'import pandas as pd\n'), ((7382, 7431), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(2)'], {'figsize': '(10, 12)', 'sharex': '(True)'}), '(4, 2, figsize=(10, 12), sharex=True)\n', (7394, 7431), True, 'import matplotlib.pyplot as plt\n'), ((8909, 8927), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8925, 8927), True, 'import matplotlib.pyplot as plt\n'), ((12455, 12523), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'figsize': '(22, 17)', 'sharex': '(True)', 'sharey': '(True)'}), '(rows, cols, figsize=(22, 17), sharex=True, sharey=True)\n', (12467, 12523), True, 'import matplotlib.pyplot as plt\n'), ((14315, 14333), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14331, 14333), True, 'import matplotlib.pyplot as plt\n'), ((14382, 14398), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['""""""'], {}), "('')\n", (14394, 14398), True, 'import matplotlib.pyplot as plt\n'), ((1148, 1161), 'numpy.log10', 'np.log10', (['(180)'], {}), '(180)\n', (1156, 1161), True, 'import numpy as np\n'), ((2154, 2260), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'model': [model_], 'sample': [sample], 'name': [names[irow]], 'wavelength':\n [wl]}"}), "(data={'model': [model_], 'sample': [sample], 'name': [names[\n irow]], 'wavelength': [wl]})\n", (2166, 2260), True, 'import pandas as pd\n'), ((2266, 2411), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'model': model_, 'sample': [sample] * N_ang, 'name': [names[irow]] * N_ang,\n 'wavelength': [wl] * N_ang, 'theta': theta_}"}), "(data={'model': model_, 'sample': [sample] * N_ang, 'name': [\n names[irow]] * N_ang, 'wavelength': [wl] * N_ang, 'theta': theta_})\n", (2278, 2411), True, 'import pandas as pd\n'), ((2726, 2782), 'numpy.interp', 'np.interp', (['theta_', 'theta', 'vsf'], {'left': 'np.nan', 'right': 'np.nan'}), '(theta_, theta, vsf, left=np.nan, right=np.nan)\n', (2735, 2782), True, 'import numpy as np\n'), ((5764, 5812), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(10, 9)', 'sharex': '(True)'}), '(2, 2, figsize=(10, 9), sharex=True)\n', (5776, 5812), True, 'import matplotlib.pyplot as plt\n'), ((9175, 9198), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (9194, 9198), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((10498, 10560), 'matplotlib.pyplot.subplots', 'plt.subplots', 
(['(2)', '(2)'], {'figsize': '(15, 12)', 'sharex': '(True)', 'sharey': '(True)'}), '(2, 2, figsize=(15, 12), sharex=True, sharey=True)\n', (10510, 10560), True, 'import matplotlib.pyplot as plt\n'), ((11647, 11779), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'bbox_to_anchor': '(-0.5, -0.14)', 'fancybox': '(True)', 'shadow': '(True)', 'ncol': '(4)', 'handletextpad': '(0.5)', 'fontsize': '(17)'}), "(loc='upper center', bbox_to_anchor=(-0.5, -0.14), fancybox=True,\n shadow=True, ncol=4, handletextpad=0.5, fontsize=17)\n", (11657, 11779), True, 'import matplotlib.pyplot as plt\n'), ((12067, 12099), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {'fontsize': '(24)'}), '(title, fontsize=24)\n', (12079, 12099), True, 'import matplotlib.pyplot as plt\n'), ((13554, 13685), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'bbox_to_anchor': '(0.975, 0.97)', 'fancybox': '(True)', 'shadow': '(True)', 'ncol': '(1)', 'handletextpad': '(0.5)', 'fontsize': '(16)'}), "(loc='upper right', bbox_to_anchor=(0.975, 0.97), fancybox=True,\n shadow=True, ncol=1, handletextpad=0.5, fontsize=16)\n", (13564, 13685), True, 'import matplotlib.pyplot as plt\n'), ((507, 539), 'matplotlib.pyplot.cycler', 'plt.cycler', (['"""color"""', 'color_cycle'], {}), "('color', color_cycle)\n", (517, 539), True, 'import matplotlib.pyplot as plt\n'), ((9669, 9713), 'matplotlib.ticker.LogLocator', 'mpl.ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(4)'}), '(base=10.0, numticks=4)\n', (9690, 9713), True, 'import matplotlib as mpl\n'), ((9746, 9791), 'matplotlib.ticker.LogLocator', 'mpl.ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(10)'}), '(base=10.0, numticks=10)\n', (9767, 9791), True, 'import matplotlib as mpl\n'), ((4678, 4774), 'pandas.read_csv', 'pd.read_csv', (['file'], {'skiprows': '(8)', 'sep': '"""\t"""', 'index_col': '(0)', 'skipinitialspace': '(True)', 'na_values': '"""inf"""'}), "(file, skiprows=8, sep='\\t', index_col=0, skipinitialspace=True,\n na_values='inf')\n", (4689, 4774), True, 'import pandas as pd\n'), ((2905, 2927), 'numpy.radians', 'np.radians', (['theta_[1:]'], {}), '(theta_[1:])\n', (2915, 2927), True, 'import numpy as np\n'), ((3215, 3237), 'numpy.radians', 'np.radians', (['theta_[1:]'], {}), '(theta_[1:])\n', (3225, 3237), True, 'import numpy as np\n'), ((3040, 3060), 'numpy.radians', 'np.radians', (['back_ang'], {}), '(back_ang)\n', (3050, 3060), True, 'import numpy as np\n'), ((9875, 9888), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (9884, 9888), True, 'import numpy as np\n'), ((9979, 9992), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (9988, 9992), True, 'import numpy as np\n'), ((14022, 14075), 'matplotlib.pyplot.matplotlib.colors.to_rgba', 'plt.matplotlib.colors.to_rgba', (['color_cycle[irow]', '(0.3)'], {}), '(color_cycle[irow], 0.3)\n', (14051, 14075), True, 'import matplotlib.pyplot as plt\n'), ((2880, 2902), 'numpy.radians', 'np.radians', (['theta_[1:]'], {}), '(theta_[1:])\n', (2890, 2902), True, 'import numpy as np\n'), ((2993, 3013), 'numpy.radians', 'np.radians', (['back_ang'], {}), '(back_ang)\n', (3003, 3013), True, 'import numpy as np\n'), ((3134, 3156), 'numpy.radians', 'np.radians', (['theta_[1:]'], {}), '(theta_[1:])\n', (3144, 3156), True, 'import numpy as np\n'), ((3166, 3188), 'numpy.radians', 'np.radians', (['theta_[1:]'], {}), '(theta_[1:])\n', (3176, 3188), True, 'import numpy as np\n')] |
#!env/bin/python
# -*- coding:utf-8 -*-
# author: <EMAIL>
import requests
import json
from utils.get_configure import env_file_conf
from utils.apollo_handler import ApolloQuery
def prometh_hosts():
"""
    Query the prometheus (prome) addresses from apollo.
:return: list
"""
external = env_file_conf('EXTERNAL', conf_type='bool')
if not external:
conf_name = 'prome_host'
else:
conf_name = 'prome_external_host'
if external:
        print('Connecting to apollo from external net!')
apollo_query = ApolloQuery()
prome_hosts = None
try:
prome_hosts = apollo_query.apo_config(conf_name).split(',')
except Exception as e:
print('Getting prometheus addr from apollo failed!{}'.format(e.__str__()))
exit(1)
print('Debug prometheus hosts: {}'.format(prome_hosts))
return prome_hosts
def prome_query(prome_sql):
"""
    Query prometheus.
:param prome_sql: promesql
:return:
"""
res = None
res_data = None
prome_host = prometh_hosts()
time_out = 60 * 3
if not prome_host: exit(1)
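    # Try the primary prometheus host first; fall back to the second host if the request fails.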
try:
res = requests.request(method="get", url='http://' + prome_host[0] + prome_sql, timeout=time_out)
except requests.RequestException as e:
try:
res = requests.request(method="get", url='http://' + prome_host[1] + prome_sql, timeout=time_out)
except Exception as e1:
print("Query prometheus failed!{}".format(e1.__str__()))
exit(1)
except TypeError as e:
print("Gettting prometheus addr from apollo failed!{}".format(e.__str__()))
print('Prometheus returned: {} {}'.format(res.status_code, res.raw))
if 400 <= res.status_code < 600:
print('Error query prome {} {}'.format(res.status_code, res.content))
exit(1)
else:
try:
res_content = json.loads(res.content)
print("prome returned:{} {}".format(res.status_code, res_content))
except Exception as e:
res_content = res.content
res_status = res_content["status"]
res_data = res_content["data"]
if not res_status == "success":
print("prome returned:{} {}".format(res.status_code, res_status))
exit(1)
return res_data
| [
"utils.get_configure.env_file_conf",
"json.loads",
"utils.apollo_handler.ApolloQuery",
"requests.request"
] | [((288, 331), 'utils.get_configure.env_file_conf', 'env_file_conf', (['"""EXTERNAL"""'], {'conf_type': '"""bool"""'}), "('EXTERNAL', conf_type='bool')\n", (301, 331), False, 'from utils.get_configure import env_file_conf\n'), ((530, 543), 'utils.apollo_handler.ApolloQuery', 'ApolloQuery', ([], {}), '()\n', (541, 543), False, 'from utils.apollo_handler import ApolloQuery\n'), ((1127, 1222), 'requests.request', 'requests.request', ([], {'method': '"""get"""', 'url': "('http://' + prome_host[0] + prome_sql)", 'timeout': 'time_out'}), "(method='get', url='http://' + prome_host[0] + prome_sql,\n timeout=time_out)\n", (1143, 1222), False, 'import requests\n'), ((1872, 1895), 'json.loads', 'json.loads', (['res.content'], {}), '(res.content)\n', (1882, 1895), False, 'import json\n'), ((1293, 1388), 'requests.request', 'requests.request', ([], {'method': '"""get"""', 'url': "('http://' + prome_host[1] + prome_sql)", 'timeout': 'time_out'}), "(method='get', url='http://' + prome_host[1] + prome_sql,\n timeout=time_out)\n", (1309, 1388), False, 'import requests\n')] |
from kafka.protocol.admin import Request
from kafka.protocol.admin import Response
from kafka.protocol.types import Schema
from kafka.protocol.types import Array
from kafka.protocol.types import Int16
from kafka.protocol.types import String
import pytest
@pytest.mark.parametrize('superclass', (Request, Response))
class TestObjectConversion:
def test_get_item(self, superclass):
class TestClass(superclass):
API_KEY = 0
API_VERSION = 0
RESPONSE_TYPE = None # To satisfy the Request ABC
SCHEMA = Schema(
('myobject', Int16))
tc = TestClass(myobject=0)
assert tc.get_item('myobject') == 0
with pytest.raises(KeyError):
tc.get_item('does-not-exist')
def test_with_empty_schema(self, superclass):
class TestClass(superclass):
API_KEY = 0
API_VERSION = 0
RESPONSE_TYPE = None # To satisfy the Request ABC
SCHEMA = Schema()
tc = TestClass()
tc.encode()
assert tc.to_object() == {}
def test_with_basic_schema(self, superclass):
class TestClass(superclass):
API_KEY = 0
API_VERSION = 0
RESPONSE_TYPE = None # To satisfy the Request ABC
SCHEMA = Schema(
('myobject', Int16))
tc = TestClass(myobject=0)
tc.encode()
assert tc.to_object() == {'myobject': 0}
def test_with_basic_array_schema(self, superclass):
class TestClass(superclass):
API_KEY = 0
API_VERSION = 0
RESPONSE_TYPE = None # To satisfy the Request ABC
SCHEMA = Schema(
('myarray', Array(Int16)))
tc = TestClass(myarray=[1,2,3])
tc.encode()
assert tc.to_object()['myarray'] == [1, 2, 3]
def test_with_complex_array_schema(self, superclass):
class TestClass(superclass):
API_KEY = 0
API_VERSION = 0
RESPONSE_TYPE = None # To satisfy the Request ABC
SCHEMA = Schema(
('myarray', Array(
('subobject', Int16),
('othersubobject', String('utf-8')))))
tc = TestClass(
myarray=[[10, 'hello']]
)
tc.encode()
obj = tc.to_object()
assert len(obj['myarray']) == 1
assert obj['myarray'][0]['subobject'] == 10
assert obj['myarray'][0]['othersubobject'] == 'hello'
def test_with_array_and_other(self, superclass):
class TestClass(superclass):
API_KEY = 0
API_VERSION = 0
RESPONSE_TYPE = None # To satisfy the Request ABC
SCHEMA = Schema(
('myarray', Array(
('subobject', Int16),
('othersubobject', String('utf-8')))),
('notarray', Int16))
tc = TestClass(
myarray=[[10, 'hello']],
notarray=42
)
obj = tc.to_object()
assert len(obj['myarray']) == 1
assert obj['myarray'][0]['subobject'] == 10
assert obj['myarray'][0]['othersubobject'] == 'hello'
assert obj['notarray'] == 42
def test_with_nested_array(self, superclass):
class TestClass(superclass):
API_KEY = 0
API_VERSION = 0
RESPONSE_TYPE = None # To satisfy the Request ABC
SCHEMA = Schema(
('myarray', Array(
('subarray', Array(Int16)),
('otherobject', Int16))))
tc = TestClass(
myarray=[
[[1, 2], 2],
[[2, 3], 4],
]
)
print(tc.encode())
obj = tc.to_object()
assert len(obj['myarray']) == 2
assert obj['myarray'][0]['subarray'] == [1, 2]
assert obj['myarray'][0]['otherobject'] == 2
assert obj['myarray'][1]['subarray'] == [2, 3]
assert obj['myarray'][1]['otherobject'] == 4
def test_with_complex_nested_array(self, superclass):
class TestClass(superclass):
API_KEY = 0
API_VERSION = 0
RESPONSE_TYPE = None # To satisfy the Request ABC
SCHEMA = Schema(
('myarray', Array(
('subarray', Array(
('innertest', String('utf-8')),
('otherinnertest', String('utf-8')))),
('othersubarray', Array(Int16)))),
('notarray', String('utf-8')))
tc = TestClass(
myarray=[
[[['hello', 'hello'], ['hello again', 'hello again']], [0]],
[[['hello', 'hello again']], [1]],
],
notarray='notarray'
)
tc.encode()
obj = tc.to_object()
assert obj['notarray'] == 'notarray'
myarray = obj['myarray']
assert len(myarray) == 2
assert myarray[0]['othersubarray'] == [0]
assert len(myarray[0]['subarray']) == 2
assert myarray[0]['subarray'][0]['innertest'] == 'hello'
assert myarray[0]['subarray'][0]['otherinnertest'] == 'hello'
assert myarray[0]['subarray'][1]['innertest'] == 'hello again'
assert myarray[0]['subarray'][1]['otherinnertest'] == 'hello again'
assert myarray[1]['othersubarray'] == [1]
assert len(myarray[1]['subarray']) == 1
assert myarray[1]['subarray'][0]['innertest'] == 'hello'
assert myarray[1]['subarray'][0]['otherinnertest'] == 'hello again'
def test_with_metadata_response():
from kafka.protocol.metadata import MetadataResponse_v5
tc = MetadataResponse_v5(
throttle_time_ms=0,
brokers=[
[0, 'testhost0', 9092, 'testrack0'],
[1, 'testhost1', 9092, 'testrack1'],
],
cluster_id='abcd',
controller_id=0,
topics=[
[0, 'testtopic1', False, [
[0, 0, 0, [0, 1], [0, 1], []],
[0, 1, 1, [1, 0], [1, 0], []],
],
], [0, 'other-test-topic', True, [
[0, 0, 0, [0, 1], [0, 1], []],
]
]]
)
tc.encode() # Make sure this object encodes successfully
obj = tc.to_object()
assert obj['throttle_time_ms'] == 0
assert len(obj['brokers']) == 2
assert obj['brokers'][0]['node_id'] == 0
assert obj['brokers'][0]['host'] == 'testhost0'
assert obj['brokers'][0]['port'] == 9092
assert obj['brokers'][0]['rack'] == 'testrack0'
assert obj['brokers'][1]['node_id'] == 1
assert obj['brokers'][1]['host'] == 'testhost1'
assert obj['brokers'][1]['port'] == 9092
assert obj['brokers'][1]['rack'] == 'testrack1'
assert obj['cluster_id'] == 'abcd'
assert obj['controller_id'] == 0
assert len(obj['topics']) == 2
assert obj['topics'][0]['error_code'] == 0
assert obj['topics'][0]['topic'] == 'testtopic1'
assert obj['topics'][0]['is_internal'] == False
assert len(obj['topics'][0]['partitions']) == 2
assert obj['topics'][0]['partitions'][0]['error_code'] == 0
assert obj['topics'][0]['partitions'][0]['partition'] == 0
assert obj['topics'][0]['partitions'][0]['leader'] == 0
assert obj['topics'][0]['partitions'][0]['replicas'] == [0, 1]
assert obj['topics'][0]['partitions'][0]['isr'] == [0, 1]
assert obj['topics'][0]['partitions'][0]['offline_replicas'] == []
assert obj['topics'][0]['partitions'][1]['error_code'] == 0
assert obj['topics'][0]['partitions'][1]['partition'] == 1
assert obj['topics'][0]['partitions'][1]['leader'] == 1
assert obj['topics'][0]['partitions'][1]['replicas'] == [1, 0]
assert obj['topics'][0]['partitions'][1]['isr'] == [1, 0]
assert obj['topics'][0]['partitions'][1]['offline_replicas'] == []
assert obj['topics'][1]['error_code'] == 0
assert obj['topics'][1]['topic'] == 'other-test-topic'
assert obj['topics'][1]['is_internal'] == True
assert len(obj['topics'][1]['partitions']) == 1
assert obj['topics'][1]['partitions'][0]['error_code'] == 0
assert obj['topics'][1]['partitions'][0]['partition'] == 0
assert obj['topics'][1]['partitions'][0]['leader'] == 0
assert obj['topics'][1]['partitions'][0]['replicas'] == [0, 1]
assert obj['topics'][1]['partitions'][0]['isr'] == [0, 1]
assert obj['topics'][1]['partitions'][0]['offline_replicas'] == []
tc.encode()
| [
"kafka.protocol.metadata.MetadataResponse_v5",
"kafka.protocol.types.Schema",
"kafka.protocol.types.Array",
"pytest.mark.parametrize",
"pytest.raises",
"kafka.protocol.types.String"
] | [((258, 316), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""superclass"""', '(Request, Response)'], {}), "('superclass', (Request, Response))\n", (281, 316), False, 'import pytest\n'), ((5700, 6038), 'kafka.protocol.metadata.MetadataResponse_v5', 'MetadataResponse_v5', ([], {'throttle_time_ms': '(0)', 'brokers': "[[0, 'testhost0', 9092, 'testrack0'], [1, 'testhost1', 9092, 'testrack1']]", 'cluster_id': '"""abcd"""', 'controller_id': '(0)', 'topics': "[[0, 'testtopic1', False, [[0, 0, 0, [0, 1], [0, 1], []], [0, 1, 1, [1, 0],\n [1, 0], []]]], [0, 'other-test-topic', True, [[0, 0, 0, [0, 1], [0, 1],\n []]]]]"}), "(throttle_time_ms=0, brokers=[[0, 'testhost0', 9092,\n 'testrack0'], [1, 'testhost1', 9092, 'testrack1']], cluster_id='abcd',\n controller_id=0, topics=[[0, 'testtopic1', False, [[0, 0, 0, [0, 1], [0,\n 1], []], [0, 1, 1, [1, 0], [1, 0], []]]], [0, 'other-test-topic', True,\n [[0, 0, 0, [0, 1], [0, 1], []]]]])\n", (5719, 6038), False, 'from kafka.protocol.metadata import MetadataResponse_v5\n'), ((559, 586), 'kafka.protocol.types.Schema', 'Schema', (["('myobject', Int16)"], {}), "(('myobject', Int16))\n", (565, 586), False, 'from kafka.protocol.types import Schema\n'), ((697, 720), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (710, 720), False, 'import pytest\n'), ((996, 1004), 'kafka.protocol.types.Schema', 'Schema', ([], {}), '()\n', (1002, 1004), False, 'from kafka.protocol.types import Schema\n'), ((1311, 1338), 'kafka.protocol.types.Schema', 'Schema', (["('myobject', Int16)"], {}), "(('myobject', Int16))\n", (1317, 1338), False, 'from kafka.protocol.types import Schema\n'), ((1727, 1739), 'kafka.protocol.types.Array', 'Array', (['Int16'], {}), '(Int16)\n', (1732, 1739), False, 'from kafka.protocol.types import Array\n'), ((4562, 4577), 'kafka.protocol.types.String', 'String', (['"""utf-8"""'], {}), "('utf-8')\n", (4568, 4577), False, 'from kafka.protocol.types import String\n'), ((2213, 2228), 'kafka.protocol.types.String', 'String', (['"""utf-8"""'], {}), "('utf-8')\n", (2219, 2228), False, 'from kafka.protocol.types import String\n'), ((2858, 2873), 'kafka.protocol.types.String', 'String', (['"""utf-8"""'], {}), "('utf-8')\n", (2864, 2873), False, 'from kafka.protocol.types import String\n'), ((3540, 3552), 'kafka.protocol.types.Array', 'Array', (['Int16'], {}), '(Int16)\n', (3545, 3552), False, 'from kafka.protocol.types import Array\n'), ((4516, 4528), 'kafka.protocol.types.Array', 'Array', (['Int16'], {}), '(Int16)\n', (4521, 4528), False, 'from kafka.protocol.types import Array\n'), ((4397, 4412), 'kafka.protocol.types.String', 'String', (['"""utf-8"""'], {}), "('utf-8')\n", (4403, 4412), False, 'from kafka.protocol.types import String\n'), ((4458, 4473), 'kafka.protocol.types.String', 'String', (['"""utf-8"""'], {}), "('utf-8')\n", (4464, 4473), False, 'from kafka.protocol.types import String\n')] |
"""View for about."""
from componentstore.const import VERSION, DEMO
import componentstore.resources.html as load
async def view():
"""View for about."""
installed_version = VERSION
if not installed_version:
installed_version = 'dev'
if DEMO:
installed_version = installed_version + '(DEMO)'
#------------------------------------------------------------------------------
about = load.TEXT.format(
"This tool can help you manage your 'custom_components' for Home Assistant.")
about += load.BREAK
about += load.TEXT.format(
"This will only manage the '.py' files for you under 'custom_components/', "
"you still need to manually add/remove entries in 'configuration.yaml'.")
about += load.BREAK
about += load.TEXT.format(
"All components that are trackable with this has a 'REPOSITORY' button, "
"use that to verify the content before installing/upgrading.")
about += load.BREAK
about += load.TEXT.format(
"Do not install/upgrade something with this that you do not trust.")
about += load.HR
lines = load.LINE.format(
type='installed_version', text='installed version: '+installed_version)
text = load.LINK.format(
url='https://github.com/ludeeus/custom-component-store',
target='_blank', style='', id='', htmlclass='', extra='',
text='Project @ GitHub')
lines += load.LINE.format(type='github_link', text=text)
text = load.LINK.format(
url='https://hub.docker.com/r/ludeeus/custom-component-store',
target='_blank', style='', id='', htmlclass='', extra='',
text='Project @ Docker hub')
lines += load.LINE.format(type='docker_hub_link', text=text)
about += load.LIST.format(title='', lines=lines)
content = load.BASE_CARD.format(
title='About', content=about)
#------------------------------------------------------------------------------
customjson = load.LINK.format(
url='https://github.com/ludeeus/customjson',
target='_blank', style='', id='', htmlclass='', extra='',
text='customjson')
org = load.LINK.format(
url='https://github.com/custom-components',
target='_blank', style='', id='', htmlclass='', extra='',
text='custom-component org. on GitHub')
text = """
    All the components/platforms that you can manage with this need to
    be added to {customjson}, by default all components/platforms that follow
    the standard in the {org} are manageable, other components/platforms would
need to be added to {customjson} before they can show up here.
""".format(customjson=customjson, org=org)
components = load.TEXT.format(text)
components += load.TEXT.format(
"The platform structure needs to be as embedded platforms to be managed here.")
content += load.BASE_CARD.format(
title='Custom Components', content=components)
#------------------------------------------------------------------------------
notice = load.TEXT.format("This project uses many recources to work:")
links = [
{
'link': 'https://github.com/ludeeus/customjson',
'text': 'customjson'
},
{
'link': 'https://fontawesome.com/',
'text': 'Font Awesome'
},
{
'link': 'http://fonts.googleapis.com/css?family=Roboto',
'text': 'fonts.googleapis.com'
},
{
'link': 'https://materializecss.com',
'text': 'materialize'
},
{
'link': 'https://aiohttp.readthedocs.io/en/stable/',
'text': 'aiohttp'
},
{
'link': 'https://github.com/just-containers/s6-overlay',
'text': 's6-overlay'
},
{
'link': 'https://redis.io/',
'text': 'redis'
}
]
for link in links:
notice += load.LINK.format(
url=link['link'], target='_blank', style='', id='', htmlclass='',
extra='', text=link['text'])
notice += load.BREAK
content += load.BASE_CARD.format(
title='Notice', content=notice)
#------------------------------------------------------------------------------
text = load.TEXT.format(
"This is in the footer of every page here, but I think that it belongs here to.")
text += load.BREAK
text += load.TEXT.format(
"This site and the items here is not created, developed, affiliated, "
"supported, maintained or endorsed by Home Assistant.")
content += load.NO_TITLE_CARD.format(text)
#------------------------------------------------------------------------------
text = load.LINK.format(
url='https://www.buymeacoffee.com/ludeeus', target='_blank', style='',
id='', htmlclass='', extra='', text=load.COFFEEICON+'Buy me a coffee? :D')
content += load.NO_TITLE_CARD.format(text)
#------------------------------------------------------------------------------
html = load.TOP
html += load.BASE.format(content)
html += load.END
return html
| [
"componentstore.resources.html.NO_TITLE_CARD.format",
"componentstore.resources.html.TEXT.format",
"componentstore.resources.html.BASE_CARD.format",
"componentstore.resources.html.LINK.format",
"componentstore.resources.html.LIST.format",
"componentstore.resources.html.LINE.format",
"componentstore.resources.html.BASE.format"
] | [((421, 525), 'componentstore.resources.html.TEXT.format', 'load.TEXT.format', (['"""This tool can help you manage your \'custom_components\' for Home Assistant."""'], {}), '(\n "This tool can help you manage your \'custom_components\' for Home Assistant."\n )\n', (437, 525), True, 'import componentstore.resources.html as load\n'), ((562, 736), 'componentstore.resources.html.TEXT.format', 'load.TEXT.format', (['"""This will only manage the \'.py\' files for you under \'custom_components/\', you still need to manually add/remove entries in \'configuration.yaml\'."""'], {}), '(\n "This will only manage the \'.py\' files for you under \'custom_components/\', you still need to manually add/remove entries in \'configuration.yaml\'."\n )\n', (578, 736), True, 'import componentstore.resources.html as load\n'), ((784, 944), 'componentstore.resources.html.TEXT.format', 'load.TEXT.format', (['"""All components that are trackable with this has a \'REPOSITORY\' button, use that to verify the content before installing/upgrading."""'], {}), '(\n "All components that are trackable with this has a \'REPOSITORY\' button, use that to verify the content before installing/upgrading."\n )\n', (800, 944), True, 'import componentstore.resources.html as load\n'), ((992, 1082), 'componentstore.resources.html.TEXT.format', 'load.TEXT.format', (['"""Do not install/upgrade something with this that you do not trust."""'], {}), "(\n 'Do not install/upgrade something with this that you do not trust.')\n", (1008, 1082), True, 'import componentstore.resources.html as load\n'), ((1121, 1215), 'componentstore.resources.html.LINE.format', 'load.LINE.format', ([], {'type': '"""installed_version"""', 'text': "('installed version: ' + installed_version)"}), "(type='installed_version', text='installed version: ' +\n installed_version)\n", (1137, 1215), True, 'import componentstore.resources.html as load\n'), ((1231, 1396), 'componentstore.resources.html.LINK.format', 'load.LINK.format', ([], {'url': '"""https://github.com/ludeeus/custom-component-store"""', 'target': '"""_blank"""', 'style': '""""""', 'id': '""""""', 'htmlclass': '""""""', 'extra': '""""""', 'text': '"""Project @ GitHub"""'}), "(url='https://github.com/ludeeus/custom-component-store',\n target='_blank', style='', id='', htmlclass='', extra='', text=\n 'Project @ GitHub')\n", (1247, 1396), True, 'import componentstore.resources.html as load\n'), ((1426, 1473), 'componentstore.resources.html.LINE.format', 'load.LINE.format', ([], {'type': '"""github_link"""', 'text': 'text'}), "(type='github_link', text=text)\n", (1442, 1473), True, 'import componentstore.resources.html as load\n'), ((1486, 1667), 'componentstore.resources.html.LINK.format', 'load.LINK.format', ([], {'url': '"""https://hub.docker.com/r/ludeeus/custom-component-store"""', 'target': '"""_blank"""', 'style': '""""""', 'id': '""""""', 'htmlclass': '""""""', 'extra': '""""""', 'text': '"""Project @ Docker hub"""'}), "(url=\n 'https://hub.docker.com/r/ludeeus/custom-component-store', target=\n '_blank', style='', id='', htmlclass='', extra='', text=\n 'Project @ Docker hub')\n", (1502, 1667), True, 'import componentstore.resources.html as load\n'), ((1691, 1742), 'componentstore.resources.html.LINE.format', 'load.LINE.format', ([], {'type': '"""docker_hub_link"""', 'text': 'text'}), "(type='docker_hub_link', text=text)\n", (1707, 1742), True, 'import componentstore.resources.html as load\n'), ((1757, 1796), 'componentstore.resources.html.LIST.format', 'load.LIST.format', ([], {'title': '""""""', 
'lines': 'lines'}), "(title='', lines=lines)\n", (1773, 1796), True, 'import componentstore.resources.html as load\n'), ((1812, 1863), 'componentstore.resources.html.BASE_CARD.format', 'load.BASE_CARD.format', ([], {'title': '"""About"""', 'content': 'about'}), "(title='About', content=about)\n", (1833, 1863), True, 'import componentstore.resources.html as load\n'), ((1972, 2115), 'componentstore.resources.html.LINK.format', 'load.LINK.format', ([], {'url': '"""https://github.com/ludeeus/customjson"""', 'target': '"""_blank"""', 'style': '""""""', 'id': '""""""', 'htmlclass': '""""""', 'extra': '""""""', 'text': '"""customjson"""'}), "(url='https://github.com/ludeeus/customjson', target=\n '_blank', style='', id='', htmlclass='', extra='', text='customjson')\n", (1988, 2115), True, 'import componentstore.resources.html as load\n'), ((2147, 2315), 'componentstore.resources.html.LINK.format', 'load.LINK.format', ([], {'url': '"""https://github.com/custom-components"""', 'target': '"""_blank"""', 'style': '""""""', 'id': '""""""', 'htmlclass': '""""""', 'extra': '""""""', 'text': '"""custom-component org. on GitHub"""'}), "(url='https://github.com/custom-components', target=\n '_blank', style='', id='', htmlclass='', extra='', text=\n 'custom-component org. on GitHub')\n", (2163, 2315), True, 'import componentstore.resources.html as load\n'), ((2708, 2730), 'componentstore.resources.html.TEXT.format', 'load.TEXT.format', (['text'], {}), '(text)\n', (2724, 2730), True, 'import componentstore.resources.html as load\n'), ((2749, 2855), 'componentstore.resources.html.TEXT.format', 'load.TEXT.format', (['"""The platform structure needs to be as embedded platforms to be managed here."""'], {}), "(\n 'The platform structure needs to be as embedded platforms to be managed here.'\n )\n", (2765, 2855), True, 'import componentstore.resources.html as load\n'), ((2871, 2939), 'componentstore.resources.html.BASE_CARD.format', 'load.BASE_CARD.format', ([], {'title': '"""Custom Components"""', 'content': 'components'}), "(title='Custom Components', content=components)\n", (2892, 2939), True, 'import componentstore.resources.html as load\n'), ((3044, 3105), 'componentstore.resources.html.TEXT.format', 'load.TEXT.format', (['"""This project uses many recources to work:"""'], {}), "('This project uses many recources to work:')\n", (3060, 3105), True, 'import componentstore.resources.html as load\n'), ((4135, 4188), 'componentstore.resources.html.BASE_CARD.format', 'load.BASE_CARD.format', ([], {'title': '"""Notice"""', 'content': 'notice'}), "(title='Notice', content=notice)\n", (4156, 4188), True, 'import componentstore.resources.html as load\n'), ((4291, 4399), 'componentstore.resources.html.TEXT.format', 'load.TEXT.format', (['"""This is in the footer of every page here, but I think that it belongs here to."""'], {}), "(\n 'This is in the footer of every page here, but I think that it belongs here to.'\n )\n", (4307, 4399), True, 'import componentstore.resources.html as load\n'), ((4434, 4584), 'componentstore.resources.html.TEXT.format', 'load.TEXT.format', (['"""This site and the items here is not created, developed, affiliated, supported, maintained or endorsed by Home Assistant."""'], {}), "(\n 'This site and the items here is not created, developed, affiliated, supported, maintained or endorsed by Home Assistant.'\n )\n", (4450, 4584), True, 'import componentstore.resources.html as load\n'), ((4611, 4642), 'componentstore.resources.html.NO_TITLE_CARD.format', 'load.NO_TITLE_CARD.format', (['text'], {}), 
'(text)\n', (4636, 4642), True, 'import componentstore.resources.html as load\n'), ((4736, 4909), 'componentstore.resources.html.LINK.format', 'load.LINK.format', ([], {'url': '"""https://www.buymeacoffee.com/ludeeus"""', 'target': '"""_blank"""', 'style': '""""""', 'id': '""""""', 'htmlclass': '""""""', 'extra': '""""""', 'text': "(load.COFFEEICON + 'Buy me a coffee? :D')"}), "(url='https://www.buymeacoffee.com/ludeeus', target=\n '_blank', style='', id='', htmlclass='', extra='', text=load.COFFEEICON +\n 'Buy me a coffee? :D')\n", (4752, 4909), True, 'import componentstore.resources.html as load\n'), ((4932, 4963), 'componentstore.resources.html.NO_TITLE_CARD.format', 'load.NO_TITLE_CARD.format', (['text'], {}), '(text)\n', (4957, 4963), True, 'import componentstore.resources.html as load\n'), ((5078, 5103), 'componentstore.resources.html.BASE.format', 'load.BASE.format', (['content'], {}), '(content)\n', (5094, 5103), True, 'import componentstore.resources.html as load\n'), ((3953, 4068), 'componentstore.resources.html.LINK.format', 'load.LINK.format', ([], {'url': "link['link']", 'target': '"""_blank"""', 'style': '""""""', 'id': '""""""', 'htmlclass': '""""""', 'extra': '""""""', 'text': "link['text']"}), "(url=link['link'], target='_blank', style='', id='',\n htmlclass='', extra='', text=link['text'])\n", (3969, 4068), True, 'import componentstore.resources.html as load\n')] |
import numpy
from act.act_jugador.pase import Pase
from config import Config
config = Config()
class Saque_porteria(Pase):
def __init__(self, agente) -> None:
super().__init__(agente)
self.__descripcion = f"El portero {self.agente.nombre} saca de porteria a "
self.tipo = config.ACCIONES.JUGADOR.ACT_SAQUE_PORTERIA
self.tiempo = 0.17
def descripcion(self):
return self.__descripcion
def precondicion(self, partido) -> bool:
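        # A goal kick is allowed when the opposing team's last action was either a failed
        # ball reception that went over the goal line or a shot on goal that went wide.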
return ((partido.ultima_accion.tipo == config.ACCIONES.JUGADOR.ACT_RECIBIR_BALON and partido.ultima_accion.estado == config.ACCIONES.ESTADO.RECIBIR_BALON.NO_RECIBE_BALON and partido.ultima_accion.sub_estado == config.ACCIONES.ESTADO.RECIBIR_BALON.LINEA_FINAL) or \
(partido.ultima_accion.tipo == config.ACCIONES.JUGADOR.ACT_TIRO_PORTERIA and partido.ultima_accion.estado == config.ACCIONES.ESTADO.TIRO_PORTERIA.POR_FUERA)) and \
partido.ultima_accion.agente.equipo != self.agente.equipo
def poscondicion(self, partido):
partido.pos_balon = None
partido.estado = config.PARTIDO.ESTADO.EN_JUEGO
partido.ultima_accion = self
| [
"config.Config"
] | [((87, 95), 'config.Config', 'Config', ([], {}), '()\n', (93, 95), False, 'from config import Config\n')] |
#!/usr/bin/python3
#
# Copyright (c) 2012 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from __future__ import annotations
import atexit
import os
import shlex
import signal
import sys
from subprocess import Popen, TimeoutExpired
from typing import IO, Any, Iterable, List, Optional
# List of running processes; can be terminated with `terminate_all_processes`
_RUNNING_PROCS: List[Any] = []
def quote_args(args) -> str:
if isinstance(args, (str, bytes, os.PathLike)):
args = [args]
values = []
for arg in args:
if isinstance(arg, os.PathLike):
arg = os.fspath(arg)
elif isinstance(arg, bytes):
arg = arg.decode("utf-8", errors="replace")
values.append(shlex.quote(arg))
return " ".join(values)
def join_procs(procs: Iterable[Popen[Any]], out: IO[str] = sys.stderr):
"""Joins a set of Popen processes. If a processes fail, the remaining
processes are terminated. The function returns a list of return-code,
containing the result of each call. Status messages are written to STDERR
by default.
"""
sleep_time = 0.05
commands = list(enumerate(procs))
return_codes = [None] * len(commands) # type: List[Optional[int]]
assert all(hasattr(cmd, "args") for (_, cmd) in commands)
print("Joinining subprocesses:", file=out)
while commands and not any(return_codes):
try:
# Wait for arbitrary command
commands[0][1].wait(sleep_time if len(commands) > 1 else None)
except TimeoutExpired:
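            # No process finished within the timeout; back off exponentially (capped at 1 second).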
sleep_time = min(1, sleep_time * 2)
for (index, command) in list(commands):
if command.poll() is not None:
return_code = command.wait()
return_codes[index] = return_code
commands.remove((index, command))
sleep_time = 0.05
if return_code < 0:
return_code = signal.Signals(-return_code).name
print(f" - Command finished: {quote_args(command.args)}", file=out)
print(f" Return-code: {return_code}", file=out)
if any(return_codes):
for index, command in commands:
print(f" - Terminating command: {quote_args(command.args)}", file=out)
command.terminate()
return_codes[index] = command.wait()
print("Errors occured during processing!", file=out)
return return_codes
def register_process(proc):
"""Register a process for automatic/forced termination."""
_RUNNING_PROCS.append(proc)
def unregister_process(proc):
"""Unregister a process for automatic/forced termination."""
if proc in _RUNNING_PROCS:
_RUNNING_PROCS.remove(proc)
@atexit.register
def terminate_all_processes():
"""Terminate all registered proceses. Must be called in signal handlers."""
while _RUNNING_PROCS:
proc = _RUNNING_PROCS.pop()
try:
proc.terminate()
except OSError:
# Ignore already closed processes, etc.
pass
| [
"signal.Signals",
"os.fspath",
"shlex.quote"
] | [((1626, 1640), 'os.fspath', 'os.fspath', (['arg'], {}), '(arg)\n', (1635, 1640), False, 'import os\n'), ((1757, 1773), 'shlex.quote', 'shlex.quote', (['arg'], {}), '(arg)\n', (1768, 1773), False, 'import shlex\n'), ((2966, 2994), 'signal.Signals', 'signal.Signals', (['(-return_code)'], {}), '(-return_code)\n', (2980, 2994), False, 'import signal\n')] |
import sys
import traceback
from django.core.management.base import BaseCommand, CommandError
from books.run_queries import run
class Command(BaseCommand):
help = "run tests"
def add_arguments(self, parser):
parser.add_argument(
"--funcname", default=None, action="store", dest="funcname", type=str
)
parser.add_argument("--v", action="store", dest="verbose", default=0, type=int)
parser.add_argument("--sql", default=False, action="store_true", dest="sql")
def handle(self, *args, **options):
run(options)
| [
"books.run_queries.run"
] | [((568, 580), 'books.run_queries.run', 'run', (['options'], {}), '(options)\n', (571, 580), False, 'from books.run_queries import run\n')] |
#!/bin/python3
import requests
import mimetypes
import sys
from pathlib import Path
from datetime import datetime
import time
import socket
from util import *
from headers import *
# SYNOPSIS:
# To download posts from an artist:
# python3 grab-artist.py mixppl
# 2 minute timeout in case something gets stuck.
socket.setdefaulttimeout(120)
artist_name = str.lower(sys.argv[1])
# Create artist directory if it doesn't exist
artist_directory = "./downloads/" + slugify(artist_name) + "/"
Path(artist_directory).mkdir(parents=True, exist_ok=True)
# Create directory for already saved posts, and generate filename
Path("./already_saved/").mkdir(parents=True, exist_ok=True)
# Create directory for logging, and generate filename
Path("./logs/").mkdir(parents=True, exist_ok=True)
# Request project info for artist
lastPageReached = False
pageCounter = 1
try:
while not lastPageReached:
logMsg(f"Fetching page {pageCounter} of {artist_name}...", "okndl", artist_name)
projects_data = requests.get(f"https://www.artstation.com/users/{artist_name}/projects.json?page={pageCounter}", headers=project_fetch_headers)
projects = projects_data.json()["data"]
page_num_projects = len(projects)
lastPageReached = page_num_projects < 50 # Each full page contains 50 projects. If it has less than 50, it is the last page
if not lastPageReached:
pageCounter = pageCounter + 1
logMsg(f"Page contains {page_num_projects} projects...", "okndl", artist_name)
else:
logMsg(f"Page contains {page_num_projects} projects... That's the last page!", "okndl", artist_name)
# For each project in all of the artists projects
for project in projects:
project_name = project["title"]
project_hash_id = project["hash_id"]
logMsg(f"Found project '{project_name}' with id {project_hash_id}. Fetching more info about it...", "okndl", artist_name)
# Have we already downloaded this post?
if not isPostAlreadySaved(project_hash_id, artist_name):
# Fetch information about the project
project_info = requests.get(f"https://www.artstation.com/projects/{project_hash_id}.json", headers=project_fetch_headers)
assets = project_info.json()["assets"]
# For each asset in the project (might be multiple images)
for asset in assets:
asset_type = asset["asset_type"]
asset_position = asset["position"]
# If the asset is an image
if asset_type == "image":
asset_image_url = asset["image_url"]
# Generate a download filename
filename = artist_directory + slugify(project_name[:60] + "_" + project_hash_id + "_" + str(asset_position)) + "." + extensionFromUrl(asset_image_url)
logMsg(f"Found image-asset for project '{project_name}' [{project_hash_id}] at position {asset_position}. Downloading to '{filename}'...", "okdl", artist_name)
# Download it
downloadMedia(asset_image_url, filename)
else:
logMsg(f"Found non-image-asset for project '{project_name}' [{project_hash_id}] at position {asset_position}. Skipping...", "okdl", artist_name)
# After downloading all assets, mark the project as downloaded.
markPostAsSaved(project_hash_id, artist_name)
# Project is already downloaded
else:
logMsg(f"Skipping project '{project_name}' [{project_hash_id}] because it is already downloaded.", "okndl", artist_name)
logMsg(f"Finished all pages of {artist_name}... Total pages of this artist scanned: {pageCounter}", "okndl", artist_name)
except socket.timeout as exc:
logMsg("Socket timeout of two minutes reached! We'll get 'em next time, boys!", "err", artist_name)
except BaseException as exc:
logMsg("Failed for some reason!: " + repr(exc), "err", artist_name)
| [
"requests.get",
"socket.setdefaulttimeout",
"pathlib.Path"
] | [((313, 342), 'socket.setdefaulttimeout', 'socket.setdefaulttimeout', (['(120)'], {}), '(120)\n', (337, 342), False, 'import socket\n'), ((491, 513), 'pathlib.Path', 'Path', (['artist_directory'], {}), '(artist_directory)\n', (495, 513), False, 'from pathlib import Path\n'), ((616, 640), 'pathlib.Path', 'Path', (['"""./already_saved/"""'], {}), "('./already_saved/')\n", (620, 640), False, 'from pathlib import Path\n'), ((731, 746), 'pathlib.Path', 'Path', (['"""./logs/"""'], {}), "('./logs/')\n", (735, 746), False, 'from pathlib import Path\n'), ((1007, 1144), 'requests.get', 'requests.get', (['f"""https://www.artstation.com/users/{artist_name}/projects.json?page={pageCounter}"""'], {'headers': 'project_fetch_headers'}), "(\n f'https://www.artstation.com/users/{artist_name}/projects.json?page={pageCounter}'\n , headers=project_fetch_headers)\n", (1019, 1144), False, 'import requests\n'), ((2184, 2294), 'requests.get', 'requests.get', (['f"""https://www.artstation.com/projects/{project_hash_id}.json"""'], {'headers': 'project_fetch_headers'}), "(f'https://www.artstation.com/projects/{project_hash_id}.json',\n headers=project_fetch_headers)\n", (2196, 2294), False, 'import requests\n')] |
from abc import ABC
import cv2
class StainingColor:
def __init__(self, hsv_min, hsv_max):
self.hsv_min = hsv_min
self.hsv_max = hsv_max
@property
def min(self):
return self.hsv_min
@property
def max(self):
return self.hsv_max
BLACK = StainingColor((0, 0, 0), (180, 255, 100))
ORANGE = StainingColor((15, 120, 100), (40, 255, 255))
PURPLE = StainingColor((110, 60, 70), (160, 255, 255))
BLUE = StainingColor((85, 80, 120), (110, 255, 255))
BROWN = StainingColor((0, 100, 0), (15, 255, 255))
class StainingABC(ABC):
def __init__(self, name: str):
self.name = name
class StainingIHC(StainingABC):
def __init__(self, name: str, hsv_min, hsv_max):
super().__init__(name)
self.hsv_min = hsv_min
self.hsv_max = hsv_max
def get_mask(self, image):
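        # The image is expected to be in HSV colour space; returns a binary mask of
        # the pixels that fall inside this staining's HSV range.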
return cv2.inRange(image, self.hsv_min, self.hsv_max)
CD107A = StainingIHC('CD107a', BLACK.min, BLACK.max)
SOX10 = StainingIHC('Sox10', ORANGE.min, ORANGE.max)
CD8 = StainingIHC('CD8', PURPLE.min, PURPLE.max)
DAPI = StainingIHC('BLUE', BLUE.min, BLUE.max)
class ConfocalStaining(StainingABC):
def __init__(self, name: str, channel: int, threshold=0):
super().__init__(name)
self.channel = channel
self.threshold = threshold
class StainingIntensityRange(object):
def __init__(self, min_range: tuple, max_range: tuple):
self._min = min_range
self._max = max_range
def get_min(self):
return self._min
def get_max(self):
return self._max
class ConfigurableStainingIntensityRange(StainingIntensityRange):
def __init__(self, min_range: tuple, max_range: tuple):
super().__init__(min_range, max_range)
def set_min(self, min_range: tuple):
self._min = min_range
def set_max(self, max_range: tuple):
self._max = max_range
# FM4_64 = ConfocalStaining('FM4-64')
# PKH_67 = ConfocalStaining('PKH-67')
# TUBULIN = ConfocalStaining('Tubulin')
# CASPASE_3 = ConfocalStaining('Caspase-3')
| [
"cv2.inRange"
] | [((874, 920), 'cv2.inRange', 'cv2.inRange', (['image', 'self.hsv_min', 'self.hsv_max'], {}), '(image, self.hsv_min, self.hsv_max)\n', (885, 920), False, 'import cv2\n')] |
import turtle
import random
import time
# sets background
bg = turtle.Screen()
bg.bgcolor("black")
# Bottom Line 1
turtle.penup()
turtle.goto(-170, -180)
turtle.color("white")
turtle.pendown()
turtle.forward(350)
# Mid Line 2
turtle.penup()
turtle.goto(-160, -150)
turtle.color("white")
turtle.pendown()
turtle.forward(300)
# First Line 3
turtle.penup()
turtle.goto(-150, -120)
turtle.color("white")
turtle.pendown()
turtle.forward(250)
# Cake
turtle.penup()
turtle.goto(-100, -100)
turtle.color("white")
turtle.begin_fill()
turtle.pendown()
turtle.forward(140)
turtle.left(90)
turtle.forward(95)
turtle.left(90)
turtle.forward(140)
turtle.left(90)
turtle.forward(95)
turtle.end_fill()
# Candles
turtle.penup()
turtle.goto(-90, 0)
turtle.color("red")
turtle.left(180)
turtle.pendown()
turtle.forward(20)
turtle.penup()
turtle.goto(-60, 0)
turtle.color("white")
turtle.pendown()
turtle.forward(20)
turtle.penup()
turtle.goto(-30, 0)
turtle.color("yellow")
turtle.pendown()
turtle.forward(20)
turtle.penup()
turtle.goto(0, 0)
turtle.color("green")
turtle.pendown()
turtle.forward(20)
turtle.penup()
turtle.goto(30, 0)
turtle.color("purple")
turtle.pendown()
turtle.forward(20)
# Decoration
colors = ["red", "orange", "black", "green", "blue", "purple", "black"]
turtle.penup()
turtle.goto(-40, -50)
turtle.pendown()
for each_color in colors:
angle = 360 / len(colors)
turtle.color(each_color)
turtle.circle(10)
turtle.right(angle)
turtle.forward(10)
time.sleep(2)
turtle.clear()
turtle.bgpic("<your image name>.gif")
time.sleep(3)
turtle.clear()
bg.bgcolor("black")
# Happy Birthday message
turtle.clear()
turtle.penup()
turtle.goto(-150, 50)
turtle.color("red")
turtle.pendown()
turtle.write("Happy Birthday <birthday boy/girl name>!!", move=False, align="center", font=("Arial", 40, "normal"))
time.sleep(5)
turtle.clear()
turtle.write("May God bless You!!", move=False, align="center", font=("Arial", 40, "normal"))
time.sleep(5)
turtle.color("black")
| [
"turtle.begin_fill",
"turtle.pendown",
"turtle.penup",
"turtle.circle",
"turtle.color",
"time.sleep",
"turtle.forward",
"turtle.bgpic",
"turtle.right",
"turtle.end_fill",
"turtle.goto",
"turtle.write",
"turtle.left",
"turtle.clear",
"turtle.Screen"
] | [((64, 79), 'turtle.Screen', 'turtle.Screen', ([], {}), '()\n', (77, 79), False, 'import turtle\n'), ((116, 130), 'turtle.penup', 'turtle.penup', ([], {}), '()\n', (128, 130), False, 'import turtle\n'), ((131, 154), 'turtle.goto', 'turtle.goto', (['(-170)', '(-180)'], {}), '(-170, -180)\n', (142, 154), False, 'import turtle\n'), ((155, 176), 'turtle.color', 'turtle.color', (['"""white"""'], {}), "('white')\n", (167, 176), False, 'import turtle\n'), ((177, 193), 'turtle.pendown', 'turtle.pendown', ([], {}), '()\n', (191, 193), False, 'import turtle\n'), ((194, 213), 'turtle.forward', 'turtle.forward', (['(350)'], {}), '(350)\n', (208, 213), False, 'import turtle\n'), ((228, 242), 'turtle.penup', 'turtle.penup', ([], {}), '()\n', (240, 242), False, 'import turtle\n'), ((243, 266), 'turtle.goto', 'turtle.goto', (['(-160)', '(-150)'], {}), '(-160, -150)\n', (254, 266), False, 'import turtle\n'), ((267, 288), 'turtle.color', 'turtle.color', (['"""white"""'], {}), "('white')\n", (279, 288), False, 'import turtle\n'), ((289, 305), 'turtle.pendown', 'turtle.pendown', ([], {}), '()\n', (303, 305), False, 'import turtle\n'), ((306, 325), 'turtle.forward', 'turtle.forward', (['(300)'], {}), '(300)\n', (320, 325), False, 'import turtle\n'), ((342, 356), 'turtle.penup', 'turtle.penup', ([], {}), '()\n', (354, 356), False, 'import turtle\n'), ((357, 380), 'turtle.goto', 'turtle.goto', (['(-150)', '(-120)'], {}), '(-150, -120)\n', (368, 380), False, 'import turtle\n'), ((381, 402), 'turtle.color', 'turtle.color', (['"""white"""'], {}), "('white')\n", (393, 402), False, 'import turtle\n'), ((403, 419), 'turtle.pendown', 'turtle.pendown', ([], {}), '()\n', (417, 419), False, 'import turtle\n'), ((420, 439), 'turtle.forward', 'turtle.forward', (['(250)'], {}), '(250)\n', (434, 439), False, 'import turtle\n'), ((448, 462), 'turtle.penup', 'turtle.penup', ([], {}), '()\n', (460, 462), False, 'import turtle\n'), ((463, 486), 'turtle.goto', 'turtle.goto', (['(-100)', '(-100)'], {}), '(-100, -100)\n', (474, 486), False, 'import turtle\n'), ((487, 508), 'turtle.color', 'turtle.color', (['"""white"""'], {}), "('white')\n", (499, 508), False, 'import turtle\n'), ((509, 528), 'turtle.begin_fill', 'turtle.begin_fill', ([], {}), '()\n', (526, 528), False, 'import turtle\n'), ((529, 545), 'turtle.pendown', 'turtle.pendown', ([], {}), '()\n', (543, 545), False, 'import turtle\n'), ((546, 565), 'turtle.forward', 'turtle.forward', (['(140)'], {}), '(140)\n', (560, 565), False, 'import turtle\n'), ((566, 581), 'turtle.left', 'turtle.left', (['(90)'], {}), '(90)\n', (577, 581), False, 'import turtle\n'), ((582, 600), 'turtle.forward', 'turtle.forward', (['(95)'], {}), '(95)\n', (596, 600), False, 'import turtle\n'), ((601, 616), 'turtle.left', 'turtle.left', (['(90)'], {}), '(90)\n', (612, 616), False, 'import turtle\n'), ((617, 636), 'turtle.forward', 'turtle.forward', (['(140)'], {}), '(140)\n', (631, 636), False, 'import turtle\n'), ((637, 652), 'turtle.left', 'turtle.left', (['(90)'], {}), '(90)\n', (648, 652), False, 'import turtle\n'), ((653, 671), 'turtle.forward', 'turtle.forward', (['(95)'], {}), '(95)\n', (667, 671), False, 'import turtle\n'), ((672, 689), 'turtle.end_fill', 'turtle.end_fill', ([], {}), '()\n', (687, 689), False, 'import turtle\n'), ((701, 715), 'turtle.penup', 'turtle.penup', ([], {}), '()\n', (713, 715), False, 'import turtle\n'), ((716, 735), 'turtle.goto', 'turtle.goto', (['(-90)', '(0)'], {}), '(-90, 0)\n', (727, 735), False, 'import turtle\n'), ((736, 755), 'turtle.color', 
'turtle.color', (['"""red"""'], {}), "('red')\n", (748, 755), False, 'import turtle\n'), ((756, 772), 'turtle.left', 'turtle.left', (['(180)'], {}), '(180)\n', (767, 772), False, 'import turtle\n'), ((773, 789), 'turtle.pendown', 'turtle.pendown', ([], {}), '()\n', (787, 789), False, 'import turtle\n'), ((790, 808), 'turtle.forward', 'turtle.forward', (['(20)'], {}), '(20)\n', (804, 808), False, 'import turtle\n'), ((810, 824), 'turtle.penup', 'turtle.penup', ([], {}), '()\n', (822, 824), False, 'import turtle\n'), ((825, 844), 'turtle.goto', 'turtle.goto', (['(-60)', '(0)'], {}), '(-60, 0)\n', (836, 844), False, 'import turtle\n'), ((845, 866), 'turtle.color', 'turtle.color', (['"""white"""'], {}), "('white')\n", (857, 866), False, 'import turtle\n'), ((867, 883), 'turtle.pendown', 'turtle.pendown', ([], {}), '()\n', (881, 883), False, 'import turtle\n'), ((884, 902), 'turtle.forward', 'turtle.forward', (['(20)'], {}), '(20)\n', (898, 902), False, 'import turtle\n'), ((904, 918), 'turtle.penup', 'turtle.penup', ([], {}), '()\n', (916, 918), False, 'import turtle\n'), ((919, 938), 'turtle.goto', 'turtle.goto', (['(-30)', '(0)'], {}), '(-30, 0)\n', (930, 938), False, 'import turtle\n'), ((939, 961), 'turtle.color', 'turtle.color', (['"""yellow"""'], {}), "('yellow')\n", (951, 961), False, 'import turtle\n'), ((962, 978), 'turtle.pendown', 'turtle.pendown', ([], {}), '()\n', (976, 978), False, 'import turtle\n'), ((979, 997), 'turtle.forward', 'turtle.forward', (['(20)'], {}), '(20)\n', (993, 997), False, 'import turtle\n'), ((999, 1013), 'turtle.penup', 'turtle.penup', ([], {}), '()\n', (1011, 1013), False, 'import turtle\n'), ((1014, 1031), 'turtle.goto', 'turtle.goto', (['(0)', '(0)'], {}), '(0, 0)\n', (1025, 1031), False, 'import turtle\n'), ((1032, 1053), 'turtle.color', 'turtle.color', (['"""green"""'], {}), "('green')\n", (1044, 1053), False, 'import turtle\n'), ((1054, 1070), 'turtle.pendown', 'turtle.pendown', ([], {}), '()\n', (1068, 1070), False, 'import turtle\n'), ((1071, 1089), 'turtle.forward', 'turtle.forward', (['(20)'], {}), '(20)\n', (1085, 1089), False, 'import turtle\n'), ((1091, 1105), 'turtle.penup', 'turtle.penup', ([], {}), '()\n', (1103, 1105), False, 'import turtle\n'), ((1106, 1124), 'turtle.goto', 'turtle.goto', (['(30)', '(0)'], {}), '(30, 0)\n', (1117, 1124), False, 'import turtle\n'), ((1125, 1147), 'turtle.color', 'turtle.color', (['"""purple"""'], {}), "('purple')\n", (1137, 1147), False, 'import turtle\n'), ((1148, 1164), 'turtle.pendown', 'turtle.pendown', ([], {}), '()\n', (1162, 1164), False, 'import turtle\n'), ((1165, 1183), 'turtle.forward', 'turtle.forward', (['(20)'], {}), '(20)\n', (1179, 1183), False, 'import turtle\n'), ((1270, 1284), 'turtle.penup', 'turtle.penup', ([], {}), '()\n', (1282, 1284), False, 'import turtle\n'), ((1285, 1306), 'turtle.goto', 'turtle.goto', (['(-40)', '(-50)'], {}), '(-40, -50)\n', (1296, 1306), False, 'import turtle\n'), ((1307, 1323), 'turtle.pendown', 'turtle.pendown', ([], {}), '()\n', (1321, 1323), False, 'import turtle\n'), ((1480, 1493), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1490, 1493), False, 'import time\n'), ((1494, 1508), 'turtle.clear', 'turtle.clear', ([], {}), '()\n', (1506, 1508), False, 'import turtle\n'), ((1509, 1546), 'turtle.bgpic', 'turtle.bgpic', (['"""<your image name>.gif"""'], {}), "('<your image name>.gif')\n", (1521, 1546), False, 'import turtle\n'), ((1547, 1560), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1557, 1560), False, 'import time\n'), ((1561, 1575), 
'turtle.clear', 'turtle.clear', ([], {}), '()\n', (1573, 1575), False, 'import turtle\n'), ((1623, 1637), 'turtle.clear', 'turtle.clear', ([], {}), '()\n', (1635, 1637), False, 'import turtle\n'), ((1638, 1652), 'turtle.penup', 'turtle.penup', ([], {}), '()\n', (1650, 1652), False, 'import turtle\n'), ((1653, 1674), 'turtle.goto', 'turtle.goto', (['(-150)', '(50)'], {}), '(-150, 50)\n', (1664, 1674), False, 'import turtle\n'), ((1675, 1694), 'turtle.color', 'turtle.color', (['"""red"""'], {}), "('red')\n", (1687, 1694), False, 'import turtle\n'), ((1695, 1711), 'turtle.pendown', 'turtle.pendown', ([], {}), '()\n', (1709, 1711), False, 'import turtle\n'), ((1712, 1832), 'turtle.write', 'turtle.write', (['"""Happy Birthday <birthday boy/girl name>!!"""'], {'move': '(False)', 'align': '"""center"""', 'font': "('Arial', 40, 'normal')"}), "('Happy Birthday <birthday boy/girl name>!!', move=False, align\n ='center', font=('Arial', 40, 'normal'))\n", (1724, 1832), False, 'import turtle\n'), ((1828, 1841), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1838, 1841), False, 'import time\n'), ((1842, 1856), 'turtle.clear', 'turtle.clear', ([], {}), '()\n', (1854, 1856), False, 'import turtle\n'), ((1857, 1955), 'turtle.write', 'turtle.write', (['"""May God bless You!!"""'], {'move': '(False)', 'align': '"""center"""', 'font': "('Arial', 40, 'normal')"}), "('May God bless You!!', move=False, align='center', font=(\n 'Arial', 40, 'normal'))\n", (1869, 1955), False, 'import turtle\n'), ((1951, 1964), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1961, 1964), False, 'import time\n'), ((1965, 1986), 'turtle.color', 'turtle.color', (['"""black"""'], {}), "('black')\n", (1977, 1986), False, 'import turtle\n'), ((1385, 1409), 'turtle.color', 'turtle.color', (['each_color'], {}), '(each_color)\n', (1397, 1409), False, 'import turtle\n'), ((1414, 1431), 'turtle.circle', 'turtle.circle', (['(10)'], {}), '(10)\n', (1427, 1431), False, 'import turtle\n'), ((1436, 1455), 'turtle.right', 'turtle.right', (['angle'], {}), '(angle)\n', (1448, 1455), False, 'import turtle\n'), ((1460, 1478), 'turtle.forward', 'turtle.forward', (['(10)'], {}), '(10)\n', (1474, 1478), False, 'import turtle\n')] |
import os
from collections import OrderedDict
from coverage_checker.utils import get_all_path_combinations
def test_get_all_path_combinations():
facets = OrderedDict([('a', ['1', '2']), ('b', ['3', '4']), ('c', ['5', '6'])])
all_paths = get_all_path_combinations(facets)
expected_result = ['1/3/5', '1/3/6', '1/4/5', '1/4/6', '2/3/5', '2/3/6', '2/4/5', '2/4/6']
assert(all_paths == expected_result)
| [
"collections.OrderedDict",
"coverage_checker.utils.get_all_path_combinations"
] | [((161, 231), 'collections.OrderedDict', 'OrderedDict', (["[('a', ['1', '2']), ('b', ['3', '4']), ('c', ['5', '6'])]"], {}), "([('a', ['1', '2']), ('b', ['3', '4']), ('c', ['5', '6'])])\n", (172, 231), False, 'from collections import OrderedDict\n'), ((248, 281), 'coverage_checker.utils.get_all_path_combinations', 'get_all_path_combinations', (['facets'], {}), '(facets)\n', (273, 281), False, 'from coverage_checker.utils import get_all_path_combinations\n')] |
# -*- coding: utf-8 -*-
"""
Neural network - nsp
"""
import numpy as np
def nn_forward(Wi, Wo, train_inputs):
""" Propagate exaples forward through network calculating all hidden-
and output unit outputs.
Args:
Wi: Matrix with input-to-hidden weights.\n
        Wo: Matrix with hidden-to-output weights.\n
train_inputs: Matrix with example inputs as rows.
    Returns:
Vj: Matrix with hidden unit outputs as rows.\n
yj: Vector with output unit outputs as rows.
"""
# Determine the size of the problem
examples, inp = train_inputs.shape
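    # Forward pass: Vj = tanh([X, 1] @ Wi.T) and yj = [Vj, 1] @ Wo.T;
    # the appended column of ones supplies the bias input to each layer.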
    # Calculate hidden unit outputs for every example
Vj = np.concatenate((train_inputs, np.ones((examples, 1))), 1)
Vj = Vj.dot(Wi.T)
Vj = np.tanh(Vj)
    # Calculate (linear) output unit outputs for every example
yj = np.concatenate((Vj, np.ones((examples, 1))), 1)
yj = yj.dot(Wo.T)
return (Vj, yj)
| [
"numpy.ones",
"numpy.tanh"
] | [((776, 787), 'numpy.tanh', 'np.tanh', (['Vj'], {}), '(Vj)\n', (783, 787), True, 'import numpy as np\n'), ((715, 737), 'numpy.ones', 'np.ones', (['(examples, 1)'], {}), '((examples, 1))\n', (722, 737), True, 'import numpy as np\n'), ((884, 906), 'numpy.ones', 'np.ones', (['(examples, 1)'], {}), '((examples, 1))\n', (891, 906), True, 'import numpy as np\n')] |
import argparse
import json
import multiprocessing as mp
import os
import random
import time
from typing import Dict, List, Optional, Union
import lmdb
import msgpack
import tokenizers
from whitespace_repair.model import tokenizer as toklib
from whitespace_repair.utils import common, data, io
from whitespace_repair.utils.config import DataPreprocessingConfig
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str, required=True,
help="Path to config file")
return parser.parse_args()
logger = common.get_logger("DATA_PREPROCESSING")
def process_line(tokenizer: tokenizers.Tokenizer,
target_tokenizer: tokenizers.Tokenizer,
line: str,
pretokenize: bool,
ensure_equal_length: bool,
preprocessing_fn: Optional[data.PREPROCESSING_FN] = None) -> Optional[Dict[str, List[int]]]:
json_obj: Dict[str, str] = json.loads(line)
if preprocessing_fn is not None:
preprocessed_json_obj = preprocessing_fn(json_obj)
assert isinstance(preprocessed_json_obj, dict)
json_obj.update(preprocessed_json_obj)
sequence: Union[str, List[str]] = json_obj["sequence"]
if pretokenize:
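        # Pre-tokenize manually so the encoder receives a list of word strings instead of the raw sequence.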
sequence = tokenizer.pre_tokenizer.pre_tokenize_str(sequence)
sequence = [item[0] for item in sequence]
enc = tokenizer.encode(sequence, is_pretokenized=pretokenize, pair=None)
enc_dict = {"input_ids": enc.ids}
if "labels" in json_obj:
enc_dict["labels"] = json_obj["labels"]
if ensure_equal_length and len(enc_dict["labels"]) != len(enc_dict["input_ids"]):
lengths = {k: len(v) for k, v in enc_dict.items()}
logger.info(f"Skipping sample because lengths of input ids and labels are not equal: {lengths}\n"
f"{sequence} --> {enc_dict['labels']}")
return None
if "target_sequence" in json_obj:
target_sequence: Union[str, List[str]] = json_obj["target_sequence"]
if pretokenize:
target_sequence = target_tokenizer.pre_tokenizer.pre_tokenize_str(target_sequence)
target_sequence = [item[0] for item in target_sequence]
enc = target_tokenizer.encode(target_sequence, is_pretokenized=pretokenize, pair=None)
enc_dict["target_input_ids"] = enc.ids
if ensure_equal_length and len(enc_dict["target_input_ids"]) != len(enc_dict["input_ids"]):
lengths = {k: len(v) for k, v in enc_dict.items()}
logger.info(f"Skipping sample because lengths of input ids and target ids are not equal: {lengths}\n"
f"{sequence} --> {target_sequence}")
return None
return enc_dict
def process_files(queue: mp.Queue,
files: List[str],
tokenizer_path: tokenizers.Tokenizer,
target_tokenizer_path: tokenizers.Tokenizer,
pretokenize: bool,
ensure_equal_length: bool,
preprocessing_fn: data.PREPROCESSING_FN,
max_sequence_length: int,
cut_overflowing: bool) -> None:
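    # Worker process: tokenize the given files and push lists of (sample, length) tuples
    # onto the shared queue; a final None signals that this worker is done.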
tokenizer = toklib.load_tokenizer(tokenizer_path)
target_tokenizer = toklib.load_tokenizer(target_tokenizer_path)
for filepath in files:
samples = []
with open(filepath, "r", encoding="utf8") as f:
for line in f:
enc_dict = process_line(tokenizer,
target_tokenizer,
line,
pretokenize=pretokenize,
ensure_equal_length=ensure_equal_length,
preprocessing_fn=preprocessing_fn)
if enc_dict is None:
continue
enc_length = max(
len(enc_dict["input_ids"]),
len(enc_dict.get("target_input_ids", [])),
len(enc_dict.get("labels", []))
)
if enc_length > max_sequence_length:
# if a sequence overflows we still can cut it instead of skipping it
# if the corresponding config is set
# should only be used when cutting off all sequences at some specific position is a sensible thing
# to do
if cut_overflowing:
enc_dict = {k: v[:max_sequence_length] for k, v in enc_dict.items()}
enc_length = max_sequence_length
else:
continue
samples.append((enc_dict, enc_length))
queue.put(samples)
# signal to main process that this process is finished
queue.put(None)
def write_lmdb(output_dir: str,
lmdb_name: str,
files: List[str],
tokenizer_path: str,
target_tokenizer_path: str,
pretokenize: bool,
ensure_equal_length: bool,
preprocessing_fn: data.PREPROCESSING_FN,
max_sequence_length: int,
cut_overflowing: bool,
max_sequences: int) -> None:
env = lmdb.open(os.path.join(output_dir, lmdb_name),
subdir=False,
map_size=int(10e11), # approx. 100 GB
readonly=False,
meminit=False,
map_async=True,
lock=False)
start = time.monotonic()
# overwrite / drop existing database
db_handle = env.open_db()
with env.begin(write=True) as txn:
txn.drop(db_handle)
# give each process a subset of the files
queue: mp.Queue = mp.Queue()
processes = []
num_finished = 0
num_processes = int(os.environ.get("NUM_PROCESSES", min(len(os.sched_getaffinity(0)), 8, len(files))))
batch_size = len(files) // num_processes
for i in range(num_processes):
lower_idx = i * batch_size
# last process gets all remaining files which could be more than batch size
if i == (num_processes - 1):
file_batch = files[lower_idx:]
else:
file_batch = files[lower_idx:lower_idx + batch_size]
p = mp.Process(target=process_files,
args=(queue,
file_batch,
tokenizer_path,
target_tokenizer_path,
pretokenize,
ensure_equal_length,
preprocessing_fn,
max_sequence_length,
cut_overflowing))
p.start()
processes.append(p)
logger.info(f"Started worker process {p.pid} on {len(file_batch)} files")
lengths_keys = []
lengths = []
keys_keys = []
keys = []
num_sequences = 0
txn = env.begin(write=True)
txn.put(b"__files__", msgpack.dumps(files))
while True:
if num_sequences >= max_sequences:
logger.info(f"Reached maximum sequences {max_sequences}")
break
if num_finished >= num_processes:
logger.info(f"All processes are finished, processed {num_sequences} sequences")
break
samples = queue.get()
if samples is None:
num_finished += 1
continue
for enc_dict, enc_length in samples:
key = f"{num_sequences}".encode("ascii")
txn.put(key, msgpack.dumps(enc_dict))
keys.append(key)
lengths.append(enc_length)
num_sequences += 1
# commit every 1,000,000 samples so that already processed data is not lost if preprocessing is aborted
if num_sequences % 1000000 == 0:
_lengths_key = f"__lengths_upto_{num_sequences}__".encode("ascii")
_keys_key = f"__keys_upto_{num_sequences}__".encode("ascii")
txn.put(_keys_key, msgpack.dumps(keys))
txn.put(_lengths_key, msgpack.dumps(lengths))
keys_keys.append(_keys_key)
lengths_keys.append(_lengths_key)
txn.put(b"__len__", msgpack.dumps(num_sequences))
txn.put(b"__keys__", msgpack.dumps(keys_keys))
txn.put(b"__lengths__", msgpack.dumps(lengths_keys))
txn.commit()
txn = env.begin(write=True)
lengths = []
keys = []
# log progress 100 times
if num_sequences % max(max_sequences // 100, 1) == 0:
end = time.monotonic()
logger.info(
f"[{num_sequences}/{max_sequences}] Processed {num_sequences * 100 / max_sequences:.2f}% of"
f" all sequences, {common.eta_minutes((end - start) / 60, num_sequences, max_sequences)}")
if num_sequences >= max_sequences:
break
for p in processes:
logger.info(f"Stopping process {p.pid}")
p.terminate()
p.join()
logger.info(f"Successfully stopped process {p.pid}")
if len(keys) > 0 and len(lengths) > 0:
_lengths_key = f"__lengths_upto_{num_sequences}__".encode("ascii")
_keys_key = f"__keys_upto_{num_sequences}__".encode("ascii")
txn.put(_keys_key, msgpack.dumps(keys))
txn.put(_lengths_key, msgpack.dumps(lengths))
keys_keys.append(_keys_key)
lengths_keys.append(_lengths_key)
txn.put(b"__len__", msgpack.dumps(num_sequences))
txn.put(b"__keys__", msgpack.dumps(keys_keys))
txn.put(b"__lengths__", msgpack.dumps(lengths_keys))
txn.commit()
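

# Command line entry point: parse the config, set up tokenizers and the preprocessing
# function, and run write_lmdb over all configured input files.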
if __name__ == "__main__":
args = parse_args()
# disable parallelism for tokenizers explicitly
os.environ["TOKENIZERS_PARALLELISM"] = "false"
CONFIG = DataPreprocessingConfig.from_yaml(args.config)
assert isinstance(CONFIG, DataPreprocessingConfig)
logger.info(f"Using data preprocessing config:\n"
f"{CONFIG}")
os.makedirs(CONFIG.output_dir, exist_ok=True)
# save copy of config file to output directory
with open(os.path.join(CONFIG.output_dir, "config.yaml"), "w", encoding="utf8") as f:
f.write(str(CONFIG))
common.add_file_log(logger, os.path.join(CONFIG.output_dir, "logs.txt"))
tokenizer = toklib.load_tokenizer(CONFIG.tokenizer)
tokenizer_path = CONFIG.tokenizer
if CONFIG.target_tokenizer is None:
logger.info(f"No target tokenizer specified, reusing the tokenizer '{CONFIG.tokenizer}' "
f"for the target sequences if necessary")
target_tokenizer = tokenizer
target_tokenizer_path = tokenizer_path
else:
target_tokenizer = toklib.load_tokenizer(CONFIG.target_tokenizer)
target_tokenizer_path = CONFIG.target_tokenizer
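# encode a test sentence with both tokenizers so configuration problems surface
# before the actual preprocessing starts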
test_sentence = "This is a sentence to test the preprocessing functions before the data preprocessing starts."
logger.info(f"Testing tokenizer: {tokenizer.encode(test_sentence, pair=None).tokens}\n"
f"Testing target tokenizer: {target_tokenizer.encode(test_sentence, pair=None).tokens}")
if CONFIG.pretokenize:
assert tokenizer.pre_tokenizer is not None and target_tokenizer.pre_tokenizer is not None, \
"Expected that both the tokenizer and target tokenizer have pre tokenizers if pretokenize is set to true," \
" but got None."
logger.info("Pretokenize is set to True.\n"
f"Testing pre tokenizer: {tokenizer.pre_tokenizer.pre_tokenize_str(test_sentence)}\n"
f"Testing target pre tokenizer: {target_tokenizer.pre_tokenizer.pre_tokenize_str(test_sentence)}")
if CONFIG.preprocessing is None:
preprocessing_fn = None
else:
test_item = {"sequence": test_sentence, "target_sequence": test_sentence}
corruption_fns = []
for cfg in CONFIG.preprocessing:
preprocessing_fn = data.get_preprocessing_fn(cfg.type, **cfg.arguments)
logger.info(f"Testing '{cfg.type}' preprocessing function: {test_item} \u2192 "
f"{preprocessing_fn(test_item.copy())}")
corruption_fns.append(preprocessing_fn)
preprocessing_fn = data.chain_preprocessing_fns(corruption_fns)
logger.info(f"Testing chained preprocessing function: {test_item} \u2192 {preprocessing_fn(test_item.copy())}")
files = [file
for g in CONFIG.data
for file in io.glob_safe(g)]
if CONFIG.seed is not None:
rand = random.Random(CONFIG.seed)
rand.shuffle(files)
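# the number of sequences to process is the total line count over all input files,
# optionally capped by CONFIG.max_sequences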
max_sequences = sum(io.line_count(file) for file in files)
if CONFIG.max_sequences is not None:
max_sequences = min(max_sequences, CONFIG.max_sequences)
max_sequence_length = CONFIG.max_sequence_length if CONFIG.max_sequence_length is not None else float("inf")
logger.info(f"Number of sequences limited to {max_sequences:,} "
f"with a maximum sequence length of {max_sequence_length}")
start = time.monotonic()
write_lmdb(output_dir=CONFIG.output_dir,
lmdb_name=CONFIG.lmdb_name,
files=files,
tokenizer_path=tokenizer_path,
target_tokenizer_path=target_tokenizer_path,
pretokenize=CONFIG.pretokenize,
ensure_equal_length=CONFIG.ensure_equal_length,
preprocessing_fn=preprocessing_fn,
max_sequence_length=max_sequence_length,
cut_overflowing=CONFIG.cut_overflowing,
max_sequences=max_sequences)
end = time.monotonic()
logger.info(f"Finished preprocessing in {(end - start) / 60:.2f} minutes")
| [
"json.loads",
"whitespace_repair.utils.data.get_preprocessing_fn",
"argparse.ArgumentParser",
"os.makedirs",
"whitespace_repair.utils.data.chain_preprocessing_fns",
"whitespace_repair.model.tokenizer.load_tokenizer",
"time.monotonic",
"multiprocessing.Process",
"os.path.join",
"random.Random",
"whitespace_repair.utils.io.glob_safe",
"whitespace_repair.utils.common.get_logger",
"whitespace_repair.utils.io.line_count",
"os.sched_getaffinity",
"msgpack.dumps",
"multiprocessing.Queue",
"whitespace_repair.utils.common.eta_minutes",
"whitespace_repair.utils.config.DataPreprocessingConfig.from_yaml"
] | [((649, 688), 'whitespace_repair.utils.common.get_logger', 'common.get_logger', (['"""DATA_PREPROCESSING"""'], {}), "('DATA_PREPROCESSING')\n", (666, 688), False, 'from whitespace_repair.utils import common, data, io\n'), ((462, 487), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (485, 487), False, 'import argparse\n'), ((1047, 1063), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1057, 1063), False, 'import json\n'), ((3281, 3318), 'whitespace_repair.model.tokenizer.load_tokenizer', 'toklib.load_tokenizer', (['tokenizer_path'], {}), '(tokenizer_path)\n', (3302, 3318), True, 'from whitespace_repair.model import tokenizer as toklib\n'), ((3342, 3386), 'whitespace_repair.model.tokenizer.load_tokenizer', 'toklib.load_tokenizer', (['target_tokenizer_path'], {}), '(target_tokenizer_path)\n', (3363, 3386), True, 'from whitespace_repair.model import tokenizer as toklib\n'), ((5664, 5680), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (5678, 5680), False, 'import time\n'), ((5888, 5898), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (5896, 5898), True, 'import multiprocessing as mp\n'), ((10020, 10066), 'whitespace_repair.utils.config.DataPreprocessingConfig.from_yaml', 'DataPreprocessingConfig.from_yaml', (['args.config'], {}), '(args.config)\n', (10053, 10066), False, 'from whitespace_repair.utils.config import DataPreprocessingConfig\n'), ((10211, 10256), 'os.makedirs', 'os.makedirs', (['CONFIG.output_dir'], {'exist_ok': '(True)'}), '(CONFIG.output_dir, exist_ok=True)\n', (10222, 10256), False, 'import os\n'), ((10523, 10562), 'whitespace_repair.model.tokenizer.load_tokenizer', 'toklib.load_tokenizer', (['CONFIG.tokenizer'], {}), '(CONFIG.tokenizer)\n', (10544, 10562), True, 'from whitespace_repair.model import tokenizer as toklib\n'), ((13250, 13266), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (13264, 13266), False, 'import time\n'), ((13814, 13830), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (13828, 13830), False, 'import time\n'), ((5383, 5418), 'os.path.join', 'os.path.join', (['output_dir', 'lmdb_name'], {}), '(output_dir, lmdb_name)\n', (5395, 5418), False, 'import os\n'), ((6416, 6611), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'process_files', 'args': '(queue, file_batch, tokenizer_path, target_tokenizer_path, pretokenize,\n ensure_equal_length, preprocessing_fn, max_sequence_length, cut_overflowing\n )'}), '(target=process_files, args=(queue, file_batch, tokenizer_path,\n target_tokenizer_path, pretokenize, ensure_equal_length,\n preprocessing_fn, max_sequence_length, cut_overflowing))\n', (6426, 6611), True, 'import multiprocessing as mp\n'), ((7142, 7162), 'msgpack.dumps', 'msgpack.dumps', (['files'], {}), '(files)\n', (7155, 7162), False, 'import msgpack\n'), ((10461, 10504), 'os.path.join', 'os.path.join', (['CONFIG.output_dir', '"""logs.txt"""'], {}), "(CONFIG.output_dir, 'logs.txt')\n", (10473, 10504), False, 'import os\n'), ((10923, 10969), 'whitespace_repair.model.tokenizer.load_tokenizer', 'toklib.load_tokenizer', (['CONFIG.target_tokenizer'], {}), '(CONFIG.target_tokenizer)\n', (10944, 10969), True, 'from whitespace_repair.model import tokenizer as toklib\n'), ((12446, 12490), 'whitespace_repair.utils.data.chain_preprocessing_fns', 'data.chain_preprocessing_fns', (['corruption_fns'], {}), '(corruption_fns)\n', (12474, 12490), False, 'from whitespace_repair.utils import common, data, io\n'), ((12753, 12779), 'random.Random', 'random.Random', (['CONFIG.seed'], {}), '(CONFIG.seed)\n', 
(12766, 12779), False, 'import random\n'), ((9498, 9517), 'msgpack.dumps', 'msgpack.dumps', (['keys'], {}), '(keys)\n', (9511, 9517), False, 'import msgpack\n'), ((9549, 9571), 'msgpack.dumps', 'msgpack.dumps', (['lengths'], {}), '(lengths)\n', (9562, 9571), False, 'import msgpack\n'), ((9681, 9709), 'msgpack.dumps', 'msgpack.dumps', (['num_sequences'], {}), '(num_sequences)\n', (9694, 9709), False, 'import msgpack\n'), ((9740, 9764), 'msgpack.dumps', 'msgpack.dumps', (['keys_keys'], {}), '(keys_keys)\n', (9753, 9764), False, 'import msgpack\n'), ((9798, 9825), 'msgpack.dumps', 'msgpack.dumps', (['lengths_keys'], {}), '(lengths_keys)\n', (9811, 9825), False, 'import msgpack\n'), ((10323, 10369), 'os.path.join', 'os.path.join', (['CONFIG.output_dir', '"""config.yaml"""'], {}), "(CONFIG.output_dir, 'config.yaml')\n", (10335, 10369), False, 'import os\n'), ((12157, 12209), 'whitespace_repair.utils.data.get_preprocessing_fn', 'data.get_preprocessing_fn', (['cfg.type'], {}), '(cfg.type, **cfg.arguments)\n', (12182, 12209), False, 'from whitespace_repair.utils import common, data, io\n'), ((12689, 12704), 'whitespace_repair.utils.io.glob_safe', 'io.glob_safe', (['g'], {}), '(g)\n', (12701, 12704), False, 'from whitespace_repair.utils import common, data, io\n'), ((12833, 12852), 'whitespace_repair.utils.io.line_count', 'io.line_count', (['file'], {}), '(file)\n', (12846, 12852), False, 'from whitespace_repair.utils import common, data, io\n'), ((7698, 7721), 'msgpack.dumps', 'msgpack.dumps', (['enc_dict'], {}), '(enc_dict)\n', (7711, 7721), False, 'import msgpack\n'), ((8768, 8784), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (8782, 8784), False, 'import time\n'), ((6003, 6026), 'os.sched_getaffinity', 'os.sched_getaffinity', (['(0)'], {}), '(0)\n', (6023, 6026), False, 'import os\n'), ((8135, 8154), 'msgpack.dumps', 'msgpack.dumps', (['keys'], {}), '(keys)\n', (8148, 8154), False, 'import msgpack\n'), ((8194, 8216), 'msgpack.dumps', 'msgpack.dumps', (['lengths'], {}), '(lengths)\n', (8207, 8216), False, 'import msgpack\n'), ((8350, 8378), 'msgpack.dumps', 'msgpack.dumps', (['num_sequences'], {}), '(num_sequences)\n', (8363, 8378), False, 'import msgpack\n'), ((8417, 8441), 'msgpack.dumps', 'msgpack.dumps', (['keys_keys'], {}), '(keys_keys)\n', (8430, 8441), False, 'import msgpack\n'), ((8483, 8510), 'msgpack.dumps', 'msgpack.dumps', (['lengths_keys'], {}), '(lengths_keys)\n', (8496, 8510), False, 'import msgpack\n'), ((8966, 9034), 'whitespace_repair.utils.common.eta_minutes', 'common.eta_minutes', (['((end - start) / 60)', 'num_sequences', 'max_sequences'], {}), '((end - start) / 60, num_sequences, max_sequences)\n', (8984, 9034), False, 'from whitespace_repair.utils import common, data, io\n')] |